Dataset schema (column, type, value range):

| column | type | range |
|---|---|---|
| entry_point | string | length 1–65 |
| original_triton_python_code | string | length 208–619k |
| optimised_triton_code | string | length 1.15k–275k |
| repo_name | string | length 7–115 |
| module_name | string | length 1–65 |
| synthetic | bool | 1 class |
| uuid | int64 | 0–18.5k |
| licenses | list | length 1–6 |
| stars | int64 | 0–19.8k |
| sha | string | length 40 |
| repo_link | string | length 72–180 |
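Judging from the rows below, `original_triton_python_code` holds the eager PyTorch source of a module (plus its `get_inputs`/`get_init_inputs` helpers) and `optimised_triton_code` holds the corresponding TorchInductor/Triton lowering. A minimal iteration sketch, assuming the dataset is available through the Hugging Face `datasets` library; the dataset ID below is a placeholder, not the real hub path:

```python
# Hypothetical usage sketch -- "user/triton-pairs" is a placeholder dataset ID.
from datasets import load_dataset

ds = load_dataset("user/triton-pairs", split="train")  # placeholder path
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"], row["licenses"])
print(row["original_triton_python_code"][:200])  # eager PyTorch module source
print(row["optimised_triton_code"][:200])        # Inductor/Triton kernel source
```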
depthwise_clipseg_conv
|
import torch
import torch.nn as nn
import torch.utils.data
class depthwise_clipseg_conv(nn.Module):
def __init__(self):
super(depthwise_clipseg_conv, self).__init__()
self.depthwise = nn.Conv2d(1, 1, kernel_size=3, padding=1)
def depthwise_clipseg(self, x, channels):
x = torch.cat([self.depthwise(x[:, i].unsqueeze(1)) for i in range(
channels)], dim=1)
return x
def forward(self, x):
channels = x.shape[1]
out = self.depthwise_clipseg(x, channels)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp6 = tl.load(in_ptr1 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp5 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 2, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 16 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp15 + tmp7
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp14, tmp16, tmp17)
tmp19 = tmp0 >= tmp12
tmp20 = tl.full([1], 3, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr3 + (x0 + 16 * x2), tmp22 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tmp23 + tmp7
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp22, tmp24, tmp25)
tmp27 = tmp0 >= tmp20
tl.full([1], 4, tl.int64)
tmp30 = tl.load(in_ptr4 + (x0 + 16 * x2), tmp27 & xmask,
eviction_policy='evict_last', other=0.0)
tmp31 = tmp30 + tmp7
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp27, tmp31, tmp32)
tmp34 = tl.where(tmp22, tmp26, tmp33)
tmp35 = tl.where(tmp14, tmp18, tmp34)
tmp36 = tl.where(tmp4, tmp10, tmp35)
tl.store(out_ptr0 + x3, tmp36, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4, 4), (64, 0, 4, 1), 0), primals_2, stride=(1, 1), padding=
(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0
), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4, 4), (64, 0, 4, 1), 16), primals_2, stride=(1, 1), padding
=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4, 4), (64, 0, 4, 1), 32), primals_2, stride=(1, 1), padding
=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 4, 4), (16, 16, 4, 1))
buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4, 4), (64, 0, 4, 1), 48), primals_2, stride=(1, 1), padding
=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 4, 4), (16, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](buf0, primals_3, buf1, buf2, buf3,
buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf1
del buf2
del buf3
del primals_3
return buf4, primals_2, reinterpret_tensor(primals_1, (4, 1, 4, 4), (64,
16, 4, 1), 0), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16,
4, 1), 16), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16, 4,
1), 32), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16, 4, 1), 48
)
class depthwise_clipseg_convNew(nn.Module):
def __init__(self):
super(depthwise_clipseg_convNew, self).__init__()
self.depthwise = nn.Conv2d(1, 1, kernel_size=3, padding=1)
def depthwise_clipseg(self, x, channels):
x = torch.cat([self.depthwise(x[:, i].unsqueeze(1)) for i in range(
channels)], dim=1)
return x
def forward(self, input_0):
primals_2 = self.depthwise.weight
primals_3 = self.depthwise.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
| repo_name: whiteking64/lang-seg | module_name: depthwise_clipseg_conv | synthetic: false | uuid: 16,707 | licenses: ["MIT"] | stars: 202 | sha: 9d063b126f1b64e38ddb20cc75fc74435bfdcbd3 | repo_link: https://github.com/whiteking64/lang-seg/tree/9d063b126f1b64e38ddb20cc75fc74435bfdcbd3 |
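A minimal sanity-check sketch for this row, assuming a CUDA device with Triton installed and that both code cells above (the eager `depthwise_clipseg_conv` and the generated `depthwise_clipseg_convNew` with its `call` wrapper) have been executed in the same session:

```python
# Sketch: the compiled wrapper should reproduce the eager module once the
# depthwise weights are shared. Requires CUDA; the generated call() allocates
# CUDA buffers and launches the fused cat kernel.
import torch

torch.manual_seed(0)
eager = depthwise_clipseg_conv().cuda()
compiled = depthwise_clipseg_convNew().cuda()
compiled.load_state_dict(eager.state_dict())  # copy the 3x3 depthwise conv

x = torch.rand(4, 4, 4, 4, device="cuda")     # same shape as get_inputs()
with torch.no_grad():
    ref = eager(x)
    out = compiled(x)
print(torch.allclose(ref, out, atol=1e-5))    # expected to print True
```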
SAModule
|
import torch
import torch.nn as nn
class SAModule(nn.Module):
"""Spatial Attention Module"""
def __init__(self):
super(SAModule, self).__init__()
self.conv = nn.Conv2d(2, 1, kernel_size=3, padding=1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
input = x
avg_c = torch.mean(x, 1, True)
max_c, _ = torch.max(x, 1, True)
x = torch.cat((avg_c, max_c), 1)
x = self.conv(x)
x = self.sigmoid(x)
return input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp19 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp15, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 3, 3), (18, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](primals_1, buf1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_2, buf0, buf1
class SAModuleNew(nn.Module):
"""Spatial Attention Module"""
def __init__(self):
super(SAModuleNew, self).__init__()
self.conv = nn.Conv2d(2, 1, kernel_size=3, padding=1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
| repo_name: whkwls2653/Pytorch_Face_Recognition- | module_name: SAModule | synthetic: false | uuid: 16,708 | licenses: ["MIT"] | stars: 62 | sha: 60f3849def589957d9080457a1a9833112a71f6c | repo_link: https://github.com/whkwls2653/Pytorch_Face_Recognition-/tree/60f3849def589957d9080457a1a9833112a71f6c |
BoundaryDecoderAttention
|
import torch
def masked_softmax(x, m=None, axis=-1):
"""
Softmax with mask (optional)
"""
x = torch.clamp(x, min=-15.0, max=15.0)
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=axis, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=axis, keepdim=True) + 1e-06)
return softmax
class BoundaryDecoderAttention(torch.nn.Module):
"""
input: p: batch x inp_p
p_mask: batch
q: batch x time x inp_q
q_mask: batch x time
h_tm1: batch x out
depth: int
output: z: batch x inp_p+inp_q
"""
def __init__(self, input_dim, output_dim, enable_cuda=False):
super(BoundaryDecoderAttention, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.enable_cuda = enable_cuda
self.V = torch.nn.Linear(self.input_dim, self.output_dim)
self.W_a = torch.nn.Linear(self.output_dim, self.output_dim)
self.v = torch.nn.Parameter(torch.FloatTensor(self.output_dim))
self.c = torch.nn.Parameter(torch.FloatTensor(1))
self.init_weights()
def init_weights(self):
torch.nn.init.xavier_uniform(self.V.weight.data, gain=1)
torch.nn.init.xavier_uniform(self.W_a.weight.data, gain=1)
self.V.bias.data.fill_(0)
self.W_a.bias.data.fill_(0)
torch.nn.init.normal(self.v.data, mean=0, std=0.05)
self.c.data.fill_(1.0)
def forward(self, H_r, mask_r, h_tm1):
batch_size, time = H_r.size(0), H_r.size(1)
Fk = self.V.forward(H_r.view(-1, H_r.size(2)))
Fk_prime = self.W_a.forward(h_tm1)
Fk = Fk.view(batch_size, time, -1)
Fk = torch.tanh(Fk + Fk_prime.unsqueeze(1))
beta = torch.matmul(Fk, self.v)
beta = beta + self.c.unsqueeze(0)
beta = masked_softmax(beta, mask_r, axis=-1)
z = torch.bmm(beta.view(beta.size(0), 1, beta.size(1)), H_r)
z = z.view(z.size(0), -1)
return z, beta
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mv_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * (x0 // 4), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr2 + 1)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (2 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr2 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr1 + (3 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr2 + 3)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tmp6 = tmp3 * tmp5
tmp9 = tmp7 + tmp8
tmp10 = libdevice.tanh(tmp9)
tmp13 = tmp10 * tmp12
tmp14 = tmp6 + tmp13
tmp17 = tmp15 + tmp16
tmp18 = libdevice.tanh(tmp17)
tmp21 = tmp18 * tmp20
tmp22 = tmp14 + tmp21
tmp25 = tmp23 + tmp24
tmp26 = libdevice.tanh(tmp25)
tmp29 = tmp26 * tmp28
tmp30 = tmp22 + tmp29
tl.store(out_ptr0 + x0, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_clamp_exp_max_mul_sub_sum_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp8 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp28 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp0 + tmp2
tmp4 = -15.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = 15.0
tmp7 = triton_helpers.minimum(tmp5, tmp6)
tmp9 = tmp7 * tmp8
tmp11 = tmp10 + tmp2
tmp12 = triton_helpers.maximum(tmp11, tmp4)
tmp13 = triton_helpers.minimum(tmp12, tmp6)
tmp15 = tmp13 * tmp14
tmp16 = triton_helpers.maximum(tmp9, tmp15)
tmp18 = tmp17 + tmp2
tmp19 = triton_helpers.maximum(tmp18, tmp4)
tmp20 = triton_helpers.minimum(tmp19, tmp6)
tmp22 = tmp20 * tmp21
tmp23 = triton_helpers.maximum(tmp16, tmp22)
tmp25 = tmp24 + tmp2
tmp26 = triton_helpers.maximum(tmp25, tmp4)
tmp27 = triton_helpers.minimum(tmp26, tmp6)
tmp29 = tmp27 * tmp28
tmp30 = triton_helpers.maximum(tmp23, tmp29)
tmp31 = tmp9 - tmp30
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 * tmp8
tmp34 = tmp15 - tmp30
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp35 * tmp14
tmp37 = tmp33 + tmp36
tmp38 = tmp22 - tmp30
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 * tmp21
tmp41 = tmp37 + tmp40
tmp42 = tmp29 - tmp30
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp43 * tmp28
tmp45 = tmp41 + tmp44
tl.store(out_ptr0 + x0, tmp30, xmask)
tl.store(out_ptr1 + x0, tmp45, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_mul_sub_2(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp8 = tl.load(in_ptr1 + x2, xmask)
tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp4 = -15.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = 15.0
tmp7 = triton_helpers.minimum(tmp5, tmp6)
tmp9 = tmp7 * tmp8
tmp11 = tmp9 - tmp10
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp12 * tmp8
tmp15 = 1e-06
tmp16 = tmp14 + tmp15
tmp17 = tmp13 / tmp16
tl.store(in_out_ptr0 + x2, tmp17, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_mv_0[grid(16)](buf0, buf1, primals_7, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_add_clamp_exp_max_mul_sub_sum_1[grid(4)](buf2,
primals_8, primals_9, buf3, buf4, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf5 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0)
del buf2
triton_poi_fused_add_clamp_div_exp_mul_sub_2[grid(16)](buf5,
primals_8, primals_9, buf3, buf4, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf3
del buf4
buf6 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 4, 1), 0
), primals_1, out=buf6)
return reinterpret_tensor(buf6, (4, 4), (4, 1), 0
), buf5, primals_1, primals_6, primals_7, primals_8, primals_9, buf0, buf1
def masked_softmax(x, m=None, axis=-1):
"""
Softmax with mask (optional)
"""
x = torch.clamp(x, min=-15.0, max=15.0)
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=axis, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=axis, keepdim=True) + 1e-06)
return softmax
class BoundaryDecoderAttentionNew(torch.nn.Module):
"""
input: p: batch x inp_p
p_mask: batch
q: batch x time x inp_q
q_mask: batch x time
h_tm1: batch x out
depth: int
output: z: batch x inp_p+inp_q
"""
def __init__(self, input_dim, output_dim, enable_cuda=False):
super(BoundaryDecoderAttentionNew, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.enable_cuda = enable_cuda
self.V = torch.nn.Linear(self.input_dim, self.output_dim)
self.W_a = torch.nn.Linear(self.output_dim, self.output_dim)
self.v = torch.nn.Parameter(torch.FloatTensor(self.output_dim))
self.c = torch.nn.Parameter(torch.FloatTensor(1))
self.init_weights()
def init_weights(self):
torch.nn.init.xavier_uniform(self.V.weight.data, gain=1)
torch.nn.init.xavier_uniform(self.W_a.weight.data, gain=1)
self.V.bias.data.fill_(0)
self.W_a.bias.data.fill_(0)
torch.nn.init.normal(self.v.data, mean=0, std=0.05)
self.c.data.fill_(1.0)
def forward(self, input_0, input_1, input_2):
primals_3 = self.v
primals_8 = self.c
primals_2 = self.V.weight
primals_5 = self.V.bias
primals_4 = self.W_a.weight
primals_7 = self.W_a.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
|
| repo_name: watchernyu/MatchLSTM-Analyze-Adversarial-Training | module_name: BoundaryDecoderAttention | synthetic: false | uuid: 16,709 | licenses: ["MIT"] | stars: 50 | sha: 00bd33d3dd22d5291dc2c1ec5feef5eb93b59b3a | repo_link: https://github.com/watchernyu/MatchLSTM-Analyze-Adversarial-Training/tree/00bd33d3dd22d5291dc2c1ec5feef5eb93b59b3a |
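The fused kernels in this row implement the `masked_softmax` defined above: clamp to [-15, 15], multiply by the mask before and after the exponential, and normalise with a 1e-06 epsilon. A small CPU-only sketch of that behaviour, assuming the `masked_softmax` definition from this row is in scope:

```python
# Sketch: masked positions get exactly zero weight; the rest renormalise.
import torch

x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
m = torch.tensor([[1.0, 1.0, 0.0, 1.0]])   # mask out the third position
p = masked_softmax(x, m, axis=-1)
print(p)          # third entry is 0.0
print(p.sum(-1))  # ~1.0 (slightly below, because of the 1e-06 in the denominator)
```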
VertexConv
|
import torch
from torch import nn
class Transform(nn.Module):
"""
A Vertex Transformation module
Permutation invariant transformation: (N, k, d) -> (N, k, d)
"""
def __init__(self, dim_in, k):
"""
:param dim_in: input feature dimension
:param k: k neighbors
"""
super().__init__()
self.convKK = nn.Conv1d(k, k * k, dim_in, groups=k)
self.activation = nn.Softmax(dim=-1)
self.dp = nn.Dropout()
def forward(self, region_feats):
"""
:param region_feats: (N, k, d)
:return: (N, k, d)
"""
N, k, _ = region_feats.size()
conved = self.convKK(region_feats)
multiplier = conved.view(N, k, k)
multiplier = self.activation(multiplier)
transformed_feats = torch.matmul(multiplier, region_feats)
return transformed_feats
class VertexConv(nn.Module):
"""
A Vertex Convolution layer
Transform (N, k, d) feature to (N, d) feature by transform matrix and 1-D convolution
"""
def __init__(self, dim_in, k):
"""
:param dim_in: input feature dimension
:param k: k neighbors
"""
super().__init__()
self.trans = Transform(dim_in, k)
self.convK1 = nn.Conv1d(k, 1, 1)
def forward(self, region_feats):
"""
:param region_feats: (N, k, d)
:return: (N, d)
"""
transformed_feats = self.trans(region_feats)
pooled_feats = self.convK1(transformed_feats)
pooled_feats = pooled_feats.squeeze(1)
return pooled_feats
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'k': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf0, (4, 16, 1), (16, 1, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
extern_kernels.bmm(buf3, primals_1, out=buf4)
del buf3
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 4), (4, 4, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_3[grid(16)](buf6, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
return reinterpret_tensor(buf6, (4, 4), (4, 1), 0
), primals_1, primals_2, primals_4, buf1, buf4
class Transform(nn.Module):
"""
A Vertex Transformation module
Permutation invariant transformation: (N, k, d) -> (N, k, d)
"""
def __init__(self, dim_in, k):
"""
:param dim_in: input feature dimension
:param k: k neighbors
"""
super().__init__()
self.convKK = nn.Conv1d(k, k * k, dim_in, groups=k)
self.activation = nn.Softmax(dim=-1)
self.dp = nn.Dropout()
def forward(self, region_feats):
"""
:param region_feats: (N, k, d)
:return: (N, k, d)
"""
N, k, _ = region_feats.size()
conved = self.convKK(region_feats)
multiplier = conved.view(N, k, k)
multiplier = self.activation(multiplier)
transformed_feats = torch.matmul(multiplier, region_feats)
return transformed_feats
class VertexConvNew(nn.Module):
"""
A Vertex Convolution layer
Transform (N, k, d) feature to (N, d) feature by transform matrix and 1-D convolution
"""
def __init__(self, dim_in, k):
"""
:param dim_in: input feature dimension
:param k: k neighbors
"""
super().__init__()
self.trans = Transform(dim_in, k)
self.convK1 = nn.Conv1d(k, 1, 1)
def forward(self, input_0):
primals_2 = self.trans.convKK.weight
primals_3 = self.trans.convKK.bias
primals_4 = self.convK1.weight
primals_5 = self.convK1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
| repo_name: weiyx15/DHGNN | module_name: VertexConv | synthetic: false | uuid: 16,710 | licenses: ["MIT"] | stars: 124 | sha: 870a1763c34af6ee9a7a3207fed4a5e6bdb95d23 | repo_link: https://github.com/weiyx15/DHGNN/tree/870a1763c34af6ee9a7a3207fed4a5e6bdb95d23 |
Transform
|
import torch
from torch import nn
class Transform(nn.Module):
"""
A Vertex Transformation module
Permutation invariant transformation: (N, k, d) -> (N, k, d)
"""
def __init__(self, dim_in, k):
"""
:param dim_in: input feature dimension
:param k: k neighbors
"""
super().__init__()
self.convKK = nn.Conv1d(k, k * k, dim_in, groups=k)
self.activation = nn.Softmax(dim=-1)
self.dp = nn.Dropout()
def forward(self, region_feats):
"""
:param region_feats: (N, k, d)
:return: (N, k, d)
"""
N, k, _ = region_feats.size()
conved = self.convKK(region_feats)
multiplier = conved.view(N, k, k)
multiplier = self.activation(multiplier)
transformed_feats = torch.matmul(multiplier, region_feats)
return transformed_feats
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'k': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf0, (4, 16, 1), (16, 1, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
extern_kernels.bmm(buf3, primals_1, out=buf4)
del buf3
return buf4, primals_1, primals_2, buf1
class TransformNew(nn.Module):
"""
A Vertex Transformation module
Permutation invariant transformation: (N, k, d) -> (N, k, d)
"""
def __init__(self, dim_in, k):
"""
:param dim_in: input feature dimension
:param k: k neighbors
"""
super().__init__()
self.convKK = nn.Conv1d(k, k * k, dim_in, groups=k)
self.activation = nn.Softmax(dim=-1)
self.dp = nn.Dropout()
def forward(self, input_0):
primals_2 = self.convKK.weight
primals_3 = self.convKK.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
| repo_name: weiyx15/DHGNN | module_name: Transform | synthetic: false | uuid: 16,711 | licenses: ["MIT"] | stars: 124 | sha: 870a1763c34af6ee9a7a3207fed4a5e6bdb95d23 | repo_link: https://github.com/weiyx15/DHGNN/tree/870a1763c34af6ee9a7a3207fed4a5e6bdb95d23 |
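A small CPU-only sketch of the `Transform` module from this row, assuming its class definition is in scope. The grouped 1-D convolution produces an (N, k, k) matrix whose rows are softmax-normalised, so each output row is a convex combination of the k input rows and the (N, k, d) shape is preserved:

```python
# Sketch: shape contract of Transform on the same input shape as get_inputs().
import torch

torch.manual_seed(0)
t = Transform(dim_in=4, k=4)
feats = torch.rand(4, 4, 4)          # (N, k, d)
out = t(feats)
print(out.shape)                     # torch.Size([4, 4, 4])
multiplier = t.activation(t.convKK(feats).view(4, 4, 4))
print(multiplier.sum(-1))            # each row sums to ~1
```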
ScaledDotProductAttention
|
import torch
def masked_softmax(x, m=None, dim=-1):
"""
Softmax with mask (optional)
"""
x = torch.clamp(x, min=-15.0, max=15.0)
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
class ScaledDotProductAttention(torch.nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = torch.nn.Dropout(dropout)
def forward(self, q, k, v, mask):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
attn = masked_softmax(attn, mask, 2)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'temperature': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_div_exp_max_mul_sub_sum_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = -15.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 15.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp8 = tmp6 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = triton_helpers.maximum(tmp10, tmp3)
tmp12 = triton_helpers.minimum(tmp11, tmp5)
tmp14 = tmp12 * tmp13
tmp15 = triton_helpers.maximum(tmp8, tmp14)
tmp17 = tmp16 * tmp1
tmp18 = triton_helpers.maximum(tmp17, tmp3)
tmp19 = triton_helpers.minimum(tmp18, tmp5)
tmp21 = tmp19 * tmp20
tmp22 = triton_helpers.maximum(tmp15, tmp21)
tmp24 = tmp23 * tmp1
tmp25 = triton_helpers.maximum(tmp24, tmp3)
tmp26 = triton_helpers.minimum(tmp25, tmp5)
tmp28 = tmp26 * tmp27
tmp29 = triton_helpers.maximum(tmp22, tmp28)
tmp30 = tmp8 - tmp29
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp31 * tmp7
tmp33 = tmp14 - tmp29
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp34 * tmp13
tmp36 = tmp32 + tmp35
tmp37 = tmp21 - tmp29
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp38 * tmp20
tmp40 = tmp36 + tmp39
tmp41 = tmp28 - tmp29
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp42 * tmp27
tmp44 = tmp40 + tmp43
tl.store(out_ptr0 + x0, tmp29, xmask)
tl.store(out_ptr1 + x0, tmp44, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_mul_sub_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp7 = tl.load(in_ptr0 + x2, xmask)
tmp9 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = -15.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 15.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp8 = tmp6 * tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp11 * tmp7
tmp14 = 1e-06
tmp15 = tmp13 + tmp14
tmp16 = tmp12 / tmp15
tl.store(in_out_ptr0 + x2, tmp16, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_exp_max_mul_sub_sum_0[grid(16)](buf0,
arg2_1, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = buf0
del buf0
triton_poi_fused_add_clamp_div_exp_max_mul_sub_1[grid(64)](buf3,
arg2_1, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg2_1
del buf1
del buf2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, arg3_1, out=buf4)
del arg3_1
return buf4, buf3
def masked_softmax(x, m=None, dim=-1):
"""
Softmax with mask (optional)
"""
x = torch.clamp(x, min=-15.0, max=15.0)
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
class ScaledDotProductAttentionNew(torch.nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = torch.nn.Dropout(dropout)
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0], output[1]
|
| repo_name: wjurayj/commonsense-rl | module_name: ScaledDotProductAttention | synthetic: false | uuid: 16,712 | licenses: ["Apache-2.0"] | stars: 55 | sha: fbbe4fa4a21865095783845fce2f0c4f4346e40f | repo_link: https://github.com/wjurayj/commonsense-rl/tree/fbbe4fa4a21865095783845fce2f0c4f4346e40f |
Attention
|
import torch
import torch.nn as nn
class Attention(nn.Module):
""" Applies attention mechanism on the `context` using the `query`.
**Thank you** to IBM for their initial implementation of :class:`Attention`. Here is
their `License
<https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__.
Args:
dimensions (int): Dimensionality of the query and context.
attention_type (str, optional): How to compute the attention score:
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
Example:
>>> attention = Attention(256)
>>> query = torch.randn(5, 1, 256)
>>> context = torch.randn(5, 5, 256)
>>> output, weights = attention(query, context)
>>> output.size()
torch.Size([5, 1, 256])
>>> weights.size()
torch.Size([5, 1, 5])
"""
def __init__(self, query_dim, context_dim, attention_type='general'):
super(Attention, self).__init__()
if attention_type not in ['dot', 'general']:
raise ValueError('Invalid attention type selected.')
self.attention_type = attention_type
if self.attention_type == 'general':
self.linear_in = nn.Linear(query_dim, query_dim, bias=False)
if query_dim != context_dim:
self.linear_proj = nn.Linear(query_dim, context_dim, bias=False)
self.linear_out = nn.Linear(context_dim * 2, context_dim, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.tanh = nn.Tanh()
def forward(self, query, context):
"""
Args:
query (:class:`torch.FloatTensor` [batch size, output length, dimensions]): Sequence of
queries to query the context.
context (:class:`torch.FloatTensor` [batch size, query length, dimensions]): Data
over which to apply the attention mechanism.
Returns:
:class:`tuple` with `output` and `weights`:
* **output** (:class:`torch.LongTensor` [batch size, output length, dimensions]):
Tensor containing the attended features.
* **weights** (:class:`torch.FloatTensor` [batch size, output length, query length]):
Tensor containing attention weights.
"""
batch_size, output_len, query_dim = query.size()
batch_size, query_len, context_dim = context.size()
if self.attention_type == 'general':
query = query.reshape(batch_size * output_len, query_dim)
query = self.linear_in(query)
query = query.reshape(batch_size, output_len, query_dim)
if query_dim != context_dim:
query = self.linear_proj(query)
attention_scores = torch.bmm(query, context.transpose(1, 2).
contiguous())
attention_scores = attention_scores.view(batch_size * output_len,
query_len)
attention_weights = self.softmax(attention_scores)
attention_weights = attention_weights.view(batch_size, output_len,
query_len)
mix = torch.bmm(attention_weights, context)
combined = torch.cat((mix, query), dim=2)
combined = combined.view(batch_size * output_len, 2 * context_dim)
output = self.linear_out(combined).view(batch_size, output_len,
context_dim)
output = self.tanh(output)
attention_weights = attention_weights.mean(dim=1)
return output, attention_weights
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'query_dim': 4, 'context_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4
y3 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
tl.store(out_ptr1 + (y2 + 4 * x1 + 16 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_mean_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_transpose_0[grid(16, 4)](primals_2, buf1,
buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), buf1, out=buf2)
buf3 = reinterpret_tensor(buf1, (16, 4), (4, 1), 0)
del buf1
triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1),
0), primals_2, out=buf5)
buf6 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_3[grid(128)](buf5, buf0, buf6, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
buf7 = reinterpret_tensor(buf5, (16, 4), (4, 1), 0)
del buf5
extern_kernels.mm(reinterpret_tensor(buf6, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0)
del buf7
triton_poi_fused_tanh_4[grid(64)](buf8, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mean_5[grid(16)](buf4, buf9, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf4
return buf8, buf9, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0
), buf2, reinterpret_tensor(buf6, (16, 8), (8, 1), 0
), buf8, primals_4, buf10
class AttentionNew(nn.Module):
""" Applies attention mechanism on the `context` using the `query`.
**Thank you** to IBM for their initial implementation of :class:`Attention`. Here is
their `License
<https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__.
Args:
dimensions (int): Dimensionality of the query and context.
attention_type (str, optional): How to compute the attention score:
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
Example:
>>> attention = Attention(256)
>>> query = torch.randn(5, 1, 256)
>>> context = torch.randn(5, 5, 256)
>>> output, weights = attention(query, context)
>>> output.size()
torch.Size([5, 1, 256])
>>> weights.size()
torch.Size([5, 1, 5])
"""
def __init__(self, query_dim, context_dim, attention_type='general'):
super(AttentionNew, self).__init__()
if attention_type not in ['dot', 'general']:
raise ValueError('Invalid attention type selected.')
self.attention_type = attention_type
if self.attention_type == 'general':
self.linear_in = nn.Linear(query_dim, query_dim, bias=False)
if query_dim != context_dim:
self.linear_proj = nn.Linear(query_dim, context_dim, bias=False)
self.linear_out = nn.Linear(context_dim * 2, context_dim, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.tanh = nn.Tanh()
def forward(self, input_0, input_1):
primals_3 = self.linear_in.weight
primals_4 = self.linear_out.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
|
| repo_name: wjurayj/commonsense-rl | module_name: Attention | synthetic: false | uuid: 16,713 | licenses: ["Apache-2.0"] | stars: 55 | sha: fbbe4fa4a21865095783845fce2f0c4f4346e40f | repo_link: https://github.com/wjurayj/commonsense-rl/tree/fbbe4fa4a21865095783845fce2f0c4f4346e40f |
bottleneck_block
|
import torch
import torch.nn as nn
import torch.utils.data
class depthwise_conv(nn.Module):
def __init__(self, kernel_size=3, stride=1, padding=1):
super(depthwise_conv, self).__init__()
self.depthwise = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=
stride, padding=padding)
def forward(self, x):
C, H, W = x.shape[1:]
x = x.reshape(-1, 1, H, W)
x = self.depthwise(x)
x = x.view(-1, C, H, W)
return x
class bottleneck_block(nn.Module):
def __init__(self, kernel_size=3, stride=1, padding=1, activation='relu'):
super(bottleneck_block, self).__init__()
self.depthwise = depthwise_conv(kernel_size=3, stride=1, padding=1)
if activation == 'relu':
self.activation = nn.ReLU()
elif activation == 'lrelu':
self.activation = nn.LeakyReLU()
elif activation == 'tanh':
self.activation = nn.Tanh()
def forward(self, x, act=True):
sum_layer = x.max(dim=1, keepdim=True)[0]
x = self.depthwise(x)
x = x + sum_layer
if act:
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_max_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp0 + tmp2
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = tmp3 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = 0.0
tmp15 = tmp13 <= tmp14
tl.store(in_out_ptr0 + x3, tmp13, xmask)
tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (16,
1, 4, 4), (16, 16, 4, 1), 0), primals_2, stride=(1, 1), padding
=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf0, (16, 1, 4, 4), (16, 16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_max_relu_threshold_backward_0[grid(256)](buf1,
primals_3, primals_1, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_3
return buf1, primals_2, reinterpret_tensor(primals_1, (16, 1, 4, 4), (
16, 16, 4, 1), 0), buf2
class depthwise_conv(nn.Module):
def __init__(self, kernel_size=3, stride=1, padding=1):
super(depthwise_conv, self).__init__()
self.depthwise = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=
stride, padding=padding)
def forward(self, x):
C, H, W = x.shape[1:]
x = x.reshape(-1, 1, H, W)
x = self.depthwise(x)
x = x.view(-1, C, H, W)
return x
class bottleneck_blockNew(nn.Module):
def __init__(self, kernel_size=3, stride=1, padding=1, activation='relu'):
super(bottleneck_blockNew, self).__init__()
self.depthwise = depthwise_conv(kernel_size=3, stride=1, padding=1)
if activation == 'relu':
self.activation = nn.ReLU()
elif activation == 'lrelu':
self.activation = nn.LeakyReLU()
elif activation == 'tanh':
self.activation = nn.Tanh()
def forward(self, input_0):
primals_2 = self.depthwise.depthwise.weight
primals_3 = self.depthwise.depthwise.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
| repo_name: whiteking64/lang-seg | module_name: bottleneck_block | synthetic: false | uuid: 16,714 | licenses: ["MIT"] | stars: 202 | sha: 9d063b126f1b64e38ddb20cc75fc74435bfdcbd3 | repo_link: https://github.com/whiteking64/lang-seg/tree/9d063b126f1b64e38ddb20cc75fc74435bfdcbd3 |
depthwise_conv
|
import torch
import torch.nn as nn
import torch.utils.data
class depthwise_conv(nn.Module):
def __init__(self, kernel_size=3, stride=1, padding=1):
super(depthwise_conv, self).__init__()
self.depthwise = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=
stride, padding=padding)
def forward(self, x):
C, H, W = x.shape[1:]
x = x.reshape(-1, 1, H, W)
x = self.depthwise(x)
x = x.view(-1, C, H, W)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (16,
1, 4, 4), (16, 16, 4, 1), 0), primals_2, stride=(1, 1), padding
=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf0, (16, 1, 4, 4), (16, 16, 4, 1))
buf1 = reinterpret_tensor(buf0, (16, 1, 4, 4), (16, 1, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_2, reinterpret_tensor(primals_1, (16, 1, 4, 4), (16, 16,
4, 1), 0)
class depthwise_convNew(nn.Module):
def __init__(self, kernel_size=3, stride=1, padding=1):
super(depthwise_convNew, self).__init__()
self.depthwise = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=
stride, padding=padding)
def forward(self, input_0):
primals_2 = self.depthwise.weight
primals_3 = self.depthwise.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
whiteking64/lang-seg
|
depthwise_conv
| false
| 16,715
|
[
"MIT"
] | 202
|
9d063b126f1b64e38ddb20cc75fc74435bfdcbd3
|
https://github.com/whiteking64/lang-seg/tree/9d063b126f1b64e38ddb20cc75fc74435bfdcbd3
|
MNACLayer
|
import collections
import math
import torch
import torch.utils.data
def sparsity_error(W):
W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
return torch.max(W_error)
def mnac(x, W, mode='prod'):
out_size, in_size = W.size()
x = x.view(x.size()[0], in_size, 1)
W = W.t().view(1, in_size, out_size)
if mode == 'prod':
return torch.prod(x * W + 1 - W, -2)
elif mode == 'exp-log':
return torch.exp(torch.sum(torch.log(x * W + 1 - W), -2))
elif mode == 'no-idendity':
return torch.prod(x * W, -2)
else:
raise ValueError(f'mnac mode "{mode}" is not implemented')
class SummaryWriterNamespaceNoLoggingScope:
def __init__(self, writer):
self._writer = writer
def __enter__(self):
self._writer._logging_enabled = False
def __exit__(self, type, value, traceback):
self._writer._logging_enabled = True
return False
class DummySummaryWriter:
def __init__(self, **kwargs):
self._logging_enabled = False
pass
def add_scalar(self, name, value, verbose_only=True):
pass
def add_summary(self, name, tensor, verbose_only=True):
pass
def add_histogram(self, name, tensor, verbose_only=True):
pass
def add_tensor(self, name, tensor, verbose_only=True):
pass
def print(self, name, tensor, verbose_only=True):
pass
def namespace(self, name):
return self
def every(self, epoch_interval):
return self
def verbose(self, verbose):
return self
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
class NoRandomScope:
def __init__(self, module):
self._module = module
def __enter__(self):
self._module._disable_random()
def __exit__(self, type, value, traceback):
self._module._enable_random()
return False
class ExtendedTorchModule(torch.nn.Module):
def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
super().__init__()
if writer is None:
writer = DummySummaryWriter()
self.writer = writer.namespace(default_name if name is None else name)
self.allow_random = True
def set_parameter(self, name, value):
parameter = getattr(self, name, None)
if isinstance(parameter, torch.nn.Parameter):
parameter.fill_(value)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.set_parameter(name, value)
def regualizer(self, merge_in=None):
regualizers = collections.defaultdict(int)
if merge_in is not None:
for key, value in merge_in.items():
self.writer.add_scalar(f'regualizer/{key}', value)
regualizers[key] += value
for module in self.children():
if isinstance(module, ExtendedTorchModule):
for key, value in module.regualizer().items():
regualizers[key] += value
return regualizers
def optimize(self, loss):
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.optimize(loss)
def log_gradients(self):
for name, parameter in self.named_parameters(recurse=False):
if parameter.requires_grad:
gradient, *_ = parameter.grad.data
self.writer.add_summary(f'{name}/grad', gradient)
self.writer.add_histogram(f'{name}/grad', gradient)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.log_gradients()
def no_internal_logging(self):
return self.writer.no_logging()
def _disable_random(self):
self.allow_random = False
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._disable_random()
def _enable_random(self):
self.allow_random = True
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._enable_random()
def no_random(self):
return NoRandomScope(self)
class MNACLayer(ExtendedTorchModule):
"""Implements the NAC (Neural Accumulator)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features)
)
self.register_parameter('bias', None)
def reset_parameters(self):
std = math.sqrt(0.25)
r = math.sqrt(3.0) * std
torch.nn.init.uniform_(self.W_hat, -r, r)
def forward(self, x, reuse=False):
W = torch.sigmoid(self.W_hat)
self.writer.add_histogram('W', W)
self.writer.add_tensor('W', W)
self.writer.add_scalar('W/sparsity_error', sparsity_error(W),
verbose_only=False)
return mnac(x, W)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def get_inputs():
return [torch.rand([4, 4, 1])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
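# Usage sketch, not part of the original file: with W = sigmoid(W_hat) the layer
# computes out[b, j] = prod_i (x[b, i] * W[j, i] + 1 - W[j, i]), so W[j, i] near 1
# keeps x[b, i] as a factor while W[j, i] near 0 contributes a neutral 1.
def example_mnac_layer():
    layer = MNACLayer(in_features=4, out_features=4)
    layer.reset_parameters()
    x = torch.rand(4, 4, 1)
    y = layer(x)
    assert y.shape == (4, 4)
    return y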
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import collections
import math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
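    # Elementwise MNAC factors: reads x (in_ptr0) and W_hat (in_ptr1) and writes
    # x * sigmoid(W_hat) + 1 - sigmoid(W_hat) into a (batch, in, out) buffer that
    # the product kernel below reduces over the in_features axis.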
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = 1.0
tmp5 = tmp3 + tmp4
tmp6 = tmp5 - tmp2
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_prod_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sub_0[grid(64)](primals_2, primals_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_prod_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf1, primals_1, primals_2, buf0
def sparsity_error(W):
W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
return torch.max(W_error)
def mnac(x, W, mode='prod'):
out_size, in_size = W.size()
x = x.view(x.size()[0], in_size, 1)
W = W.t().view(1, in_size, out_size)
if mode == 'prod':
return torch.prod(x * W + 1 - W, -2)
elif mode == 'exp-log':
return torch.exp(torch.sum(torch.log(x * W + 1 - W), -2))
elif mode == 'no-idendity':
return torch.prod(x * W, -2)
else:
raise ValueError(f'mnac mode "{mode}" is not implemented')
class SummaryWriterNamespaceNoLoggingScope:
def __init__(self, writer):
self._writer = writer
def __enter__(self):
self._writer._logging_enabled = False
def __exit__(self, type, value, traceback):
self._writer._logging_enabled = True
return False
class DummySummaryWriter:
def __init__(self, **kwargs):
self._logging_enabled = False
pass
def add_scalar(self, name, value, verbose_only=True):
pass
def add_summary(self, name, tensor, verbose_only=True):
pass
def add_histogram(self, name, tensor, verbose_only=True):
pass
def add_tensor(self, name, tensor, verbose_only=True):
pass
def print(self, name, tensor, verbose_only=True):
pass
def namespace(self, name):
return self
def every(self, epoch_interval):
return self
def verbose(self, verbose):
return self
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
class NoRandomScope:
def __init__(self, module):
self._module = module
def __enter__(self):
self._module._disable_random()
def __exit__(self, type, value, traceback):
self._module._enable_random()
return False
class ExtendedTorchModule(torch.nn.Module):
def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
super().__init__()
if writer is None:
writer = DummySummaryWriter()
self.writer = writer.namespace(default_name if name is None else name)
self.allow_random = True
def set_parameter(self, name, value):
parameter = getattr(self, name, None)
if isinstance(parameter, torch.nn.Parameter):
parameter.fill_(value)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.set_parameter(name, value)
def regualizer(self, merge_in=None):
regualizers = collections.defaultdict(int)
if merge_in is not None:
for key, value in merge_in.items():
self.writer.add_scalar(f'regualizer/{key}', value)
regualizers[key] += value
for module in self.children():
if isinstance(module, ExtendedTorchModule):
for key, value in module.regualizer().items():
regualizers[key] += value
return regualizers
def optimize(self, loss):
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.optimize(loss)
def log_gradients(self):
for name, parameter in self.named_parameters(recurse=False):
if parameter.requires_grad:
gradient, *_ = parameter.grad.data
self.writer.add_summary(f'{name}/grad', gradient)
self.writer.add_histogram(f'{name}/grad', gradient)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.log_gradients()
def no_internal_logging(self):
return self.writer.no_logging()
def _disable_random(self):
self.allow_random = False
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._disable_random()
def _enable_random(self):
self.allow_random = True
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._enable_random()
def no_random(self):
return NoRandomScope(self)
class MNACLayerNew(ExtendedTorchModule):
"""Implements the NAC (Neural Accumulator)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features)
)
self.register_parameter('bias', None)
def reset_parameters(self):
std = math.sqrt(0.25)
r = math.sqrt(3.0) * std
torch.nn.init.uniform_(self.W_hat, -r, r)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def forward(self, input_0):
primals_1 = self.W_hat
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
wlm2019/Neural-Arithmetic-Units
|
MNACLayer
| false
| 16,716
|
[
"MIT"
] | 147
|
f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
https://github.com/wlm2019/Neural-Arithmetic-Units/tree/f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
GaussLinearStandardized
|
from torch.nn import Module
import math
import torch
from torch.nn.modules import Module
from torch.nn.parameter import Parameter
import torch.nn.functional as F
class GaussLinearStandardized(Module):
def __init__(self, in_features, out_features, bias=True,
raw_weight_variance=1.0, raw_bias_variance=1.0):
super(GaussLinearStandardized, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.raw_weight_variance = raw_weight_variance
self.raw_bias_variance = raw_bias_variance
self.epsilon_weight = Parameter(torch.Tensor(out_features, in_features)
)
if bias:
self.epsilon_bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('epsilon_bias', None)
self.reset_parameters()
def reset_parameters(self):
self.epsilon_weight.data.normal_()
if self.epsilon_bias is not None:
self.epsilon_bias.data.normal_()
def forward(self, input):
stdv = 1.0 / math.sqrt(self.in_features)
weight = self.epsilon_weight * stdv * math.sqrt(self.
raw_weight_variance)
if self.epsilon_bias is not None:
bias = self.epsilon_bias * math.sqrt(self.raw_bias_variance)
else:
bias = None
return F.linear(input, weight, bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
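# Usage sketch, not part of the original file: epsilon_weight holds standard
# normal draws that forward() rescales by 1/sqrt(in_features) (and the raw
# variance factors) before an ordinary F.linear; shapes mirror get_inputs().
def example_gauss_linear_standardized():
    layer = GaussLinearStandardized(in_features=4, out_features=4, bias=True)
    x = torch.rand(4, 4, 4, 4)
    y = layer(x)
    assert y.shape == (4, 4, 4, 4)
    return y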
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch.nn.modules import Module
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (
4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf2)
del buf0
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class GaussLinearStandardizedNew(Module):
def __init__(self, in_features, out_features, bias=True,
raw_weight_variance=1.0, raw_bias_variance=1.0):
super(GaussLinearStandardizedNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.raw_weight_variance = raw_weight_variance
self.raw_bias_variance = raw_bias_variance
self.epsilon_weight = Parameter(torch.Tensor(out_features, in_features)
)
if bias:
self.epsilon_bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('epsilon_bias', None)
self.reset_parameters()
def reset_parameters(self):
self.epsilon_weight.data.normal_()
if self.epsilon_bias is not None:
self.epsilon_bias.data.normal_()
def forward(self, input_0):
primals_1 = self.epsilon_weight
primals_2 = self.epsilon_bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
widedeepnetworks/widedeepnetworks
|
GaussLinearStandardized
| false
| 16,717
|
[
"Apache-2.0"
] | 50
|
81a8629d62d31643f3d598992ac6376a8fc5c48a
|
https://github.com/widedeepnetworks/widedeepnetworks/tree/81a8629d62d31643f3d598992ac6376a8fc5c48a
|
PosNACLayer
|
import collections
import torch
import torch.utils.data
def sparsity_error(W):
W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
return torch.max(W_error)
class SummaryWriterNamespaceNoLoggingScope:
def __init__(self, writer):
self._writer = writer
def __enter__(self):
self._writer._logging_enabled = False
def __exit__(self, type, value, traceback):
self._writer._logging_enabled = True
return False
class DummySummaryWriter:
def __init__(self, **kwargs):
self._logging_enabled = False
pass
def add_scalar(self, name, value, verbose_only=True):
pass
def add_summary(self, name, tensor, verbose_only=True):
pass
def add_histogram(self, name, tensor, verbose_only=True):
pass
def add_tensor(self, name, tensor, verbose_only=True):
pass
def print(self, name, tensor, verbose_only=True):
pass
def namespace(self, name):
return self
def every(self, epoch_interval):
return self
def verbose(self, verbose):
return self
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
class NoRandomScope:
def __init__(self, module):
self._module = module
def __enter__(self):
self._module._disable_random()
def __exit__(self, type, value, traceback):
self._module._enable_random()
return False
class ExtendedTorchModule(torch.nn.Module):
def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
super().__init__()
if writer is None:
writer = DummySummaryWriter()
self.writer = writer.namespace(default_name if name is None else name)
self.allow_random = True
def set_parameter(self, name, value):
parameter = getattr(self, name, None)
if isinstance(parameter, torch.nn.Parameter):
parameter.fill_(value)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.set_parameter(name, value)
def regualizer(self, merge_in=None):
regualizers = collections.defaultdict(int)
if merge_in is not None:
for key, value in merge_in.items():
self.writer.add_scalar(f'regualizer/{key}', value)
regualizers[key] += value
for module in self.children():
if isinstance(module, ExtendedTorchModule):
for key, value in module.regualizer().items():
regualizers[key] += value
return regualizers
def optimize(self, loss):
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.optimize(loss)
def log_gradients(self):
for name, parameter in self.named_parameters(recurse=False):
if parameter.requires_grad:
gradient, *_ = parameter.grad.data
self.writer.add_summary(f'{name}/grad', gradient)
self.writer.add_histogram(f'{name}/grad', gradient)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.log_gradients()
def no_internal_logging(self):
return self.writer.no_logging()
def _disable_random(self):
self.allow_random = False
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._disable_random()
def _enable_random(self):
self.allow_random = True
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._enable_random()
def no_random(self):
return NoRandomScope(self)
class PosNACLayer(ExtendedTorchModule):
"""Implements the NAC (Neural Accumulator)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features)
)
self.register_parameter('bias', None)
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.W_hat)
def forward(self, input, reuse=False):
W = torch.sigmoid(self.W_hat)
self.writer.add_histogram('W', W)
self.writer.add_tensor('W', W)
self.writer.add_scalar('W/sparsity_error', sparsity_error(W),
verbose_only=False)
return torch.nn.functional.linear(input, W, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
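# Usage sketch, not part of the original file: W = sigmoid(W_hat) keeps every
# weight in (0, 1), so the layer is a plain linear map with positive weights
# and no bias; shapes mirror get_inputs()/get_init_inputs() above.
def example_pos_nac_layer():
    layer = PosNACLayer(in_features=4, out_features=4)
    layer.reset_parameters()
    x = torch.rand(4, 4, 4, 4)
    y = layer(x)
    assert y.shape == (4, 4, 4, 4)
    return y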
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import collections
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1)
del buf0
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0)
def sparsity_error(W):
W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
return torch.max(W_error)
class SummaryWriterNamespaceNoLoggingScope:
def __init__(self, writer):
self._writer = writer
def __enter__(self):
self._writer._logging_enabled = False
def __exit__(self, type, value, traceback):
self._writer._logging_enabled = True
return False
class DummySummaryWriter:
def __init__(self, **kwargs):
self._logging_enabled = False
pass
def add_scalar(self, name, value, verbose_only=True):
pass
def add_summary(self, name, tensor, verbose_only=True):
pass
def add_histogram(self, name, tensor, verbose_only=True):
pass
def add_tensor(self, name, tensor, verbose_only=True):
pass
def print(self, name, tensor, verbose_only=True):
pass
def namespace(self, name):
return self
def every(self, epoch_interval):
return self
def verbose(self, verbose):
return self
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
class NoRandomScope:
def __init__(self, module):
self._module = module
def __enter__(self):
self._module._disable_random()
def __exit__(self, type, value, traceback):
self._module._enable_random()
return False
class ExtendedTorchModule(torch.nn.Module):
def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
super().__init__()
if writer is None:
writer = DummySummaryWriter()
self.writer = writer.namespace(default_name if name is None else name)
self.allow_random = True
def set_parameter(self, name, value):
parameter = getattr(self, name, None)
if isinstance(parameter, torch.nn.Parameter):
parameter.fill_(value)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.set_parameter(name, value)
def regualizer(self, merge_in=None):
regualizers = collections.defaultdict(int)
if merge_in is not None:
for key, value in merge_in.items():
self.writer.add_scalar(f'regualizer/{key}', value)
regualizers[key] += value
for module in self.children():
if isinstance(module, ExtendedTorchModule):
for key, value in module.regualizer().items():
regualizers[key] += value
return regualizers
def optimize(self, loss):
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.optimize(loss)
def log_gradients(self):
for name, parameter in self.named_parameters(recurse=False):
if parameter.requires_grad:
gradient, *_ = parameter.grad.data
self.writer.add_summary(f'{name}/grad', gradient)
self.writer.add_histogram(f'{name}/grad', gradient)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.log_gradients()
def no_internal_logging(self):
return self.writer.no_logging()
def _disable_random(self):
self.allow_random = False
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._disable_random()
def _enable_random(self):
self.allow_random = True
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._enable_random()
def no_random(self):
return NoRandomScope(self)
class PosNACLayerNew(ExtendedTorchModule):
"""Implements the NAC (Neural Accumulator)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features)
)
self.register_parameter('bias', None)
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.W_hat)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def forward(self, input_0):
primals_1 = self.W_hat
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
wlm2019/Neural-Arithmetic-Units
|
PosNACLayer
| false
| 16,718
|
[
"MIT"
] | 147
|
f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
https://github.com/wlm2019/Neural-Arithmetic-Units/tree/f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
GumbelMNACLayer
|
import collections
import torch
import torch.utils.data
def mnac(x, W, mode='prod'):
out_size, in_size = W.size()
x = x.view(x.size()[0], in_size, 1)
W = W.t().view(1, in_size, out_size)
if mode == 'prod':
return torch.prod(x * W + 1 - W, -2)
elif mode == 'exp-log':
return torch.exp(torch.sum(torch.log(x * W + 1 - W), -2))
elif mode == 'no-idendity':
return torch.prod(x * W, -2)
else:
raise ValueError(f'mnac mode "{mode}" is not implemented')
class SummaryWriterNamespaceNoLoggingScope:
def __init__(self, writer):
self._writer = writer
def __enter__(self):
self._writer._logging_enabled = False
def __exit__(self, type, value, traceback):
self._writer._logging_enabled = True
return False
class DummySummaryWriter:
def __init__(self, **kwargs):
self._logging_enabled = False
pass
def add_scalar(self, name, value, verbose_only=True):
pass
def add_summary(self, name, tensor, verbose_only=True):
pass
def add_histogram(self, name, tensor, verbose_only=True):
pass
def add_tensor(self, name, tensor, verbose_only=True):
pass
def print(self, name, tensor, verbose_only=True):
pass
def namespace(self, name):
return self
def every(self, epoch_interval):
return self
def verbose(self, verbose):
return self
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
class NoRandomScope:
def __init__(self, module):
self._module = module
def __enter__(self):
self._module._disable_random()
def __exit__(self, type, value, traceback):
self._module._enable_random()
return False
class ExtendedTorchModule(torch.nn.Module):
def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
super().__init__()
if writer is None:
writer = DummySummaryWriter()
self.writer = writer.namespace(default_name if name is None else name)
self.allow_random = True
def set_parameter(self, name, value):
parameter = getattr(self, name, None)
if isinstance(parameter, torch.nn.Parameter):
parameter.fill_(value)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.set_parameter(name, value)
def regualizer(self, merge_in=None):
regualizers = collections.defaultdict(int)
if merge_in is not None:
for key, value in merge_in.items():
self.writer.add_scalar(f'regualizer/{key}', value)
regualizers[key] += value
for module in self.children():
if isinstance(module, ExtendedTorchModule):
for key, value in module.regualizer().items():
regualizers[key] += value
return regualizers
def optimize(self, loss):
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.optimize(loss)
def log_gradients(self):
for name, parameter in self.named_parameters(recurse=False):
if parameter.requires_grad:
gradient, *_ = parameter.grad.data
self.writer.add_summary(f'{name}/grad', gradient)
self.writer.add_histogram(f'{name}/grad', gradient)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.log_gradients()
def no_internal_logging(self):
return self.writer.no_logging()
def _disable_random(self):
self.allow_random = False
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._disable_random()
def _enable_random(self):
self.allow_random = True
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._enable_random()
def no_random(self):
return NoRandomScope(self)
class GumbelMNACLayer(ExtendedTorchModule):
"""Implements the NAC (Neural Accumulator)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self.tau = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32),
requires_grad=False)
self.register_buffer('target_weights', torch.tensor([1, -1, 0],
dtype=torch.float32))
self.U = torch.Tensor(out_features, in_features, 3)
self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features)
)
self.register_parameter('bias', None)
def reset_parameters(self):
torch.nn.init.constant_(self.W_hat, 0)
torch.nn.init.constant_(self.tau, 1)
def forward(self, x, reuse=False):
if self.allow_random:
gumbel = -torch.log(1e-08 - torch.log(torch.rand(self.
out_features, self.in_features, device=x.device) + 1e-08))
W = torch.sigmoid((self.W_hat + gumbel) / self.tau)
else:
W = torch.sigmoid(self.W_hat)
expected_W = torch.sigmoid(self.W_hat)
self.writer.add_histogram('W', expected_W)
self.writer.add_tensor('W', expected_W, verbose_only=False)
return mnac(x, W)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def get_inputs():
return [torch.rand([4, 4, 1])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
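# Usage sketch, not part of the original file: while sampling is allowed the
# weights are sigmoid((W_hat + gumbel_noise) / tau); inside no_random() the
# layer falls back to the expected weights sigmoid(W_hat).
def example_gumbel_mnac_layer():
    layer = GumbelMNACLayer(in_features=4, out_features=4)
    layer.reset_parameters()
    x = torch.rand(4, 4, 1)
    y_sampled = layer(x)
    with layer.no_random():
        y_expected = layer(x)
    return y_sampled, y_expected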
|
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import collections
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + 0)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp3 = 1e-08
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp6 = tmp3 - tmp5
tmp7 = tl_math.log(tmp6)
tmp8 = -tmp7
tmp9 = tmp1 + tmp8
tmp12 = tmp9 / tmp11
tmp13 = tl.sigmoid(tmp12)
tmp14 = tmp0 * tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = tmp16 - tmp13
tl.store(out_ptr0 + x4, tmp17, xmask)
@triton.jit
def triton_poi_fused_prod_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_div_log_neg_rsub_sigmoid_sigmoid_backward_2(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp9 = tl.load(in_ptr1 + 0)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp2 = 1e-08
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = tmp2 - tmp4
tmp6 = tl_math.log(tmp5)
tmp7 = -tmp6
tmp8 = tmp0 + tmp7
tmp11 = tmp8 / tmp10
tmp12 = tl.sigmoid(tmp11)
tmp13 = 1.0
tmp14 = tmp13 - tmp12
tmp15 = tmp12 * tmp14
tl.store(in_out_ptr0 + x0, tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.rand.default([4, 4], device=device(type=
'cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sub_0[grid(64)](primals_1, primals_2, buf1,
primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_prod_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = buf1
del buf1
triton_poi_fused_add_div_log_neg_rsub_sigmoid_sigmoid_backward_2[grid
(16)](buf4, primals_2, primals_3, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
return buf3, primals_1, primals_3, buf2, buf4
def mnac(x, W, mode='prod'):
out_size, in_size = W.size()
x = x.view(x.size()[0], in_size, 1)
W = W.t().view(1, in_size, out_size)
if mode == 'prod':
return torch.prod(x * W + 1 - W, -2)
elif mode == 'exp-log':
return torch.exp(torch.sum(torch.log(x * W + 1 - W), -2))
elif mode == 'no-idendity':
return torch.prod(x * W, -2)
else:
raise ValueError(f'mnac mode "{mode}" is not implemented')
class SummaryWriterNamespaceNoLoggingScope:
def __init__(self, writer):
self._writer = writer
def __enter__(self):
self._writer._logging_enabled = False
def __exit__(self, type, value, traceback):
self._writer._logging_enabled = True
return False
class DummySummaryWriter:
def __init__(self, **kwargs):
self._logging_enabled = False
pass
def add_scalar(self, name, value, verbose_only=True):
pass
def add_summary(self, name, tensor, verbose_only=True):
pass
def add_histogram(self, name, tensor, verbose_only=True):
pass
def add_tensor(self, name, tensor, verbose_only=True):
pass
def print(self, name, tensor, verbose_only=True):
pass
def namespace(self, name):
return self
def every(self, epoch_interval):
return self
def verbose(self, verbose):
return self
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
class NoRandomScope:
def __init__(self, module):
self._module = module
def __enter__(self):
self._module._disable_random()
def __exit__(self, type, value, traceback):
self._module._enable_random()
return False
class ExtendedTorchModule(torch.nn.Module):
def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
super().__init__()
if writer is None:
writer = DummySummaryWriter()
self.writer = writer.namespace(default_name if name is None else name)
self.allow_random = True
def set_parameter(self, name, value):
parameter = getattr(self, name, None)
if isinstance(parameter, torch.nn.Parameter):
parameter.fill_(value)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.set_parameter(name, value)
def regualizer(self, merge_in=None):
regualizers = collections.defaultdict(int)
if merge_in is not None:
for key, value in merge_in.items():
self.writer.add_scalar(f'regualizer/{key}', value)
regualizers[key] += value
for module in self.children():
if isinstance(module, ExtendedTorchModule):
for key, value in module.regualizer().items():
regualizers[key] += value
return regualizers
def optimize(self, loss):
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.optimize(loss)
def log_gradients(self):
for name, parameter in self.named_parameters(recurse=False):
if parameter.requires_grad:
gradient, *_ = parameter.grad.data
self.writer.add_summary(f'{name}/grad', gradient)
self.writer.add_histogram(f'{name}/grad', gradient)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.log_gradients()
def no_internal_logging(self):
return self.writer.no_logging()
def _disable_random(self):
self.allow_random = False
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._disable_random()
def _enable_random(self):
self.allow_random = True
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._enable_random()
def no_random(self):
return NoRandomScope(self)
class GumbelMNACLayerNew(ExtendedTorchModule):
"""Implements the NAC (Neural Accumulator)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self.tau = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32),
requires_grad=False)
self.register_buffer('target_weights', torch.tensor([1, -1, 0],
dtype=torch.float32))
self.U = torch.Tensor(out_features, in_features, 3)
self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features)
)
self.register_parameter('bias', None)
def reset_parameters(self):
torch.nn.init.constant_(self.W_hat, 0)
torch.nn.init.constant_(self.tau, 1)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def forward(self, input_0):
primals_3 = self.tau
primals_2 = self.W_hat
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
wlm2019/Neural-Arithmetic-Units
|
GumbelMNACLayer
| false
| 16,719
|
[
"MIT"
] | 147
|
f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
https://github.com/wlm2019/Neural-Arithmetic-Units/tree/f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
ReRegualizedLinearMNACLayer
|
import collections
import math
import torch
import torch.utils.data
def sparsity_error(W):
W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
return torch.max(W_error)
def mnac(x, W, mode='prod'):
out_size, in_size = W.size()
x = x.view(x.size()[0], in_size, 1)
W = W.t().view(1, in_size, out_size)
if mode == 'prod':
return torch.prod(x * W + 1 - W, -2)
elif mode == 'exp-log':
return torch.exp(torch.sum(torch.log(x * W + 1 - W), -2))
elif mode == 'no-idendity':
return torch.prod(x * W, -2)
else:
raise ValueError(f'mnac mode "{mode}" is not implemented')
class SummaryWriterNamespaceNoLoggingScope:
def __init__(self, writer):
self._writer = writer
def __enter__(self):
self._writer._logging_enabled = False
def __exit__(self, type, value, traceback):
self._writer._logging_enabled = True
return False
class DummySummaryWriter:
def __init__(self, **kwargs):
self._logging_enabled = False
pass
def add_scalar(self, name, value, verbose_only=True):
pass
def add_summary(self, name, tensor, verbose_only=True):
pass
def add_histogram(self, name, tensor, verbose_only=True):
pass
def add_tensor(self, name, tensor, verbose_only=True):
pass
def print(self, name, tensor, verbose_only=True):
pass
def namespace(self, name):
return self
def every(self, epoch_interval):
return self
def verbose(self, verbose):
return self
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
class NoRandomScope:
def __init__(self, module):
self._module = module
def __enter__(self):
self._module._disable_random()
def __exit__(self, type, value, traceback):
self._module._enable_random()
return False
class ExtendedTorchModule(torch.nn.Module):
def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
super().__init__()
if writer is None:
writer = DummySummaryWriter()
self.writer = writer.namespace(default_name if name is None else name)
self.allow_random = True
def set_parameter(self, name, value):
parameter = getattr(self, name, None)
if isinstance(parameter, torch.nn.Parameter):
parameter.fill_(value)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.set_parameter(name, value)
def regualizer(self, merge_in=None):
regualizers = collections.defaultdict(int)
if merge_in is not None:
for key, value in merge_in.items():
self.writer.add_scalar(f'regualizer/{key}', value)
regualizers[key] += value
for module in self.children():
if isinstance(module, ExtendedTorchModule):
for key, value in module.regualizer().items():
regualizers[key] += value
return regualizers
def optimize(self, loss):
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.optimize(loss)
def log_gradients(self):
for name, parameter in self.named_parameters(recurse=False):
if parameter.requires_grad:
gradient, *_ = parameter.grad.data
self.writer.add_summary(f'{name}/grad', gradient)
self.writer.add_histogram(f'{name}/grad', gradient)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.log_gradients()
def no_internal_logging(self):
return self.writer.no_logging()
def _disable_random(self):
self.allow_random = False
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._disable_random()
def _enable_random(self):
self.allow_random = True
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._enable_random()
def no_random(self):
return NoRandomScope(self)
class Regualizer:
def __init__(self, support='nac', type='bias', shape='squared', zero=
False, zero_epsilon=0):
super()
        self.zero_epsilon = zero_epsilon
if zero:
self.fn = self._zero
else:
identifier = '_'.join(['', support, type, shape])
self.fn = getattr(self, identifier)
def __call__(self, W):
return self.fn(W)
def _zero(self, W):
return 0
def _mnac_bias_linear(self, W):
return torch.mean(torch.min(torch.abs(W - self.zero_epsilon), torch
.abs(1 - W)))
def _mnac_bias_squared(self, W):
return torch.mean((W - self.zero_epsilon) ** 2 * (1 - W) ** 2)
def _mnac_oob_linear(self, W):
return torch.mean(torch.relu(torch.abs(W - 0.5 - self.zero_epsilon) -
0.5 + self.zero_epsilon))
def _mnac_oob_squared(self, W):
return torch.mean(torch.relu(torch.abs(W - 0.5 - self.zero_epsilon) -
0.5 + self.zero_epsilon) ** 2)
def _nac_bias_linear(self, W):
W_abs = torch.abs(W)
return torch.mean(torch.min(W_abs, torch.abs(1 - W_abs)))
def _nac_bias_squared(self, W):
return torch.mean(W ** 2 * (1 - torch.abs(W)) ** 2)
def _nac_oob_linear(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1))
def _nac_oob_squared(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1) ** 2)
class RegualizerNMUZ:
def __init__(self, zero=False):
self.zero = zero
self.stored_inputs = []
def __call__(self, W):
if self.zero:
return 0
x_mean = torch.mean(torch.cat(self.stored_inputs, dim=0), dim=0,
keepdim=True)
return torch.mean((1 - W) * (1 - x_mean) ** 2)
def append_input(self, x):
if self.zero:
return
self.stored_inputs.append(x)
def reset(self):
if self.zero:
return
self.stored_inputs = []
class ReRegualizedLinearMNACLayer(ExtendedTorchModule):
"""Implements the NAC (Neural Accumulator)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, nac_oob='regualized',
regualizer_shape='squared', mnac_epsilon=0, mnac_normalized=False,
regualizer_z=0, **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self.mnac_normalized = mnac_normalized
self.mnac_epsilon = mnac_epsilon
self.nac_oob = nac_oob
self._regualizer_bias = Regualizer(support='mnac', type='bias',
shape=regualizer_shape, zero_epsilon=mnac_epsilon)
self._regualizer_oob = Regualizer(support='mnac', type='oob', shape
=regualizer_shape, zero_epsilon=mnac_epsilon, zero=self.nac_oob ==
'clip')
self._regualizer_nmu_z = RegualizerNMUZ(zero=regualizer_z == 0)
self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.register_parameter('bias', None)
def reset_parameters(self):
std = math.sqrt(0.25)
r = min(0.25, math.sqrt(3.0) * std)
torch.nn.init.uniform_(self.W, 0.5 - r, 0.5 + r)
self._regualizer_nmu_z.reset()
def optimize(self, loss):
self._regualizer_nmu_z.reset()
if self.nac_oob == 'clip':
self.W.data.clamp_(0.0 + self.mnac_epsilon, 1.0)
def regualizer(self):
return super().regualizer({'W': self._regualizer_bias(self.W), 'z':
self._regualizer_nmu_z(self.W), 'W-OOB': self._regualizer_oob(
self.W)})
def forward(self, x, reuse=False):
if self.allow_random:
self._regualizer_nmu_z.append_input(x)
W = torch.clamp(self.W, 0.0 + self.mnac_epsilon, 1.0
) if self.nac_oob == 'regualized' else self.W
self.writer.add_histogram('W', W)
self.writer.add_tensor('W', W)
self.writer.add_scalar('W/sparsity_error', sparsity_error(W),
verbose_only=False)
if self.mnac_normalized:
c = torch.std(x)
x_normalized = x / c
z_normalized = mnac(x_normalized, W, mode='prod')
out = z_normalized * c ** torch.sum(W, 1)
else:
out = mnac(x, W, mode='prod')
return out
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def get_inputs():
return [torch.rand([4, 4, 1])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
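# Usage sketch, not part of the original file: with the default nac_oob=
# 'regualized' setting W is clamped to [0, 1] before the multiplicative
# aggregation, and regualizer() returns the penalties (keys 'W', 'z', 'W-OOB')
# that a training loop would add to the loss.
def example_reregualized_mnac_layer():
    layer = ReRegualizedLinearMNACLayer(in_features=4, out_features=4)
    layer.reset_parameters()
    x = torch.rand(4, 4, 1)
    y = layer(x)
    penalties = layer.regualizer()
    return y, penalties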
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import collections
import math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp2 = 0.0
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = 1.0
tmp5 = triton_helpers.minimum(tmp3, tmp4)
tmp6 = tmp0 * tmp5
tmp7 = tmp6 + tmp4
tmp8 = tmp7 - tmp5
tl.store(out_ptr0 + x4, tmp8, xmask)
@triton.jit
def triton_poi_fused_prod_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_ge_le_logical_and_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 >= tmp1
tmp3 = 1.0
tmp4 = tmp0 <= tmp3
tmp5 = tmp2 & tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sub_0[grid(64)](primals_2, primals_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_prod_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_ge_le_logical_and_2[grid(16)](primals_1, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
return buf1, primals_2, buf0, buf2
def sparsity_error(W):
W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
return torch.max(W_error)
def mnac(x, W, mode='prod'):
out_size, in_size = W.size()
x = x.view(x.size()[0], in_size, 1)
W = W.t().view(1, in_size, out_size)
if mode == 'prod':
return torch.prod(x * W + 1 - W, -2)
elif mode == 'exp-log':
return torch.exp(torch.sum(torch.log(x * W + 1 - W), -2))
elif mode == 'no-idendity':
return torch.prod(x * W, -2)
else:
raise ValueError(f'mnac mode "{mode}" is not implemented')
class SummaryWriterNamespaceNoLoggingScope:
def __init__(self, writer):
self._writer = writer
def __enter__(self):
self._writer._logging_enabled = False
def __exit__(self, type, value, traceback):
self._writer._logging_enabled = True
return False
class DummySummaryWriter:
def __init__(self, **kwargs):
self._logging_enabled = False
pass
def add_scalar(self, name, value, verbose_only=True):
pass
def add_summary(self, name, tensor, verbose_only=True):
pass
def add_histogram(self, name, tensor, verbose_only=True):
pass
def add_tensor(self, name, tensor, verbose_only=True):
pass
def print(self, name, tensor, verbose_only=True):
pass
def namespace(self, name):
return self
def every(self, epoch_interval):
return self
def verbose(self, verbose):
return self
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
class NoRandomScope:
def __init__(self, module):
self._module = module
def __enter__(self):
self._module._disable_random()
def __exit__(self, type, value, traceback):
self._module._enable_random()
return False
class ExtendedTorchModule(torch.nn.Module):
def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
super().__init__()
if writer is None:
writer = DummySummaryWriter()
self.writer = writer.namespace(default_name if name is None else name)
self.allow_random = True
def set_parameter(self, name, value):
parameter = getattr(self, name, None)
if isinstance(parameter, torch.nn.Parameter):
parameter.fill_(value)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.set_parameter(name, value)
def regualizer(self, merge_in=None):
regualizers = collections.defaultdict(int)
if merge_in is not None:
for key, value in merge_in.items():
self.writer.add_scalar(f'regualizer/{key}', value)
regualizers[key] += value
for module in self.children():
if isinstance(module, ExtendedTorchModule):
for key, value in module.regualizer().items():
regualizers[key] += value
return regualizers
def optimize(self, loss):
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.optimize(loss)
def log_gradients(self):
for name, parameter in self.named_parameters(recurse=False):
if parameter.requires_grad:
gradient, *_ = parameter.grad.data
self.writer.add_summary(f'{name}/grad', gradient)
self.writer.add_histogram(f'{name}/grad', gradient)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.log_gradients()
def no_internal_logging(self):
return self.writer.no_logging()
def _disable_random(self):
self.allow_random = False
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._disable_random()
def _enable_random(self):
self.allow_random = True
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._enable_random()
def no_random(self):
return NoRandomScope(self)
class Regualizer:
def __init__(self, support='nac', type='bias', shape='squared', zero=
False, zero_epsilon=0):
super()
        self.zero_epsilon = zero_epsilon
if zero:
self.fn = self._zero
else:
identifier = '_'.join(['', support, type, shape])
self.fn = getattr(self, identifier)
def __call__(self, W):
return self.fn(W)
def _zero(self, W):
return 0
def _mnac_bias_linear(self, W):
return torch.mean(torch.min(torch.abs(W - self.zero_epsilon), torch
.abs(1 - W)))
def _mnac_bias_squared(self, W):
return torch.mean((W - self.zero_epsilon) ** 2 * (1 - W) ** 2)
def _mnac_oob_linear(self, W):
return torch.mean(torch.relu(torch.abs(W - 0.5 - self.zero_epsilon) -
0.5 + self.zero_epsilon))
def _mnac_oob_squared(self, W):
return torch.mean(torch.relu(torch.abs(W - 0.5 - self.zero_epsilon) -
0.5 + self.zero_epsilon) ** 2)
def _nac_bias_linear(self, W):
W_abs = torch.abs(W)
return torch.mean(torch.min(W_abs, torch.abs(1 - W_abs)))
def _nac_bias_squared(self, W):
return torch.mean(W ** 2 * (1 - torch.abs(W)) ** 2)
def _nac_oob_linear(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1))
def _nac_oob_squared(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1) ** 2)
class RegualizerNMUZ:
def __init__(self, zero=False):
self.zero = zero
self.stored_inputs = []
def __call__(self, W):
if self.zero:
return 0
x_mean = torch.mean(torch.cat(self.stored_inputs, dim=0), dim=0,
keepdim=True)
return torch.mean((1 - W) * (1 - x_mean) ** 2)
def append_input(self, x):
if self.zero:
return
self.stored_inputs.append(x)
def reset(self):
if self.zero:
return
self.stored_inputs = []
class ReRegualizedLinearMNACLayerNew(ExtendedTorchModule):
"""Implements the NAC (Neural Accumulator)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, nac_oob='regualized',
regualizer_shape='squared', mnac_epsilon=0, mnac_normalized=False,
regualizer_z=0, **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self.mnac_normalized = mnac_normalized
self.mnac_epsilon = mnac_epsilon
self.nac_oob = nac_oob
self._regualizer_bias = Regualizer(support='mnac', type='bias',
shape=regualizer_shape, zero_epsilon=mnac_epsilon)
self._regualizer_oob = Regualizer(support='mnac', type='oob', shape
=regualizer_shape, zero_epsilon=mnac_epsilon, zero=self.nac_oob ==
'clip')
self._regualizer_nmu_z = RegualizerNMUZ(zero=regualizer_z == 0)
self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.register_parameter('bias', None)
def reset_parameters(self):
std = math.sqrt(0.25)
r = min(0.25, math.sqrt(3.0) * std)
torch.nn.init.uniform_(self.W, 0.5 - r, 0.5 + r)
self._regualizer_nmu_z.reset()
def optimize(self, loss):
self._regualizer_nmu_z.reset()
if self.nac_oob == 'clip':
self.W.data.clamp_(0.0 + self.mnac_epsilon, 1.0)
def regualizer(self):
return super().regualizer({'W': self._regualizer_bias(self.W), 'z':
self._regualizer_nmu_z(self.W), 'W-OOB': self._regualizer_oob(
self.W)})
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def forward(self, input_0):
primals_1 = self.W
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
wlm2019/Neural-Arithmetic-Units
|
ReRegualizedLinearMNACLayer
| false
| 16,720
|
[
"MIT"
] | 147
|
f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
https://github.com/wlm2019/Neural-Arithmetic-Units/tree/f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
Gain
|
import random
import torch
from torchaudio.transforms import Vol
class Gain(torch.nn.Module):
def __init__(self, min_gain: 'float'=-20.0, max_gain: 'float'=-1):
super().__init__()
self.min_gain = min_gain
self.max_gain = max_gain
def forward(self, audio: 'torch.Tensor') ->torch.Tensor:
gain = random.uniform(self.min_gain, self.max_gain)
audio = Vol(gain, gain_type='db')(audio)
return audio
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.38624708435209876
tmp2 = tmp0 * tmp1
tmp3 = -1.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 1.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
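# Note: the constant 0.38624708435209876 in the fused kernel above is presumably the linear
# amplitude that Vol(gain, gain_type='db') applied for whichever gain was sampled when the
# module was traced; dB gains map to amplitude as 10 ** (gain_db / 20), and the clamp to
# [-1.0, 1.0] matches Vol clamping its output to that range. A minimal sketch of the
# conversion (the helper name is illustrative, not part of torchaudio):
def _db_to_amplitude(gain_db: float) -> float:
    return 10.0 ** (gain_db / 20.0)
# e.g. _db_to_amplitude(-8.264) is roughly 0.386, and -8.264 dB lies inside the default
# [-20.0, -1.0] range of the Gain module.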
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GainNew(torch.nn.Module):
def __init__(self, min_gain: 'float'=-20.0, max_gain: 'float'=-1):
super().__init__()
self.min_gain = min_gain
self.max_gain = max_gain
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
wesbz/torchaudio-augmentations
|
Gain
| false
| 16,721
|
[
"MIT"
] | 112
|
e7b379be60376bb4a44f72a6840358871b3ff06d
|
https://github.com/wesbz/torchaudio-augmentations/tree/e7b379be60376bb4a44f72a6840358871b3ff06d
|
VisionLanguageFusionModule
|
import torch
from torch import Tensor
import torch.utils.data
import torch
from torch import nn
from typing import Optional
class VisionLanguageFusionModule(nn.Module):
def __init__(self, d_model, nhead, dropout=0.0):
super().__init__()
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout
=dropout)
def with_pos_embed(self, tensor, pos: 'Optional[Tensor]'):
return tensor if pos is None else tensor + pos
def forward(self, tgt, memory, memory_key_padding_mask:
'Optional[Tensor]'=None, pos: 'Optional[Tensor]'=None, query_pos:
'Optional[Tensor]'=None):
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos
), key=self.with_pos_embed(memory, pos), value=memory,
attn_mask=None, key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt * tgt2
return tgt
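# Note: unlike a standard transformer decoder block, the attention output is combined with
# the target by element-wise multiplication (tgt * tgt2) rather than a residual addition,
# so the cross-attention here acts as a gating signal on the target features.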
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import Tensor
import torch.utils.data
import torch
from torch import nn
from typing import Optional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
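# Note: the multiply by 1.0 above presumably corresponds to the attention scaling
# q * head_dim ** -0.5 inside nn.MultiheadAttention; with d_model=4 and nhead=4 each head
# has dimension 1, so the scale factor is exactly 1.0 and the kernel effectively just adds
# the in-projection bias to q.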
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_3, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_4, (4,), (1,), 4),
primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_4, (4,), (1,), 8),
primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_3
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_mul_4[grid(16)](buf10, primals_1, primals_6, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_6
return buf10, primals_1, primals_2, buf6, reinterpret_tensor(buf8, (4,
4), (4, 1), 0), primals_5, reinterpret_tensor(buf2, (4, 1, 4), (1,
1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0)
class VisionLanguageFusionModuleNew(nn.Module):
def __init__(self, d_model, nhead, dropout=0.0):
super().__init__()
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout
=dropout)
def with_pos_embed(self, tensor, pos: 'Optional[Tensor]'):
return tensor if pos is None else tensor + pos
def forward(self, input_0, input_1):
primals_3 = self.multihead_attn.in_proj_weight
primals_4 = self.multihead_attn.in_proj_bias
primals_1 = self.multihead_attn.out_proj.weight
primals_6 = self.multihead_attn.out_proj.bias
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
wjn922/ReferFormer
|
VisionLanguageFusionModule
| false
| 16,722
|
[
"Apache-2.0"
] | 125
|
17ca2d8024116068ecae66d0e7155e1d4429b204
|
https://github.com/wjn922/ReferFormer/tree/17ca2d8024116068ecae66d0e7155e1d4429b204
|
Task
|
import torch
import torch.nn
import torch.utils.data.distributed
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
def get_inputs():
return [torch.rand([4, 4, 2, 2])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
import torch.utils.data.distributed
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
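# Note: x0 = xindex % 4 re-reads the four values of the flattened (2, 2) parameter for every
# position in the leading (4, 4) dimensions of the input, i.e. the kernel reproduces the
# broadcasting semantics of self.p + x with p of shape (2, 2) and x of shape (4, 4, 2, 2).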
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (2, 2), (2, 1))
assert_size_stride(primals_2, (4, 4, 2, 2), (16, 4, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](primals_1, primals_2, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
del primals_2
return buf0,
class TaskNew(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, input_0):
primals_1 = self.p
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
woqidaideshi/bagua
|
Task
| false
| 16,723
|
[
"MIT"
] | 635
|
0ee96da598685748519d58d24ce983499cb36721
|
https://github.com/woqidaideshi/bagua/tree/0ee96da598685748519d58d24ce983499cb36721
|
ModuleForDdpCommHook
|
import torch
import torch.nn
import torch.utils.data.distributed
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
class ModuleForDdpCommHook(nn.Module):
def __init__(self):
super().__init__()
self.t0 = Task()
def forward(self, x, rank):
return self.t0(x + rank)
def get_inputs():
return [torch.rand([4, 4, 2, 2]), torch.rand([4, 4, 2, 2])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
import torch.utils.data.distributed
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
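# Note: both element-wise additions from the original module are fused into this single
# kernel: tmp3 computes x + rank and tmp4 adds the broadcast (2, 2) parameter, so the whole
# forward pass self.t0(x + rank) becomes one memory-bound sweep over the tensors.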
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_2, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_3, (2, 2), (2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](primals_3, primals_1, primals_2,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
del primals_2
del primals_3
return buf0,
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
class ModuleForDdpCommHookNew(nn.Module):
def __init__(self):
super().__init__()
self.t0 = Task()
def forward(self, input_0, input_1):
primals_3 = self.t0.p
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
woqidaideshi/bagua
|
ModuleForDdpCommHook
| false
| 16,724
|
[
"MIT"
] | 635
|
0ee96da598685748519d58d24ce983499cb36721
|
https://github.com/woqidaideshi/bagua/tree/0ee96da598685748519d58d24ce983499cb36721
|
ReRegualizedLinearPosNACLayer
|
import collections
import math
import torch
import torch.utils.data
def sparsity_error(W):
W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
return torch.max(W_error)
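# sparsity_error reports how far the weights are from the discrete set {-1, 0, 1}: each entry
# contributes its distance to the nearer of 0 and +/-1, and the maximum is returned.
# e.g. sparsity_error(torch.tensor([0.0, 0.5, 1.0, -1.0])) evaluates to tensor(0.5000),
# since 0.5 is the only entry not already in the set.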
class SummaryWriterNamespaceNoLoggingScope:
def __init__(self, writer):
self._writer = writer
def __enter__(self):
self._writer._logging_enabled = False
def __exit__(self, type, value, traceback):
self._writer._logging_enabled = True
return False
class DummySummaryWriter:
def __init__(self, **kwargs):
self._logging_enabled = False
pass
def add_scalar(self, name, value, verbose_only=True):
pass
def add_summary(self, name, tensor, verbose_only=True):
pass
def add_histogram(self, name, tensor, verbose_only=True):
pass
def add_tensor(self, name, tensor, verbose_only=True):
pass
def print(self, name, tensor, verbose_only=True):
pass
def namespace(self, name):
return self
def every(self, epoch_interval):
return self
def verbose(self, verbose):
return self
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
class NoRandomScope:
def __init__(self, module):
self._module = module
def __enter__(self):
self._module._disable_random()
def __exit__(self, type, value, traceback):
self._module._enable_random()
return False
class ExtendedTorchModule(torch.nn.Module):
def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
super().__init__()
if writer is None:
writer = DummySummaryWriter()
self.writer = writer.namespace(default_name if name is None else name)
self.allow_random = True
def set_parameter(self, name, value):
parameter = getattr(self, name, None)
if isinstance(parameter, torch.nn.Parameter):
parameter.fill_(value)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.set_parameter(name, value)
def regualizer(self, merge_in=None):
regualizers = collections.defaultdict(int)
if merge_in is not None:
for key, value in merge_in.items():
self.writer.add_scalar(f'regualizer/{key}', value)
regualizers[key] += value
for module in self.children():
if isinstance(module, ExtendedTorchModule):
for key, value in module.regualizer().items():
regualizers[key] += value
return regualizers
def optimize(self, loss):
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.optimize(loss)
def log_gradients(self):
for name, parameter in self.named_parameters(recurse=False):
if parameter.requires_grad:
gradient, *_ = parameter.grad.data
self.writer.add_summary(f'{name}/grad', gradient)
self.writer.add_histogram(f'{name}/grad', gradient)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.log_gradients()
def no_internal_logging(self):
return self.writer.no_logging()
def _disable_random(self):
self.allow_random = False
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._disable_random()
def _enable_random(self):
self.allow_random = True
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._enable_random()
def no_random(self):
return NoRandomScope(self)
class Regualizer:
def __init__(self, support='nac', type='bias', shape='squared', zero=
False, zero_epsilon=0):
super()
        self.zero_epsilon = zero_epsilon
if zero:
self.fn = self._zero
else:
identifier = '_'.join(['', support, type, shape])
self.fn = getattr(self, identifier)
def __call__(self, W):
return self.fn(W)
def _zero(self, W):
return 0
def _mnac_bias_linear(self, W):
return torch.mean(torch.min(torch.abs(W - self.zero_epsilon), torch
.abs(1 - W)))
def _mnac_bias_squared(self, W):
return torch.mean((W - self.zero_epsilon) ** 2 * (1 - W) ** 2)
def _mnac_oob_linear(self, W):
return torch.mean(torch.relu(torch.abs(W - 0.5 - self.zero_epsilon) -
0.5 + self.zero_epsilon))
def _mnac_oob_squared(self, W):
return torch.mean(torch.relu(torch.abs(W - 0.5 - self.zero_epsilon) -
0.5 + self.zero_epsilon) ** 2)
def _nac_bias_linear(self, W):
W_abs = torch.abs(W)
return torch.mean(torch.min(W_abs, torch.abs(1 - W_abs)))
def _nac_bias_squared(self, W):
return torch.mean(W ** 2 * (1 - torch.abs(W)) ** 2)
def _nac_oob_linear(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1))
def _nac_oob_squared(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1) ** 2)
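# Note on the variants above: the 'bias' terms vanish exactly when every weight sits on the
# intended discrete values ({0, 1} for the mnac forms, {-1, 0, 1} for the nac forms), while
# the 'oob' terms penalise only weights outside [0, 1] (mnac) respectively [-1, 1] (nac).
# For example, with zero_epsilon == 0, _mnac_bias_squared gives 0.5 ** 2 * 0.5 ** 2 == 0.0625
# for a weight of 0.5 and 0 for weights of exactly 0 or 1.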
class RegualizerNMUZ:
def __init__(self, zero=False):
self.zero = zero
self.stored_inputs = []
def __call__(self, W):
if self.zero:
return 0
x_mean = torch.mean(torch.cat(self.stored_inputs, dim=0), dim=0,
keepdim=True)
return torch.mean((1 - W) * (1 - x_mean) ** 2)
def append_input(self, x):
if self.zero:
return
self.stored_inputs.append(x)
def reset(self):
if self.zero:
return
self.stored_inputs = []
class ReRegualizedLinearPosNACLayer(ExtendedTorchModule):
"""Implements the NAC (Neural Accumulator)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, nac_oob='regualized',
regualizer_shape='squared', mnac_epsilon=0, mnac_normalized=False,
regualizer_z=0, **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self.mnac_normalized = mnac_normalized
self.mnac_epsilon = mnac_epsilon
self.nac_oob = nac_oob
self._regualizer_bias = Regualizer(support='mnac', type='bias',
shape=regualizer_shape, zero_epsilon=mnac_epsilon)
self._regualizer_oob = Regualizer(support='mnac', type='oob', shape
=regualizer_shape, zero_epsilon=mnac_epsilon, zero=self.nac_oob ==
'clip')
self._regualizer_nmu_z = RegualizerNMUZ(zero=regualizer_z == 0)
self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.register_parameter('bias', None)
def reset_parameters(self):
std = math.sqrt(0.25)
r = min(0.25, math.sqrt(3.0) * std)
torch.nn.init.uniform_(self.W, 0.5 - r, 0.5 + r)
self._regualizer_nmu_z.reset()
def optimize(self, loss):
self._regualizer_nmu_z.reset()
if self.nac_oob == 'clip':
self.W.data.clamp_(0.0 + self.mnac_epsilon, 1.0)
def regualizer(self):
return super().regualizer({'W': self._regualizer_bias(self.W), 'z':
self._regualizer_nmu_z(self.W), 'W-OOB': self._regualizer_oob(
self.W)})
def forward(self, x, reuse=False):
if self.allow_random:
self._regualizer_nmu_z.append_input(x)
W = torch.clamp(self.W, 0.0 + self.mnac_epsilon, 1.0
) if self.nac_oob == 'regualized' else self.W
self.writer.add_histogram('W', W)
self.writer.add_tensor('W', W)
self.writer.add_scalar('W/sparsity_error', sparsity_error(W),
verbose_only=False)
return torch.nn.functional.linear(x, W, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import collections
import math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 1.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tmp0 >= tmp1
tmp6 = tmp0 <= tmp3
tmp7 = tmp5 & tmp6
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_clamp_ge_le_logical_and_0[grid(16)](primals_1,
buf0, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1)
del buf0
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf2
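# Note: buf2 holds the element-wise mask (W >= 0.0) & (W <= 1.0); returning it alongside the
# clamped weight is presumably how the compiled graph records which entries were inside the
# clamp range, so the backward pass can zero the gradient for the out-of-range ones.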
def sparsity_error(W):
W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
return torch.max(W_error)
class SummaryWriterNamespaceNoLoggingScope:
def __init__(self, writer):
self._writer = writer
def __enter__(self):
self._writer._logging_enabled = False
def __exit__(self, type, value, traceback):
self._writer._logging_enabled = True
return False
class DummySummaryWriter:
def __init__(self, **kwargs):
self._logging_enabled = False
pass
def add_scalar(self, name, value, verbose_only=True):
pass
def add_summary(self, name, tensor, verbose_only=True):
pass
def add_histogram(self, name, tensor, verbose_only=True):
pass
def add_tensor(self, name, tensor, verbose_only=True):
pass
def print(self, name, tensor, verbose_only=True):
pass
def namespace(self, name):
return self
def every(self, epoch_interval):
return self
def verbose(self, verbose):
return self
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
class NoRandomScope:
def __init__(self, module):
self._module = module
def __enter__(self):
self._module._disable_random()
def __exit__(self, type, value, traceback):
self._module._enable_random()
return False
class ExtendedTorchModule(torch.nn.Module):
def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
super().__init__()
if writer is None:
writer = DummySummaryWriter()
self.writer = writer.namespace(default_name if name is None else name)
self.allow_random = True
def set_parameter(self, name, value):
parameter = getattr(self, name, None)
if isinstance(parameter, torch.nn.Parameter):
parameter.fill_(value)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.set_parameter(name, value)
def regualizer(self, merge_in=None):
regualizers = collections.defaultdict(int)
if merge_in is not None:
for key, value in merge_in.items():
self.writer.add_scalar(f'regualizer/{key}', value)
regualizers[key] += value
for module in self.children():
if isinstance(module, ExtendedTorchModule):
for key, value in module.regualizer().items():
regualizers[key] += value
return regualizers
def optimize(self, loss):
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.optimize(loss)
def log_gradients(self):
for name, parameter in self.named_parameters(recurse=False):
if parameter.requires_grad:
gradient, *_ = parameter.grad.data
self.writer.add_summary(f'{name}/grad', gradient)
self.writer.add_histogram(f'{name}/grad', gradient)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.log_gradients()
def no_internal_logging(self):
return self.writer.no_logging()
def _disable_random(self):
self.allow_random = False
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._disable_random()
def _enable_random(self):
self.allow_random = True
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._enable_random()
def no_random(self):
return NoRandomScope(self)
class Regualizer:
def __init__(self, support='nac', type='bias', shape='squared', zero=
False, zero_epsilon=0):
super()
        self.zero_epsilon = zero_epsilon
if zero:
self.fn = self._zero
else:
identifier = '_'.join(['', support, type, shape])
self.fn = getattr(self, identifier)
def __call__(self, W):
return self.fn(W)
def _zero(self, W):
return 0
def _mnac_bias_linear(self, W):
return torch.mean(torch.min(torch.abs(W - self.zero_epsilon), torch
.abs(1 - W)))
def _mnac_bias_squared(self, W):
return torch.mean((W - self.zero_epsilon) ** 2 * (1 - W) ** 2)
def _mnac_oob_linear(self, W):
return torch.mean(torch.relu(torch.abs(W - 0.5 - self.zero_epsilon) -
0.5 + self.zero_epsilon))
def _mnac_oob_squared(self, W):
return torch.mean(torch.relu(torch.abs(W - 0.5 - self.zero_epsilon) -
0.5 + self.zero_epsilon) ** 2)
def _nac_bias_linear(self, W):
W_abs = torch.abs(W)
return torch.mean(torch.min(W_abs, torch.abs(1 - W_abs)))
def _nac_bias_squared(self, W):
return torch.mean(W ** 2 * (1 - torch.abs(W)) ** 2)
def _nac_oob_linear(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1))
def _nac_oob_squared(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1) ** 2)
class RegualizerNMUZ:
def __init__(self, zero=False):
self.zero = zero
self.stored_inputs = []
def __call__(self, W):
if self.zero:
return 0
x_mean = torch.mean(torch.cat(self.stored_inputs, dim=0), dim=0,
keepdim=True)
return torch.mean((1 - W) * (1 - x_mean) ** 2)
def append_input(self, x):
if self.zero:
return
self.stored_inputs.append(x)
def reset(self):
if self.zero:
return
self.stored_inputs = []
class ReRegualizedLinearPosNACLayerNew(ExtendedTorchModule):
"""Implements the NAC (Neural Accumulator)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, nac_oob='regualized',
regualizer_shape='squared', mnac_epsilon=0, mnac_normalized=False,
regualizer_z=0, **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self.mnac_normalized = mnac_normalized
self.mnac_epsilon = mnac_epsilon
self.nac_oob = nac_oob
self._regualizer_bias = Regualizer(support='mnac', type='bias',
shape=regualizer_shape, zero_epsilon=mnac_epsilon)
self._regualizer_oob = Regualizer(support='mnac', type='oob', shape
=regualizer_shape, zero_epsilon=mnac_epsilon, zero=self.nac_oob ==
'clip')
self._regualizer_nmu_z = RegualizerNMUZ(zero=regualizer_z == 0)
self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.register_parameter('bias', None)
def reset_parameters(self):
std = math.sqrt(0.25)
r = min(0.25, math.sqrt(3.0) * std)
torch.nn.init.uniform_(self.W, 0.5 - r, 0.5 + r)
self._regualizer_nmu_z.reset()
def optimize(self, loss):
self._regualizer_nmu_z.reset()
if self.nac_oob == 'clip':
self.W.data.clamp_(0.0 + self.mnac_epsilon, 1.0)
def regualizer(self):
return super().regualizer({'W': self._regualizer_bias(self.W), 'z':
self._regualizer_nmu_z(self.W), 'W-OOB': self._regualizer_oob(
self.W)})
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def forward(self, input_0):
primals_1 = self.W
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
wlm2019/Neural-Arithmetic-Units
|
ReRegualizedLinearPosNACLayer
| false
| 16,725
|
[
"MIT"
] | 147
|
f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
https://github.com/wlm2019/Neural-Arithmetic-Units/tree/f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
ReRegualizedLinearNACLayer
|
import collections
import math
import torch
import torch.utils.data
def sparsity_error(W):
W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
return torch.max(W_error)
class SummaryWriterNamespaceNoLoggingScope:
def __init__(self, writer):
self._writer = writer
def __enter__(self):
self._writer._logging_enabled = False
def __exit__(self, type, value, traceback):
self._writer._logging_enabled = True
return False
class DummySummaryWriter:
def __init__(self, **kwargs):
self._logging_enabled = False
pass
def add_scalar(self, name, value, verbose_only=True):
pass
def add_summary(self, name, tensor, verbose_only=True):
pass
def add_histogram(self, name, tensor, verbose_only=True):
pass
def add_tensor(self, name, tensor, verbose_only=True):
pass
def print(self, name, tensor, verbose_only=True):
pass
def namespace(self, name):
return self
def every(self, epoch_interval):
return self
def verbose(self, verbose):
return self
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
class NoRandomScope:
def __init__(self, module):
self._module = module
def __enter__(self):
self._module._disable_random()
def __exit__(self, type, value, traceback):
self._module._enable_random()
return False
class ExtendedTorchModule(torch.nn.Module):
def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
super().__init__()
if writer is None:
writer = DummySummaryWriter()
self.writer = writer.namespace(default_name if name is None else name)
self.allow_random = True
def set_parameter(self, name, value):
parameter = getattr(self, name, None)
if isinstance(parameter, torch.nn.Parameter):
parameter.fill_(value)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.set_parameter(name, value)
def regualizer(self, merge_in=None):
regualizers = collections.defaultdict(int)
if merge_in is not None:
for key, value in merge_in.items():
self.writer.add_scalar(f'regualizer/{key}', value)
regualizers[key] += value
for module in self.children():
if isinstance(module, ExtendedTorchModule):
for key, value in module.regualizer().items():
regualizers[key] += value
return regualizers
def optimize(self, loss):
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.optimize(loss)
def log_gradients(self):
for name, parameter in self.named_parameters(recurse=False):
if parameter.requires_grad:
gradient, *_ = parameter.grad.data
self.writer.add_summary(f'{name}/grad', gradient)
self.writer.add_histogram(f'{name}/grad', gradient)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.log_gradients()
def no_internal_logging(self):
return self.writer.no_logging()
def _disable_random(self):
self.allow_random = False
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._disable_random()
def _enable_random(self):
self.allow_random = True
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._enable_random()
def no_random(self):
return NoRandomScope(self)
class Regualizer:
def __init__(self, support='nac', type='bias', shape='squared', zero=
False, zero_epsilon=0):
super()
        self.zero_epsilon = zero_epsilon
if zero:
self.fn = self._zero
else:
identifier = '_'.join(['', support, type, shape])
self.fn = getattr(self, identifier)
def __call__(self, W):
return self.fn(W)
def _zero(self, W):
return 0
def _mnac_bias_linear(self, W):
return torch.mean(torch.min(torch.abs(W - self.zero_epsilon), torch
.abs(1 - W)))
def _mnac_bias_squared(self, W):
return torch.mean((W - self.zero_epsilon) ** 2 * (1 - W) ** 2)
def _mnac_oob_linear(self, W):
return torch.mean(torch.relu(torch.abs(W - 0.5 - self.zero_epsilon) -
0.5 + self.zero_epsilon))
def _mnac_oob_squared(self, W):
return torch.mean(torch.relu(torch.abs(W - 0.5 - self.zero_epsilon) -
0.5 + self.zero_epsilon) ** 2)
def _nac_bias_linear(self, W):
W_abs = torch.abs(W)
return torch.mean(torch.min(W_abs, torch.abs(1 - W_abs)))
def _nac_bias_squared(self, W):
return torch.mean(W ** 2 * (1 - torch.abs(W)) ** 2)
def _nac_oob_linear(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1))
def _nac_oob_squared(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1) ** 2)
class ReRegualizedLinearNACLayer(ExtendedTorchModule):
"""Implements the RegualizedLinearNAC
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, nac_oob='regualized',
regualizer_shape='squared', **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self.nac_oob = nac_oob
self._regualizer_bias = Regualizer(support='nac', type='bias',
shape=regualizer_shape)
self._regualizer_oob = Regualizer(support='nac', type='oob', shape=
regualizer_shape, zero=self.nac_oob == 'clip')
self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.register_parameter('bias', None)
def reset_parameters(self):
std = math.sqrt(2.0 / (self.in_features + self.out_features))
r = min(0.5, math.sqrt(3.0) * std)
torch.nn.init.uniform_(self.W, -r, r)
def optimize(self, loss):
if self.nac_oob == 'clip':
self.W.data.clamp_(-1.0, 1.0)
def regualizer(self):
return super().regualizer({'W': self._regualizer_bias(self.W),
'W-OOB': self._regualizer_oob(self.W)})
def forward(self, input, reuse=False):
W = torch.clamp(self.W, -1.0, 1.0)
self.writer.add_histogram('W', W)
self.writer.add_tensor('W', W)
self.writer.add_scalar('W/sparsity_error', sparsity_error(W),
verbose_only=False)
return torch.nn.functional.linear(input, W, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import collections
import math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -1.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 1.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tmp0 >= tmp1
tmp6 = tmp0 <= tmp3
tmp7 = tmp5 & tmp6
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_clamp_ge_le_logical_and_0[grid(16)](primals_1,
buf0, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1)
del buf0
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf2
def sparsity_error(W):
W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
return torch.max(W_error)
class SummaryWriterNamespaceNoLoggingScope:
def __init__(self, writer):
self._writer = writer
def __enter__(self):
self._writer._logging_enabled = False
def __exit__(self, type, value, traceback):
self._writer._logging_enabled = True
return False
class DummySummaryWriter:
def __init__(self, **kwargs):
self._logging_enabled = False
pass
def add_scalar(self, name, value, verbose_only=True):
pass
def add_summary(self, name, tensor, verbose_only=True):
pass
def add_histogram(self, name, tensor, verbose_only=True):
pass
def add_tensor(self, name, tensor, verbose_only=True):
pass
def print(self, name, tensor, verbose_only=True):
pass
def namespace(self, name):
return self
def every(self, epoch_interval):
return self
def verbose(self, verbose):
return self
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
class NoRandomScope:
def __init__(self, module):
self._module = module
def __enter__(self):
self._module._disable_random()
def __exit__(self, type, value, traceback):
self._module._enable_random()
return False
class ExtendedTorchModule(torch.nn.Module):
def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
super().__init__()
if writer is None:
writer = DummySummaryWriter()
self.writer = writer.namespace(default_name if name is None else name)
self.allow_random = True
def set_parameter(self, name, value):
parameter = getattr(self, name, None)
if isinstance(parameter, torch.nn.Parameter):
parameter.fill_(value)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.set_parameter(name, value)
def regualizer(self, merge_in=None):
regualizers = collections.defaultdict(int)
if merge_in is not None:
for key, value in merge_in.items():
self.writer.add_scalar(f'regualizer/{key}', value)
regualizers[key] += value
for module in self.children():
if isinstance(module, ExtendedTorchModule):
for key, value in module.regualizer().items():
regualizers[key] += value
return regualizers
def optimize(self, loss):
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.optimize(loss)
def log_gradients(self):
for name, parameter in self.named_parameters(recurse=False):
if parameter.requires_grad:
gradient, *_ = parameter.grad.data
self.writer.add_summary(f'{name}/grad', gradient)
self.writer.add_histogram(f'{name}/grad', gradient)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.log_gradients()
def no_internal_logging(self):
return self.writer.no_logging()
def _disable_random(self):
self.allow_random = False
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._disable_random()
def _enable_random(self):
self.allow_random = True
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._enable_random()
def no_random(self):
return NoRandomScope(self)
class Regualizer:
def __init__(self, support='nac', type='bias', shape='squared', zero=
False, zero_epsilon=0):
super()
        self.zero_epsilon = zero_epsilon
if zero:
self.fn = self._zero
else:
identifier = '_'.join(['', support, type, shape])
self.fn = getattr(self, identifier)
def __call__(self, W):
return self.fn(W)
def _zero(self, W):
return 0
def _mnac_bias_linear(self, W):
return torch.mean(torch.min(torch.abs(W - self.zero_epsilon), torch
.abs(1 - W)))
def _mnac_bias_squared(self, W):
return torch.mean((W - self.zero_epsilon) ** 2 * (1 - W) ** 2)
def _mnac_oob_linear(self, W):
return torch.mean(torch.relu(torch.abs(W - 0.5 - self.zero_epsilon) -
0.5 + self.zero_epsilon))
def _mnac_oob_squared(self, W):
return torch.mean(torch.relu(torch.abs(W - 0.5 - self.zero_epsilon) -
0.5 + self.zero_epsilon) ** 2)
def _nac_bias_linear(self, W):
W_abs = torch.abs(W)
return torch.mean(torch.min(W_abs, torch.abs(1 - W_abs)))
def _nac_bias_squared(self, W):
return torch.mean(W ** 2 * (1 - torch.abs(W)) ** 2)
def _nac_oob_linear(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1))
def _nac_oob_squared(self, W):
return torch.mean(torch.relu(torch.abs(W) - 1) ** 2)
class ReRegualizedLinearNACLayerNew(ExtendedTorchModule):
"""Implements the RegualizedLinearNAC
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, nac_oob='regualized',
regualizer_shape='squared', **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self.nac_oob = nac_oob
self._regualizer_bias = Regualizer(support='nac', type='bias',
shape=regualizer_shape)
self._regualizer_oob = Regualizer(support='nac', type='oob', shape=
regualizer_shape, zero=self.nac_oob == 'clip')
self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.register_parameter('bias', None)
def reset_parameters(self):
std = math.sqrt(2.0 / (self.in_features + self.out_features))
r = min(0.5, math.sqrt(3.0) * std)
torch.nn.init.uniform_(self.W, -r, r)
def optimize(self, loss):
if self.nac_oob == 'clip':
self.W.data.clamp_(-1.0, 1.0)
def regualizer(self):
return super().regualizer({'W': self._regualizer_bias(self.W),
'W-OOB': self._regualizer_oob(self.W)})
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def forward(self, input_0):
primals_1 = self.W
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
wlm2019/Neural-Arithmetic-Units
|
ReRegualizedLinearNACLayer
| false
| 16,726
|
[
"MIT"
] | 147
|
f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
https://github.com/wlm2019/Neural-Arithmetic-Units/tree/f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
GAT
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.matmul(input, self.W)
N = h.size()[1]
batch_size = h.size(0)
a_input = torch.cat([h.repeat(1, 1, N).view(batch_size, N * N, -1),
h.repeat(1, N, 1)], dim=2).view(batch_size, N, -1, 2 * self.
out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3))
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
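# Note on the a_input construction in forward above: h.repeat(1, 1, N).view(batch, N * N, -1)
# places h_i at row i * N + j while h.repeat(1, N, 1) places h_j there, so after the
# concatenation and the final view a_input[b, i, j] is the pair [h_i || h_j] that is scored
# against self.a to produce the attention logit e_ij, as in the GAT paper linked above.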
class GAT(nn.Module):
def __init__(self, nfeat, nhid, dropout, alpha, nheads):
super(GAT, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
def forward(self, x, adj):
x = F.dropout(x, self.dropout, training=self.training)
x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
x = F.dropout(x, self.dropout, training=self.training)
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'dropout': 0.5, 'alpha': 4,
'nheads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * ((4 * x1 + x0) // 16 % 4) + 16 * ((4 * x1 +
64 * x2 + x0) // 64 % 4) + (4 * x1 + x0) % 16 % 4), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * x2 + (-4 + x0)), tmp6 &
xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_gt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask).to(tl.int1)
tmp9 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (16 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask).to(tl.int1)
tmp16 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp17 = tl.load(in_ptr2 + (32 + x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask).to(tl.int1)
tmp23 = tl.load(in_ptr1 + (48 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp24 = tl.load(in_ptr2 + (48 + x0), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp41 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (16 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp46 = tl.load(in_ptr4 + (16 + x0), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr3 + (32 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp52 = tl.load(in_ptr4 + (32 + x0), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr3 + (48 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp58 = tl.load(in_ptr4 + (48 + x0), xmask, eviction_policy='evict_last')
tmp74 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp75 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp79 = tl.load(in_ptr5 + (16 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp80 = tl.load(in_ptr6 + (16 + x0), xmask, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr5 + (32 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp86 = tl.load(in_ptr6 + (32 + x0), xmask, eviction_policy='evict_last')
tmp91 = tl.load(in_ptr5 + (48 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp92 = tl.load(in_ptr6 + (48 + x0), xmask, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp109 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last')
tmp113 = tl.load(in_ptr7 + (16 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp114 = tl.load(in_ptr8 + (16 + x0), xmask, eviction_policy='evict_last')
tmp119 = tl.load(in_ptr7 + (32 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp120 = tl.load(in_ptr8 + (32 + x0), xmask, eviction_policy='evict_last')
tmp125 = tl.load(in_ptr7 + (48 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp126 = tl.load(in_ptr8 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tmp42 = tmp41 * tmp3
tmp43 = tl.where(tmp40, tmp41, tmp42)
tmp44 = tl.where(tmp0, tmp43, tmp6)
tmp47 = tmp46 * tmp3
tmp48 = tl.where(tmp45, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp48, tmp6)
tmp50 = triton_helpers.maximum(tmp44, tmp49)
tmp53 = tmp52 * tmp3
tmp54 = tl.where(tmp51, tmp52, tmp53)
tmp55 = tl.where(tmp15, tmp54, tmp6)
tmp56 = triton_helpers.maximum(tmp50, tmp55)
tmp59 = tmp58 * tmp3
tmp60 = tl.where(tmp57, tmp58, tmp59)
tmp61 = tl.where(tmp22, tmp60, tmp6)
tmp62 = triton_helpers.maximum(tmp56, tmp61)
tmp63 = tmp44 - tmp62
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp49 - tmp62
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp64 + tmp66
tmp68 = tmp55 - tmp62
tmp69 = tl_math.exp(tmp68)
tmp70 = tmp67 + tmp69
tmp71 = tmp61 - tmp62
tmp72 = tl_math.exp(tmp71)
tmp73 = tmp70 + tmp72
tmp76 = tmp75 * tmp3
tmp77 = tl.where(tmp74, tmp75, tmp76)
tmp78 = tl.where(tmp0, tmp77, tmp6)
tmp81 = tmp80 * tmp3
tmp82 = tl.where(tmp79, tmp80, tmp81)
tmp83 = tl.where(tmp8, tmp82, tmp6)
tmp84 = triton_helpers.maximum(tmp78, tmp83)
tmp87 = tmp86 * tmp3
tmp88 = tl.where(tmp85, tmp86, tmp87)
tmp89 = tl.where(tmp15, tmp88, tmp6)
tmp90 = triton_helpers.maximum(tmp84, tmp89)
tmp93 = tmp92 * tmp3
tmp94 = tl.where(tmp91, tmp92, tmp93)
tmp95 = tl.where(tmp22, tmp94, tmp6)
tmp96 = triton_helpers.maximum(tmp90, tmp95)
tmp97 = tmp78 - tmp96
tmp98 = tl_math.exp(tmp97)
tmp99 = tmp83 - tmp96
tmp100 = tl_math.exp(tmp99)
tmp101 = tmp98 + tmp100
tmp102 = tmp89 - tmp96
tmp103 = tl_math.exp(tmp102)
tmp104 = tmp101 + tmp103
tmp105 = tmp95 - tmp96
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp104 + tmp106
tmp110 = tmp109 * tmp3
tmp111 = tl.where(tmp108, tmp109, tmp110)
tmp112 = tl.where(tmp0, tmp111, tmp6)
tmp115 = tmp114 * tmp3
tmp116 = tl.where(tmp113, tmp114, tmp115)
tmp117 = tl.where(tmp8, tmp116, tmp6)
tmp118 = triton_helpers.maximum(tmp112, tmp117)
tmp121 = tmp120 * tmp3
tmp122 = tl.where(tmp119, tmp120, tmp121)
tmp123 = tl.where(tmp15, tmp122, tmp6)
tmp124 = triton_helpers.maximum(tmp118, tmp123)
tmp127 = tmp126 * tmp3
tmp128 = tl.where(tmp125, tmp126, tmp127)
tmp129 = tl.where(tmp22, tmp128, tmp6)
tmp130 = triton_helpers.maximum(tmp124, tmp129)
tmp131 = tmp112 - tmp130
tmp132 = tl_math.exp(tmp131)
tmp133 = tmp117 - tmp130
tmp134 = tl_math.exp(tmp133)
tmp135 = tmp132 + tmp134
tmp136 = tmp123 - tmp130
tmp137 = tl_math.exp(tmp136)
tmp138 = tmp135 + tmp137
tmp139 = tmp129 - tmp130
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp138 + tmp140
tl.store(out_ptr0 + x2, tmp28, xmask)
tl.store(out_ptr1 + x2, tmp39, xmask)
tl.store(out_ptr2 + x2, tmp62, xmask)
tl.store(out_ptr3 + x2, tmp73, xmask)
tl.store(out_ptr4 + x2, tmp96, xmask)
tl.store(out_ptr5 + x2, tmp107, xmask)
tl.store(out_ptr6 + x2, tmp130, xmask)
tl.store(out_ptr7 + x2, tmp141, xmask)
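# Note: this kernel fuses the masked softmax for all four attention heads. The tl.where
# between the raw score and score * 4.0 reproduces LeakyReLU with the configured negative
# slope alpha == 4, the constant -8999999815811072.0 is -9e15 rounded to float32 and plays
# the role of zero_vec in torch.where(adj > 0, e, zero_vec), and the maximum / exp / sum
# chains are a numerically stable softmax over the four entries of dim 1.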
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_4(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9,
in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr5 + x4, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp14 = tl.load(in_ptr6 + x4, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr7 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr8 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr9 + x4, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp24 = tl.load(in_ptr10 + x4, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr11 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp31 = tl.load(in_ptr12 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp33 = tl.load(in_ptr13 + x4, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp34 = tl.load(in_ptr14 + x4, xmask, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr15 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp41 = tl.load(in_ptr16 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp15 = tmp14 * tmp3
tmp16 = tl.where(tmp13, tmp14, tmp15)
tmp17 = tl.where(tmp0, tmp16, tmp6)
tmp19 = tmp17 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp20 / tmp21
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp0, tmp26, tmp6)
tmp29 = tmp27 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp32 = tmp30 / tmp31
tmp35 = tmp34 * tmp3
tmp36 = tl.where(tmp33, tmp34, tmp35)
tmp37 = tl.where(tmp0, tmp36, tmp6)
tmp39 = tmp37 - tmp38
tmp40 = tl_math.exp(tmp39)
tmp42 = tmp40 / tmp41
tl.store(out_ptr0 + x3, tmp12, xmask)
tl.store(out_ptr1 + x3, tmp22, xmask)
tl.store(out_ptr2 + x3, tmp32, xmask)
tl.store(out_ptr3 + x3, tmp42, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp18 &
xmask, other=0.0)
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp18, tmp24, tmp25)
tmp27 = tmp0 >= tmp16
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp30 &
xmask, other=0.0)
tmp32 = tmp31 > tmp6
tmp33 = tmp31 * tmp8
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp8
tmp36 = tl.where(tmp32, tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp30, tmp36, tmp37)
tmp39 = tmp0 >= tmp28
tl.full([1], 16, tl.int64)
tmp42 = tl.load(in_ptr3 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp39 &
xmask, other=0.0)
tmp43 = tmp42 > tmp6
tmp44 = tmp42 * tmp8
tmp45 = libdevice.expm1(tmp44)
tmp46 = tmp45 * tmp8
tmp47 = tl.where(tmp43, tmp44, tmp46)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp39, tmp47, tmp48)
tmp50 = tl.where(tmp30, tmp38, tmp49)
tmp51 = tl.where(tmp18, tmp26, tmp50)
tmp52 = tl.where(tmp4, tmp14, tmp51)
tl.store(out_ptr0 + x3, tmp52, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 1), (1, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (8, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (8, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](buf0, buf1, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0),
primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_gt_2[grid(256)](primals_4, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_4
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_5, out=buf10)
del primals_5
buf11 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(512)](buf10, buf11, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (64, 8), (8, 1), 0),
primals_6, out=buf12)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_7, out=buf19)
del primals_7
buf20 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(512)](buf19, buf20, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf21 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf20, (64, 8), (8, 1), 0),
primals_8, out=buf21)
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf21, buf22, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_9, out=buf28)
del primals_9
buf29 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(512)](buf28, buf29, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf30 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf29, (64, 8), (8, 1), 0),
primals_10, out=buf30)
buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf30, buf31, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf24 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf32 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf33 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
triton_poi_fused__softmax_leaky_relu_mul_where_3[grid(64)](buf4,
buf3, buf2, buf13, buf12, buf22, buf21, buf31, buf30, buf5,
buf6, buf14, buf15, buf23, buf24, buf32, buf33, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf25 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf34 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_leaky_relu_mul_where_4[grid(256)](buf4,
buf3, buf2, buf5, buf6, buf13, buf12, buf14, buf15, buf22,
buf21, buf23, buf24, buf31, buf30, buf32, buf33, buf7, buf16,
buf25, buf34, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf12
del buf14
del buf15
del buf2
del buf21
del buf23
del buf24
del buf30
del buf32
del buf33
del buf5
del buf6
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_5[grid(256)](buf0, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf0
buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_5[grid(256)](buf10, buf17, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf10
buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf16, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0), out=buf18
)
buf26 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_5[grid(256)](buf19, buf26, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf19
buf27 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1), 0), out=buf27
)
buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_5[grid(256)](buf28, buf35, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf28
buf36 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf34, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0), out=buf36
)
buf37 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_6[grid(1024)](buf9, buf18, buf27, buf36, buf37,
1024, XBLOCK=128, num_warps=4, num_stages=1)
return (buf37, buf3, buf4, buf7, buf9, buf13, buf16, buf18, buf22,
buf25, buf27, buf31, buf34, buf36, reinterpret_tensor(buf35, (16, 4,
4), (16, 1, 4), 0), reinterpret_tensor(buf29, (8, 64), (1, 8), 0),
reinterpret_tensor(primals_10, (1, 8), (1, 1), 0),
reinterpret_tensor(primals_1, (4, 16), (1, 4), 0),
reinterpret_tensor(buf26, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf20, (8, 64), (1, 8), 0), reinterpret_tensor(
primals_8, (1, 8), (1, 1), 0), reinterpret_tensor(buf17, (16, 4, 4),
(16, 1, 4), 0), reinterpret_tensor(buf11, (8, 64), (1, 8), 0),
reinterpret_tensor(primals_6, (1, 8), (1, 1), 0),
reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf1, (8, 64), (1, 8), 0), reinterpret_tensor(
primals_3, (1, 8), (1, 1), 0))
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.matmul(input, self.W)
N = h.size()[1]
batch_size = h.size(0)
a_input = torch.cat([h.repeat(1, 1, N).view(batch_size, N * N, -1),
h.repeat(1, N, 1)], dim=2).view(batch_size, N, -1, 2 * self.
out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3))
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GATNew(nn.Module):
def __init__(self, nfeat, nhid, dropout, alpha, nheads):
super(GATNew, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
def forward(self, input_0, input_1):
primals_2 = self.attention_0.W
primals_3 = self.attention_0.a
primals_5 = self.attention_1.W
primals_6 = self.attention_1.a
primals_7 = self.attention_2.W
primals_8 = self.attention_2.a
primals_9 = self.attention_3.W
primals_10 = self.attention_3.a
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
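A minimal eager-mode usage sketch for the GraphAttentionLayer defined above; the shapes and hyper-parameters below are illustrative assumptions (they are not taken from this entry), and torch.nn.functional is imported as F here because the class body refers to it:
import torch
import torch.nn.functional as F
# Hypothetical sizes: 2 graphs, 5 nodes each, 4 input features.
layer = GraphAttentionLayer(in_features=4, out_features=4, dropout=0.1,
    alpha=0.2, concat=True)
x = torch.rand(2, 5, 4)                    # (batch, num_nodes, in_features)
adj = (torch.rand(2, 5, 5) > 0.5).float()  # dense 0/1 adjacency per graph
out = layer(x, adj)                        # (batch, num_nodes, out_features)
print(out.shape)                           # torch.Size([2, 5, 4])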
| wjurayj/commonsense-rl | GAT | false | 16,727 | ["Apache-2.0"] | 55 | fbbe4fa4a21865095783845fce2f0c4f4346e40f | https://github.com/wjurayj/commonsense-rl/tree/fbbe4fa4a21865095783845fce2f0c4f4346e40f |
GL
|
import torch
import torch.nn as nn
class GL(nn.Module):
def __init__(self, dim):
super().__init__()
self.gl_conv = nn.Conv2d(dim, dim, kernel_size=3, padding=1, groups=dim
)
def forward(self, x):
return x + self.gl_conv(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_convolution_0[grid(256)](buf1, primals_3,
primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class GLNew(nn.Module):
def __init__(self, dim):
super().__init__()
self.gl_conv = nn.Conv2d(dim, dim, kernel_size=3, padding=1, groups=dim
)
def forward(self, input_0):
primals_1 = self.gl_conv.weight
primals_2 = self.gl_conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
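A minimal usage sketch, assuming a CUDA device since the generated call() pins device 0; the dim and input shape follow get_init_inputs/get_inputs from the original entry:
import torch
if torch.cuda.is_available():
    gl = GLNew(dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = gl(x)          # x + depthwise 3x3 convolution of x
    print(out.shape)     # torch.Size([4, 4, 4, 4])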
| wofmanaf/ResT | GL | false | 16,728 | ["Apache-2.0"] | 178 | 508e30b28036e2cb882a03d24268dc70eb0c82a3 | https://github.com/wofmanaf/ResT/tree/508e30b28036e2cb882a03d24268dc70eb0c82a3 |
HighWay
|
import torch
import torch.nn as nn
from torch.nn import Parameter
class HighWay(torch.nn.Module):
def __init__(self, f_in, f_out, bias=True):
super(HighWay, self).__init__()
self.w = Parameter(torch.Tensor(f_in, f_out))
nn.init.xavier_uniform_(self.w)
if bias:
self.bias = Parameter(torch.Tensor(f_out))
nn.init.constant_(self.bias, 0)
else:
self.register_parameter('bias', None)
def forward(self, in_1, in_2):
t = torch.mm(in_1, self.w)
if self.bias is not None:
t = t + self.bias
gate = torch.sigmoid(t)
return gate * in_2 + (1.0 - gate) * in_1
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'f_in': 4, 'f_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp8 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp3
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_0[grid(16)](buf0, primals_3,
primals_4, primals_2, buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
return buf1, primals_2, primals_3, primals_4, buf0
class HighWayNew(torch.nn.Module):
def __init__(self, f_in, f_out, bias=True):
super(HighWayNew, self).__init__()
self.w = Parameter(torch.Tensor(f_in, f_out))
nn.init.xavier_uniform_(self.w)
if bias:
self.bias = Parameter(torch.Tensor(f_out))
nn.init.constant_(self.bias, 0)
else:
self.register_parameter('bias', None)
def forward(self, input_0, input_1):
primals_1 = self.w
primals_3 = self.bias
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
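A minimal usage sketch, assuming a CUDA device since the generated call() targets device 0; sizes follow get_inputs/get_init_inputs from the original entry:
import torch
if torch.cuda.is_available():
    hw = HighWayNew(f_in=4, f_out=4).cuda()
    in_1 = torch.rand(4, 4, device='cuda')
    in_2 = torch.rand(4, 4, device='cuda')
    out = hw(in_1, in_2)  # gate * in_2 + (1 - gate) * in_1, gate = sigmoid(in_1 @ w + bias)
    print(out.shape)      # torch.Size([4, 4])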
| weihangzhang/EAkit | HighWay | false | 16,729 | ["MIT"] | 102 | dde8e914480cd1a3585271f70db11d567d9c2a04 | https://github.com/weihangzhang/EAkit/tree/dde8e914480cd1a3585271f70db11d567d9c2a04 |
SobelConv2d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SobelConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=0, dilation=1, groups=1, bias=True, requires_grad=True):
assert kernel_size % 2 == 1, "SobelConv2d's kernel_size must be odd."
assert out_channels % 4 == 0, "SobelConv2d's out_channels must be a multiple of 4."
assert out_channels % groups == 0, "SobelConv2d's out_channels must be a multiple of groups."
super(SobelConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias if requires_grad else False
if self.bias:
self.bias = nn.Parameter(torch.zeros(size=(out_channels,),
dtype=torch.float32), requires_grad=True)
else:
self.bias = None
self.sobel_weight = nn.Parameter(torch.zeros(size=(out_channels,
int(in_channels / groups), kernel_size, kernel_size)),
requires_grad=False)
kernel_mid = kernel_size // 2
for idx in range(out_channels):
if idx % 4 == 0:
self.sobel_weight[idx, :, 0, :] = -1
self.sobel_weight[idx, :, 0, kernel_mid] = -2
self.sobel_weight[idx, :, -1, :] = 1
self.sobel_weight[idx, :, -1, kernel_mid] = 2
elif idx % 4 == 1:
self.sobel_weight[idx, :, :, 0] = -1
self.sobel_weight[idx, :, kernel_mid, 0] = -2
self.sobel_weight[idx, :, :, -1] = 1
self.sobel_weight[idx, :, kernel_mid, -1] = 2
elif idx % 4 == 2:
self.sobel_weight[idx, :, 0, 0] = -2
for i in range(0, kernel_mid + 1):
self.sobel_weight[idx, :, kernel_mid - i, i] = -1
self.sobel_weight[idx, :, kernel_size - 1 - i,
kernel_mid + i] = 1
self.sobel_weight[idx, :, -1, -1] = 2
else:
self.sobel_weight[idx, :, -1, 0] = -2
for i in range(0, kernel_mid + 1):
self.sobel_weight[idx, :, kernel_mid + i, i] = -1
self.sobel_weight[idx, :, i, kernel_mid + i] = 1
self.sobel_weight[idx, :, 0, -1] = 2
if requires_grad:
self.sobel_factor = nn.Parameter(torch.ones(size=(out_channels,
1, 1, 1), dtype=torch.float32), requires_grad=True)
else:
self.sobel_factor = nn.Parameter(torch.ones(size=(out_channels,
1, 1, 1), dtype=torch.float32), requires_grad=False)
def forward(self, x):
if torch.cuda.is_available():
self.sobel_factor = self.sobel_factor
if isinstance(self.bias, nn.Parameter):
self.bias = self.bias
sobel_weight = self.sobel_weight * self.sobel_factor
if torch.cuda.is_available():
sobel_weight = sobel_weight
out = F.conv2d(x, sobel_weight, self.bias, self.stride, self.
padding, self.dilation, self.groups)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 36
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(144)](primals_3, primals_1, buf0, 144,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(primals_4, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(64)](buf2, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
return buf2, primals_3, primals_4, buf0
class SobelConv2dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=0, dilation=1, groups=1, bias=True, requires_grad=True):
assert kernel_size % 2 == 1, "SobelConv2d's kernel_size must be odd."
assert out_channels % 4 == 0, "SobelConv2d's out_channels must be a multiple of 4."
assert out_channels % groups == 0, "SobelConv2d's out_channels must be a multiple of groups."
super(SobelConv2dNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias if requires_grad else False
if self.bias:
self.bias = nn.Parameter(torch.zeros(size=(out_channels,),
dtype=torch.float32), requires_grad=True)
else:
self.bias = None
self.sobel_weight = nn.Parameter(torch.zeros(size=(out_channels,
int(in_channels / groups), kernel_size, kernel_size)),
requires_grad=False)
kernel_mid = kernel_size // 2
for idx in range(out_channels):
if idx % 4 == 0:
self.sobel_weight[idx, :, 0, :] = -1
self.sobel_weight[idx, :, 0, kernel_mid] = -2
self.sobel_weight[idx, :, -1, :] = 1
self.sobel_weight[idx, :, -1, kernel_mid] = 2
elif idx % 4 == 1:
self.sobel_weight[idx, :, :, 0] = -1
self.sobel_weight[idx, :, kernel_mid, 0] = -2
self.sobel_weight[idx, :, :, -1] = 1
self.sobel_weight[idx, :, kernel_mid, -1] = 2
elif idx % 4 == 2:
self.sobel_weight[idx, :, 0, 0] = -2
for i in range(0, kernel_mid + 1):
self.sobel_weight[idx, :, kernel_mid - i, i] = -1
self.sobel_weight[idx, :, kernel_size - 1 - i,
kernel_mid + i] = 1
self.sobel_weight[idx, :, -1, -1] = 2
else:
self.sobel_weight[idx, :, -1, 0] = -2
for i in range(0, kernel_mid + 1):
self.sobel_weight[idx, :, kernel_mid + i, i] = -1
self.sobel_weight[idx, :, i, kernel_mid + i] = 1
self.sobel_weight[idx, :, 0, -1] = 2
if requires_grad:
self.sobel_factor = nn.Parameter(torch.ones(size=(out_channels,
1, 1, 1), dtype=torch.float32), requires_grad=True)
else:
self.sobel_factor = nn.Parameter(torch.ones(size=(out_channels,
1, 1, 1), dtype=torch.float32), requires_grad=False)
def forward(self, input_0):
primals_2 = self.bias
primals_3 = self.sobel_weight
primals_1 = self.sobel_factor
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
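A minimal usage sketch, assuming a CUDA device; with the default padding=0 and kernel_size=3 the 4x4 input shrinks to 2x2, matching the assert_size_stride on buf1 in call() above:
import torch
if torch.cuda.is_available():
    sobel = SobelConv2dNew(in_channels=4, out_channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = sobel(x)       # fixed Sobel-direction filters scaled by sobel_factor
    print(out.shape)     # torch.Size([4, 4, 2, 2])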
| workingcoder/EDCNN | SobelConv2d | false | 16,730 | ["Apache-2.0"] | 117 | 68305f465d2b731b60ce78bd0c95c7742d9f52d1 | https://github.com/workingcoder/EDCNN/tree/68305f465d2b731b60ce78bd0c95c7742d9f52d1 |
ContrastiveLoss
|
import torch
import torch.nn.functional as F
import torch.utils.data
import torch.nn.parallel
import torch.optim
class ContrastiveLoss(torch.nn.Module):
def __init__(self, margin=2.0):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, output1, output2, label):
euclidean_distance = F.pairwise_distance(output1, output2, keepdim=True
)
loss_contrastive = torch.mean(torch.pow(euclidean_distance, 2).
squeeze() * label.float() + torch.pow(torch.clamp(self.margin -
euclidean_distance, min=0.0), 2).squeeze() * (1 - label.float()))
return loss_contrastive
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.store(out_ptr0 + x0, tmp24, xmask)
@triton.jit
def triton_per_fused_add_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + r2, None)
tmp1 = tmp0 * tmp0
tmp3 = tmp1 * tmp2
tmp4 = 2.0
tmp5 = tmp4 - tmp0
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7 * tmp7
tmp9 = 1.0
tmp10 = tmp9 - tmp2
tmp11 = tmp8 * tmp10
tmp12 = tmp3 + tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_mean_mul_rsub_1[grid(1)](buf2, buf0, arg2_1, 1,
256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf2,
class ContrastiveLossNew(torch.nn.Module):
def __init__(self, margin=2.0):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
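A minimal usage sketch, assuming a CUDA device; shapes follow get_inputs and the default margin of 2.0 is kept. The binary label tensor is an illustrative choice:
import torch
if torch.cuda.is_available():
    criterion = ContrastiveLossNew(margin=2.0)
    out1 = torch.rand(4, 4, 4, 4, device='cuda')
    out2 = torch.rand(4, 4, 4, 4, device='cuda')
    label = (torch.rand(4, 4, 4, 4, device='cuda') > 0.5).float()
    loss = criterion(out1, out2, label)  # scalar: d^2 on positives, clamp(margin - d, 0)^2 on negatives
    print(loss.item())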
| wenqingchu/Semantic-CariGANs | ContrastiveLoss | false | 16,731 | ["BSD-3-Clause"] | 50 | d6c2fc2046ee62b42dd70fa8892775e33337bbdf | https://github.com/wenqingchu/Semantic-CariGANs/tree/d6c2fc2046ee62b42dd70fa8892775e33337bbdf |
My_loss
|
import torch
from torch import nn as nn
import torch.nn.parallel
import torch.optim
from torch.autograd import Variable as Variable
import torch.utils.data
import torch._utils
class My_loss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
vx = x - torch.mean(x)
vy = y - torch.mean(y)
rho = torch.sum(vx * vy) / (torch.sqrt(torch.sum(torch.pow(vx, 2))) *
torch.sqrt(torch.sum(torch.pow(vy, 2))))
x_m = torch.mean(x)
y_m = torch.mean(y)
x_s = torch.std(x)
y_s = torch.std(y)
ccc = 2 * rho * x_s * y_s / (torch.pow(x_s, 2) + torch.pow(y_s, 2) +
torch.pow(x_m - y_m, 2))
return -ccc
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn as nn
import torch.nn.parallel
import torch.optim
from torch.autograd import Variable as Variable
import torch.utils.data
import torch._utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_mul_neg_pow_sqrt_std_sub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp3 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp7 / tmp8
tmp12 = tmp4 - tmp11
tmp13 = tmp10 * tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = tmp10 * tmp10
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp12 * tmp12
tmp22 = tl.broadcast_to(tmp21, [RBLOCK])
tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0))
tmp26 = tl.broadcast_to(tmp1, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tmp29 = tl.full([1], 256, tl.int32)
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp28 / tmp30
tmp32 = tmp1 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tl.broadcast_to(tmp33, [RBLOCK])
tmp36 = triton_helpers.promote_to_tensor(tl.sum(tmp34, 0))
tmp38 = tl.broadcast_to(tmp5, [RBLOCK])
tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0))
tmp41 = tmp40 / tmp30
tmp42 = tmp5 - tmp41
tmp43 = tmp42 * tmp42
tmp44 = tl.broadcast_to(tmp43, [RBLOCK])
tmp46 = triton_helpers.promote_to_tensor(tl.sum(tmp44, 0))
tmp47 = libdevice.sqrt(tmp20)
tmp48 = libdevice.sqrt(tmp24)
tmp49 = tmp47 * tmp48
tmp50 = tmp16 / tmp49
tmp51 = 2.0
tmp52 = tmp50 * tmp51
tmp53 = 255.0
tmp54 = tmp36 / tmp53
tmp55 = libdevice.sqrt(tmp54)
tmp56 = tmp52 * tmp55
tmp57 = tmp46 / tmp53
tmp58 = libdevice.sqrt(tmp57)
tmp59 = tmp56 * tmp58
tmp60 = tmp55 * tmp55
tmp61 = tmp58 * tmp58
tmp62 = tmp60 + tmp61
tmp63 = tmp9 - tmp11
tmp64 = tmp63 * tmp63
tmp65 = tmp62 + tmp64
tmp66 = tmp59 / tmp65
tmp67 = -tmp66
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp67, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf11 = empty_strided_cuda((), (), torch.float32)
buf13 = buf11
del buf11
buf14 = buf13
del buf13
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_neg_pow_sqrt_std_sub_sum_0[grid(1)](
buf14, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf14,
class My_lossNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
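A small sanity-check sketch, assuming a CUDA device: the loss is the negative concordance correlation coefficient, so feeding the same tensor twice (an illustrative choice, not what get_inputs does) should give a value close to -1:
import torch
if torch.cuda.is_available():
    ccc_loss = My_lossNew()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    loss = ccc_loss(x, x.clone())
    print(loss.item())   # approximately -1.0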
| wtomin/MIMA-Net | My_loss | false | 16,732 | ["MIT"] | 58 | c0330777313ac04b25e53b137dbecd78b5c8dde6 | https://github.com/wtomin/MIMA-Net/tree/c0330777313ac04b25e53b137dbecd78b5c8dde6 |
FusionMax
|
import torch
import torch.nn as nn
class Fusion(nn.Module):
""" Base Fusion Class"""
def __init__(self, input_dim=3):
super().__init__()
self.input_dim = input_dim
def tile_x2(self, x1, x2, x2_proj=None):
if x2_proj:
x2 = x2_proj(x2)
x2 = x2.unsqueeze(-1).unsqueeze(-1)
x2 = x2.repeat(x1.shape[0], 1, x1.shape[-2], x1.shape[-1])
return x2
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
raise NotImplementedError()
class FusionMax(Fusion):
""" max(x1, x2) """
def __init__(self, input_dim=3):
super(FusionMax, self).__init__(input_dim=input_dim)
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
if x1.shape != x2.shape and len(x1.shape) != len(x2.shape):
x2 = self.tile_x2(x1, x2, x2_proj)
return torch.max(x1, x2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_maximum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_maximum_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class Fusion(nn.Module):
""" Base Fusion Class"""
def __init__(self, input_dim=3):
super().__init__()
self.input_dim = input_dim
def tile_x2(self, x1, x2, x2_proj=None):
if x2_proj:
x2 = x2_proj(x2)
x2 = x2.unsqueeze(-1).unsqueeze(-1)
x2 = x2.repeat(x1.shape[0], 1, x1.shape[-2], x1.shape[-1])
return x2
def forward(self, x1, x2, x2_mask=None, x2_proj=None):
raise NotImplementedError()
class FusionMaxNew(Fusion):
""" max(x1, x2) """
def __init__(self, input_dim=3):
super(FusionMaxNew, self).__init__(input_dim=input_dim)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
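A minimal usage sketch, assuming a CUDA device; with same-shaped inputs (as in get_inputs) the module reduces to an element-wise maximum:
import torch
if torch.cuda.is_available():
    fuse = FusionMaxNew(input_dim=3)
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    fused = fuse(a, b)
    assert torch.equal(fused, torch.max(a, b))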
| wx-b/cliport | FusionMax | false | 16,733 | ["Apache-2.0"] | 110 | c29b0c4b6b1c4e4da5bda6c7f8c718e36f28a6e8 | https://github.com/wx-b/cliport/tree/c29b0c4b6b1c4e4da5bda6c7f8c718e36f28a6e8 |
LossBasic
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class TensorGradient(nn.Module):
"""
    the gradient of a tensor
"""
def __init__(self, L1=True):
super(TensorGradient, self).__init__()
self.L1 = L1
def forward(self, img):
w, h = img.size(-2), img.size(-1)
l = F.pad(img, [1, 0, 0, 0])
r = F.pad(img, [0, 1, 0, 0])
u = F.pad(img, [0, 0, 1, 0])
d = F.pad(img, [0, 0, 0, 1])
if self.L1:
return torch.abs((l - r)[..., 0:w, 0:h]) + torch.abs((u - d)[
..., 0:w, 0:h])
else:
return torch.sqrt(torch.pow((l - r)[..., 0:w, 0:h], 2) + torch.
pow((u - d)[..., 0:w, 0:h], 2))
class LossBasic(nn.Module):
"""
Basic loss function.
"""
def __init__(self, gradient_L1=True):
super(LossBasic, self).__init__()
self.l1_loss = nn.L1Loss()
self.l2_loss = nn.MSELoss()
self.gradient = TensorGradient(gradient_L1)
def forward(self, pred, ground_truth):
return self.l2_loss(pred, ground_truth) + self.l1_loss(self.
gradient(pred), self.gradient(ground_truth))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_mean_mse_loss_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 4
r2 = rindex // 4 % 4
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = -1 + r1
tmp8 = tl.full([1], 0, tl.int64)
tmp9 = tmp7 >= tmp8
tmp10 = tl.load(in_ptr0 + tl.broadcast_to(-1 + r0, [RBLOCK]), tmp9,
other=0.0)
tmp11 = r1
tmp12 = tl.full([1], 4, tl.int64)
tmp13 = tmp11 < tmp12
tmp14 = tl.load(in_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp13, other=0.0)
tmp15 = tmp10 - tmp14
tmp16 = tl_math.abs(tmp15)
tmp17 = -1 + r2
tmp18 = tmp17 >= tmp8
tmp19 = tl.load(in_ptr0 + tl.broadcast_to(-4 + r0, [RBLOCK]), tmp18,
other=0.0)
tmp20 = r2
tmp21 = tmp20 < tmp12
tmp22 = tl.load(in_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp21, other=0.0)
tmp23 = tmp19 - tmp22
tmp24 = tl_math.abs(tmp23)
tmp25 = tmp16 + tmp24
tmp26 = tl.load(in_ptr1 + tl.broadcast_to(-1 + r0, [RBLOCK]), tmp9,
other=0.0)
tmp27 = tl.load(in_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp13, other=0.0)
tmp28 = tmp26 - tmp27
tmp29 = tl_math.abs(tmp28)
tmp30 = tl.load(in_ptr1 + tl.broadcast_to(-4 + r0, [RBLOCK]), tmp18,
other=0.0)
tmp31 = tl.load(in_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp21, other=0.0)
tmp32 = tmp30 - tmp31
tmp33 = tl_math.abs(tmp32)
tmp34 = tmp29 + tmp33
tmp35 = tmp25 - tmp34
tmp36 = tl_math.abs(tmp35)
tmp37 = tl.broadcast_to(tmp36, [RBLOCK])
tmp39 = triton_helpers.promote_to_tensor(tl.sum(tmp37, 0))
tmp40 = 256.0
tmp41 = tmp6 / tmp40
tmp42 = tmp39 / tmp40
tmp43 = tmp41 + tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp43, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_mean_mse_loss_sub_0[grid(1)](buf3, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class TensorGradient(nn.Module):
"""
    the gradient of a tensor
"""
def __init__(self, L1=True):
super(TensorGradient, self).__init__()
self.L1 = L1
def forward(self, img):
w, h = img.size(-2), img.size(-1)
l = F.pad(img, [1, 0, 0, 0])
r = F.pad(img, [0, 1, 0, 0])
u = F.pad(img, [0, 0, 1, 0])
d = F.pad(img, [0, 0, 0, 1])
if self.L1:
return torch.abs((l - r)[..., 0:w, 0:h]) + torch.abs((u - d)[
..., 0:w, 0:h])
else:
return torch.sqrt(torch.pow((l - r)[..., 0:w, 0:h], 2) + torch.
pow((u - d)[..., 0:w, 0:h], 2))
class LossBasicNew(nn.Module):
"""
Basic loss function.
"""
def __init__(self, gradient_L1=True):
super(LossBasicNew, self).__init__()
self.l1_loss = nn.L1Loss()
self.l2_loss = nn.MSELoss()
self.gradient = TensorGradient(gradient_L1)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
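A minimal usage sketch, assuming a CUDA device; the fused kernel returns MSE plus the mean absolute difference of the image gradients as a single scalar, with shapes following get_inputs:
import torch
if torch.cuda.is_available():
    criterion = LossBasicNew(gradient_L1=True)
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    loss = criterion(pred, target)
    print(loss.item())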
| xenbaloch/efficientderain | LossBasic | false | 16,734 | ["MIT"] | 109 | d5646815fd14a5a03c859102ecd2f298db7e53be | https://github.com/xenbaloch/efficientderain/tree/d5646815fd14a5a03c859102ecd2f298db7e53be |
Attention
|
import math
import torch
import torch.nn as nn
class Attention(nn.Module):
def __init__(self, hidden_size):
super(Attention, self).__init__()
self.hidden_size = hidden_size
self.attn = nn.Linear(hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.rand(hidden_size), requires_grad=True)
stdv = 1.0 / math.sqrt(self.v.size(0))
self.v.data.uniform_(-stdv, stdv)
def forward(self, hidden, encoder_outputs):
timestep = encoder_outputs.size(1)
h = hidden.expand(timestep, -1, -1).transpose(0, 1)
attn_energies = self.score(h, encoder_outputs)
return attn_energies.softmax(2)
def score(self, hidden, encoder_outputs):
energy = torch.tanh(self.attn(torch.cat([hidden, encoder_outputs], 2)))
energy = energy.transpose(1, 2)
v = self.v.expand(encoder_outputs.size(0), -1).unsqueeze(1)
energy = torch.bmm(v, energy)
return energy
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 4
x2 = xindex // 32
x3 = xindex // 8
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x2 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x3 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_5, (4, 1, 4), (0, 0,
1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out
=buf3)
buf4 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf4
return buf5, primals_5, reinterpret_tensor(buf0, (16, 8), (8, 1), 0
), buf2, buf5
class AttentionNew(nn.Module):
def __init__(self, hidden_size):
super(AttentionNew, self).__init__()
self.hidden_size = hidden_size
self.attn = nn.Linear(hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.rand(hidden_size), requires_grad=True)
stdv = 1.0 / math.sqrt(self.v.size(0))
self.v.data.uniform_(-stdv, stdv)
def score(self, hidden, encoder_outputs):
energy = torch.tanh(self.attn(torch.cat([hidden, encoder_outputs], 2)))
energy = energy.transpose(1, 2)
v = self.v.expand(encoder_outputs.size(0), -1).unsqueeze(1)
energy = torch.bmm(v, energy)
return energy
def forward(self, input_0, input_1):
primals_4 = self.v
primals_3 = self.attn.weight
primals_5 = self.attn.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
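A minimal usage sketch, assuming a CUDA device; shapes follow get_inputs, and the returned attention weights are softmax-normalized over the last dimension:
import torch
if torch.cuda.is_available():
    attn = AttentionNew(hidden_size=4).cuda()
    hidden = torch.rand(4, 4, 4, device='cuda')
    encoder_outputs = torch.rand(4, 4, 4, device='cuda')
    weights = attn(hidden, encoder_outputs)
    print(weights.shape)  # torch.Size([4, 1, 4])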
| wptoux/attention-ocr | Attention | false | 16,735 | ["MIT"] | 57 | ed08719db86a2aaf7e0cbae6169d9919835879d7 | https://github.com/wptoux/attention-ocr/tree/ed08719db86a2aaf7e0cbae6169d9919835879d7 |
ConvNet
|
import torch
import torch.nn
import torch.utils.data.distributed
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class ConvNet(nn.Module):
def __init__(self, gpus, layouts, dtypes):
super(ConvNet, self).__init__()
self.dtypes = dtypes
if isinstance(gpus, list):
self.layer_gpus = gpus
else:
gpus = [gpus] * 4
self.conv0 = torch.nn.Conv2d(8, 16, (2, 2))
self.conv1 = torch.nn.Conv2d(16, 32, (2, 2))
self.conv2 = torch.nn.Conv2d(32, 16, (2, 2))
self.conv3 = torch.nn.Conv2d(16, 8, (2, 2))
def forward(self, x):
x = x
self.layer_gpus if hasattr(self, 'layer_gpus') else [x.device] * 4
x = self.conv0(x)
x = self.conv1(x)
x = self.conv2(x)
return self.conv3(x)
def get_inputs():
return [torch.rand([4, 8, 64, 64])]
def get_init_inputs():
return [[], {'gpus': False, 'layouts': 4, 'dtypes': torch.float32}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
import torch.utils.data.distributed
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 254016
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3969 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 492032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 238144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3721 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 8, 64, 64), (32768, 4096, 64, 1))
assert_size_stride(primals_2, (16, 8, 2, 2), (32, 4, 2, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (32, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (16, 32, 2, 2), (128, 4, 2, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (8, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_9, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 63, 63), (63504, 3969, 63, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(254016)](buf1, primals_3,
254016, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(492032)](buf3, primals_5,
492032, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 61, 61), (59536, 3721, 61, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(238144)](buf5, primals_7,
238144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 8, 60, 60), (28800, 3600, 60, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_3[grid(115200)](buf7, primals_9,
115200, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
return (buf7, primals_1, primals_2, primals_4, primals_6, primals_8,
buf1, buf3, buf5)
class ConvNetNew(nn.Module):
def __init__(self, gpus, layouts, dtypes):
super(ConvNetNew, self).__init__()
self.dtypes = dtypes
if isinstance(gpus, list):
self.layer_gpus = gpus
else:
gpus = [gpus] * 4
self.conv0 = torch.nn.Conv2d(8, 16, (2, 2))
self.conv1 = torch.nn.Conv2d(16, 32, (2, 2))
self.conv2 = torch.nn.Conv2d(32, 16, (2, 2))
self.conv3 = torch.nn.Conv2d(16, 8, (2, 2))
def forward(self, input_0):
primals_2 = self.conv0.weight
primals_3 = self.conv0.bias
primals_4 = self.conv1.weight
primals_5 = self.conv1.bias
primals_6 = self.conv2.weight
primals_7 = self.conv2.bias
primals_8 = self.conv3.weight
primals_9 = self.conv3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
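A minimal usage sketch, assuming a CUDA device; the four 2x2 convolutions each trim one pixel, so the 64x64 input from get_inputs comes out as 60x60, matching the asserts in call():
import torch
if torch.cuda.is_available():
    net = ConvNetNew(gpus=False, layouts=4, dtypes=torch.float32).cuda()
    x = torch.rand(4, 8, 64, 64, device='cuda')
    out = net(x)
    print(out.shape)      # torch.Size([4, 8, 60, 60])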
| woqidaideshi/bagua | ConvNet | false | 16,737 | ["MIT"] | 635 | 0ee96da598685748519d58d24ce983499cb36721 | https://github.com/woqidaideshi/bagua/tree/0ee96da598685748519d58d24ce983499cb36721 |
TripletLoss
|
import torch
from torch import nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import *
def _batch_hard(mat_distance, mat_similarity, indice=False):
sorted_mat_distance, positive_indices = torch.sort(mat_distance + -
9999999.0 * (1 - mat_similarity), dim=1, descending=True)
hard_p = sorted_mat_distance[:, 0]
hard_p_indice = positive_indices[:, 0]
sorted_mat_distance, negative_indices = torch.sort(mat_distance +
9999999.0 * mat_similarity, dim=1, descending=False)
hard_n = sorted_mat_distance[:, 0]
hard_n_indice = negative_indices[:, 0]
if indice:
return hard_p, hard_n, hard_p_indice, hard_n_indice
return hard_p, hard_n
def euclidean_dist(x, y):
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
dist.addmm_(1, -2, x, y.t())
dist = dist.clamp(min=1e-12).sqrt()
return dist
class TripletLoss(nn.Module):
"""
Compute Triplet loss augmented with Batch Hard
Details can be seen in 'In defense of the Triplet Loss for Person Re-Identification'
"""
def __init__(self, margin, normalize_feature=False):
super(TripletLoss, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
self.margin_loss = nn.MarginRankingLoss(margin=margin)
def forward(self, emb, label):
if self.normalize_feature:
emb = F.normalize(emb)
mat_dist = euclidean_dist(emb, emb)
assert mat_dist.size(0) == mat_dist.size(1)
N = mat_dist.size(0)
mat_sim = label.expand(N, N).eq(label.expand(N, N).t()).float()
dist_ap, dist_an = _batch_hard(mat_dist, mat_sim)
assert dist_an.size(0) == dist_ap.size(0)
y = torch.ones_like(dist_ap)
loss = self.margin_loss(dist_an, dist_ap, y)
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
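A small worked sketch using only the _batch_hard helper defined above, with hand-picked distances (illustrative values, not from the repo), to show which positives and negatives batch-hard mining selects:
import torch
dist = torch.tensor([[0.0, 1.0, 5.0, 6.0],
    [1.0, 0.0, 4.0, 7.0],
    [5.0, 4.0, 0.0, 2.0],
    [6.0, 7.0, 2.0, 0.0]])
label = torch.tensor([0.0, 0.0, 1.0, 1.0])    # two identities, two samples each
sim = label.expand(4, 4).eq(label.expand(4, 4).t()).float()
hard_p, hard_n = _batch_hard(dist, sim)
print(hard_p)   # farthest same-label sample per row: tensor([1., 1., 2., 2.])
print(hard_n)   # nearest different-label sample per row: tensor([5., 4., 4., 6.])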
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)
tmp29 = tl.load(in_ptr1 + (x0 + 4 * r1), xmask, other=0.0)
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = tmp11 + tmp22
tmp24 = tmp0 + tmp23
tmp25 = 1e-12
tmp26 = triton_helpers.maximum(tmp24, tmp25)
tmp27 = libdevice.sqrt(tmp26)
tmp30 = tmp28 == tmp29
tmp31 = tmp30.to(tl.float32)
tmp32 = 1.0
tmp33 = tmp32 - tmp31
tmp34 = -9999999.0
tmp35 = tmp33 * tmp34
tmp36 = tmp27 + tmp35
tmp37 = r1
tmp38 = tmp37.to(tl.int16)
tmp39 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp40 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41, _tmp42 = triton_helpers.sort_with_index(tmp39, tmp40, None, 1,
stable=False, descending=True)
tmp43 = 9999999.0
tmp44 = tmp31 * tmp43
tmp45 = tmp27 + tmp44
tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
tmp47, _tmp48 = triton_helpers.sort_with_index(tmp46, tmp40, None, 1,
stable=False, descending=False)
tl.store(in_out_ptr0 + (r1 + 4 * x0), tmp24, xmask)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp41, xmask)
tl.store(out_ptr1 + (r1 + 4 * x0), tmp47, xmask)
@triton.jit
def triton_per_fused_add_clamp_min_mean_mul_neg_sub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = -1.0
tmp4 = tmp3 * tmp2
tmp5 = 4.0
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp11 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4),
0), out=buf0)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0[grid(4)](
buf1, arg0_1, arg1_1, buf2, buf4, 4, 4, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del buf1
buf6 = empty_strided_cuda((), (), torch.float32)
buf7 = buf6
del buf6
triton_per_fused_add_clamp_min_mean_mul_neg_sub_1[grid(1)](buf7,
buf4, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf2
del buf4
return buf7,
def _batch_hard(mat_distance, mat_similarity, indice=False):
sorted_mat_distance, positive_indices = torch.sort(mat_distance + -
9999999.0 * (1 - mat_similarity), dim=1, descending=True)
hard_p = sorted_mat_distance[:, 0]
hard_p_indice = positive_indices[:, 0]
sorted_mat_distance, negative_indices = torch.sort(mat_distance +
9999999.0 * mat_similarity, dim=1, descending=False)
hard_n = sorted_mat_distance[:, 0]
hard_n_indice = negative_indices[:, 0]
if indice:
return hard_p, hard_n, hard_p_indice, hard_n_indice
return hard_p, hard_n
def euclidean_dist(x, y):
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
dist.addmm_(1, -2, x, y.t())
dist = dist.clamp(min=1e-12).sqrt()
return dist
class TripletLossNew(nn.Module):
"""
Compute Triplet loss augmented with Batch Hard
    Details can be seen in 'In Defense of the Triplet Loss for Person Re-Identification'
"""
def __init__(self, margin, normalize_feature=False):
super(TripletLossNew, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
self.margin_loss = nn.MarginRankingLoss(margin=margin)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
xmy0916/IDM
|
TripletLoss
| false
| 16,738
|
[
"MIT"
] | 68
|
ab29fbd6d3d8c4650f3dbe41a7d21f745d6167ee
|
https://github.com/xmy0916/IDM/tree/ab29fbd6d3d8c4650f3dbe41a7d21f745d6167ee
|
Net
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 32, (3, 3))
self.pool1 = nn.MaxPool2d((2, 2))
self.conv2 = nn.Conv2d(32, 32, (3, 3))
self.pool2 = nn.MaxPool2d((2, 2))
self.conv3 = nn.Conv2d(32, 64, (3, 3))
self.pool3 = nn.MaxPool2d((2, 2))
self.fc1 = nn.Linear(7 * 7 * 64, 128)
self.fc1_drop = nn.Dropout(0.5)
self.fc2 = nn.Linear(128, 43)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool1(x)
x = F.relu(self.conv2(x))
x = self.pool2(x)
x = F.relu(self.conv3(x))
x = self.pool3(x)
x = x.view(-1, 7 * 7 * 64)
x = F.relu(self.fc1(x))
x = self.fc1_drop(x)
return self.fc2(x)
def get_inputs():
return [torch.rand([4, 3, 128, 128])]
def get_init_inputs():
return [[], {}]
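# A minimal usage sketch (not from the source repo): one forward pass on the 128x128
# RGB batch from get_inputs(). The three conv+pool stages leave a 14x14x64 feature
# map, so view(-1, 7 * 7 * 64) regroups the batch of 4 into 16 rows and the logits
# come out as [16, 43].
if __name__ == '__main__':
    net = Net()
    net.eval()
    with torch.no_grad():
        logits = net(torch.rand(4, 3, 128, 128))
    print(logits.shape)  # expected: torch.Size([16, 43])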
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 2032128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 15876 % 32
x0 = xindex % 15876
x4 = xindex // 15876
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 15904 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 508032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 63
x1 = xindex // 63 % 63
x2 = xindex // 3969
x3 = xindex % 3969
tmp0 = tl.load(in_ptr0 + (2 * x0 + 252 * x1 + 15904 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 252 * x1 + 15904 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (126 + 2 * x0 + 252 * x1 + 15904 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (127 + 2 * x0 + 252 * x1 + 15904 * x2), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3 + 4000 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x3 + 4096 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 476288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3721 % 32
x0 = xindex % 3721
x4 = xindex // 3721
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3744 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x2 = xindex // 900
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 122 * x1 + 3744 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 122 * x1 + 3744 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (61 + 2 * x0 + 122 * x1 + 3744 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (62 + 2 * x0 + 122 * x1 + 3744 * x2), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 784 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = xindex // 14
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 128, 128), (49152, 16384, 128, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (128, 3136), (3136, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (43, 128), (128, 1))
assert_size_stride(primals_11, (43,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 126, 126), (508032, 15876, 126, 1))
buf1 = empty_strided_cuda((4, 32, 126, 126), (508928, 15904, 126, 1
), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(2032128)](buf0, primals_2,
buf1, 2032128, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 32, 63, 63), (128000, 4000, 63, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 32, 63, 63), (131072, 4096, 63, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(508032)](buf1, buf2,
buf3, 508032, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 61, 61), (119072, 3721, 61, 1))
buf5 = empty_strided_cuda((4, 32, 61, 61), (119808, 3744, 61, 1),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(476288)](buf4, primals_5,
buf5, 476288, XBLOCK=1024, num_warps=4, num_stages=1)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 32, 30, 30), (28800, 900, 30, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 32, 30, 30), (28800, 900, 30, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(115200)](buf5, buf6,
buf7, 115200, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 28, 28), (50176, 784, 28, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(200704)](buf9, primals_7,
200704, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.int8)
buf11 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_5[grid(50176)](buf9, buf10,
buf11, 50176, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((16, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (16, 3136), (3136, 1),
0), reinterpret_tensor(primals_8, (3136, 128), (1, 3136), 0),
out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_relu_6[grid(2048)](buf13, primals_9, 2048, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((16, 43), (43, 1), torch.float32)
extern_kernels.addmm(primals_11, buf13, reinterpret_tensor(
primals_10, (128, 43), (1, 128), 0), alpha=1, beta=1, out=buf14)
del primals_11
return (buf14, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (16,
3136), (3136, 1), 0), buf13, primals_10, primals_8)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(3, 32, (3, 3))
self.pool1 = nn.MaxPool2d((2, 2))
self.conv2 = nn.Conv2d(32, 32, (3, 3))
self.pool2 = nn.MaxPool2d((2, 2))
self.conv3 = nn.Conv2d(32, 64, (3, 3))
self.pool3 = nn.MaxPool2d((2, 2))
self.fc1 = nn.Linear(7 * 7 * 64, 128)
self.fc1_drop = nn.Dropout(0.5)
self.fc2 = nn.Linear(128, 43)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
wikfeldt/intro-to-dl
|
Net
| false
| 16,739
|
[
"MIT"
] | 59
|
7fb1fb6c520941143000c5e1b46c48c95db17ed6
|
https://github.com/wikfeldt/intro-to-dl/tree/7fb1fb6c520941143000c5e1b46c48c95db17ed6
|
Attention_Decoder
|
import torch
import torch.nn as nn
import torch._utils
class Attention_Decoder(nn.Module):
def __init__(self, dim, num_heads=1, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.fc_q = nn.Linear(dim, dim * 1, bias=qkv_bias)
self.fc_kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, q, x):
B, N, C = x.shape
n_class = q.shape[1]
q = self.fc_q(q).reshape(B, self.num_heads, n_class, C // self.
num_heads)
kv = self.fc_kv(x).reshape(B, N, 2, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
attn1 = q @ k.transpose(-2, -1) * self.scale
attn2 = attn1.softmax(dim=-1)
attn3 = self.attn_drop(attn2)
x = (attn3 @ v).reshape(B, n_class, C)
x = self.proj(x)
x = self.proj_drop(x)
attn = attn1.permute(0, 2, 1, 3)
return attn, x
def get_inputs():
return [torch.rand([4, 1, 1, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
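# A minimal usage sketch (not from the source repo), using the shapes from
# get_inputs(): x is [B, N, C] and q's second dimension is read as n_class. The
# module returns the pre-softmax attention map and the projected class tokens.
if __name__ == '__main__':
    dec = Attention_Decoder(dim=4)
    q = torch.rand(4, 1, 1, 4)
    x = torch.rand(4, 4, 4)
    attn, out = dec(q, x)
    print(attn.shape, out.shape)  # with these inputs: [4, 1, 1, 4] and [4, 1, 4]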
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch._utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_mul_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp0 * tmp15
tl.store(out_ptr0 + x2, tmp17, xmask)
tl.store(out_ptr1 + x2, tmp18, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1, 4), (4, 4, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (8, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 8), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0
), reinterpret_tensor(buf1, (4, 4, 4), (32, 1, 8), 0), out=buf2)
buf3 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 1, 1, 4), (4, 1, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_mul_0[grid(16)](buf2, buf3, buf7, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 1, 1, 4), (4, 4, 4, 1), 0)
del buf2
triton_poi_fused__softmax_1[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0
), reinterpret_tensor(buf1, (4, 4, 4), (32, 8, 1), 4), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf5, (4, 4), (4,
1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf6)
del primals_6
return reinterpret_tensor(buf7, (4, 1, 1, 4), (4, 4, 4, 1), 0
), reinterpret_tensor(buf6, (4, 1, 4), (4, 4, 1), 0
), reinterpret_tensor(primals_2, (4, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf5, (4, 4), (4, 1), 0
), primals_5, reinterpret_tensor(buf1, (4, 4, 4), (32, 1, 8), 4
), reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 4), (32, 8, 1), 0)
class Attention_DecoderNew(nn.Module):
def __init__(self, dim, num_heads=1, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.fc_q = nn.Linear(dim, dim * 1, bias=qkv_bias)
self.fc_kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, input_0, input_1):
primals_3 = self.fc_q.weight
primals_4 = self.fc_kv.weight
primals_5 = self.proj.weight
primals_6 = self.proj.bias
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
xieenze/Trans2Seg
|
Attention_Decoder
| false
| 16,740
|
[
"Apache-2.0"
] | 149
|
3972916bba7f985ca1aabc047fea56bdec9e9e5d
|
https://github.com/xieenze/Trans2Seg/tree/3972916bba7f985ca1aabc047fea56bdec9e9e5d
|
_Enc
|
import torch
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class _NestedEnc(torch.nn.Module):
def __init__(self, f):
super().__init__()
self.f = f
def forward(self, x):
return self.f(x)
class _Enc(torch.nn.Module):
def __init__(self):
super().__init__()
self.e1 = _NestedEnc(torch.nn.Linear(4, 2))
self.e2 = _NestedEnc(self.e1.f)
def forward(self, x):
return self.e1(x) + self.e2(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
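# A minimal usage sketch (not from the source repo): e1 and e2 wrap the *same* Linear,
# so the forward pass is just 2 * e1(x); the compiled kernel below reflects this by
# computing tmp2 + tmp2. The input shape follows get_inputs().
if __name__ == '__main__':
    enc = _Enc()
    x = torch.rand(4, 4, 4, 4)
    print(torch.allclose(enc(x), 2 * enc.e1(x)))  # expected: True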
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 + tmp2
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(128)](buf1, primals_2, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class _NestedEnc(torch.nn.Module):
def __init__(self, f):
super().__init__()
self.f = f
def forward(self, x):
return self.f(x)
class _EncNew(torch.nn.Module):
def __init__(self):
super().__init__()
self.e1 = _NestedEnc(torch.nn.Linear(4, 2))
self.e2 = _NestedEnc(self.e1.f)
def forward(self, input_0):
primals_1 = self.e1.f.weight
primals_2 = self.e1.f.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
xuanyuzhou98/higher
|
_Enc
| false
| 16,741
|
[
"Apache-2.0"
] | 1,401
|
a28b488d8d4c80b38d3a2d322258233d74a89656
|
https://github.com/xuanyuzhou98/higher/tree/a28b488d8d4c80b38d3a2d322258233d74a89656
|
MyConv3d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MyConv3d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
bias=True):
super(MyConv3d, self).__init__()
self.kernel_size = kernel_size
self.conv = nn.Conv3d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, padding=(0, int((
kernel_size - 1) / 2), int((kernel_size - 1) / 2)), bias=bias)
def forward(self, x):
x = F.pad(x, pad=(0,) * 4 + (int((self.kernel_size - 1) / 2),) * 2,
mode='replicate')
return self.conv(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
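# A minimal usage sketch (not from the source repo), assuming a batched 5-D input of
# made-up shape [N, C, D, H, W]: kernel_size=4 replicate-pads the depth axis by 1 on
# each side before the Conv3d (which only pads height and width) runs.
if __name__ == '__main__':
    conv = MyConv3d(in_channels=4, out_channels=4, kernel_size=4)
    x = torch.rand(2, 4, 8, 16, 16)
    print(conv(x).shape)  # expected: torch.Size([2, 4, 7, 15, 15])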
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_replication_pad3d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 6
x3 = xindex // 96
x4 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= x1) + x1 * (x1 < 3)) + 16 * (3 *
(3 <= 0 * (0 >= -1 + x2) + (-1 + x2) * (-1 + x2 > 0)) + (0 * (0 >=
-1 + x2) + (-1 + x2) * (-1 + x2 > 0)) * (0 * (0 >= -1 + x2) + (-1 +
x2) * (-1 + x2 > 0) < 3)) + 64 * x3 + (3 * (3 <= x0) + x0 * (x0 < 3
))), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 27
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_replication_pad3d_0[grid(384)](primals_1, buf0,
384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 6,
4, 4), (0, 96, 16, 4, 1), 0), primals_2, stride=(1, 1, 1),
padding=(0, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf1, (1, 4, 3, 3, 3), (108, 27, 9, 3, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(108)](buf2, primals_3, 108,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 3, 3, 3), (27, 9, 3, 1), 0
), primals_2, reinterpret_tensor(buf0, (1, 4, 6, 4, 4), (384, 96,
16, 4, 1), 0)
class MyConv3dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
bias=True):
super(MyConv3dNew, self).__init__()
self.kernel_size = kernel_size
self.conv = nn.Conv3d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, padding=(0, int((
kernel_size - 1) / 2), int((kernel_size - 1) / 2)), bias=bias)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
xinxindefeiyu/S2VD-master_RESID
|
MyConv3d
| false
| 16,742
|
[
"MIT"
] | 48
|
b075d6873842d70f1d8d3215daf0565f8c0ffe9a
|
https://github.com/xinxindefeiyu/S2VD-master_RESID/tree/b075d6873842d70f1d8d3215daf0565f8c0ffe9a
|
LossFunc
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class TensorGradient(nn.Module):
"""
    the gradient of a tensor
"""
def __init__(self, L1=True):
super(TensorGradient, self).__init__()
self.L1 = L1
def forward(self, img):
w, h = img.size(-2), img.size(-1)
l = F.pad(img, [1, 0, 0, 0])
r = F.pad(img, [0, 1, 0, 0])
u = F.pad(img, [0, 0, 1, 0])
d = F.pad(img, [0, 0, 0, 1])
if self.L1:
return torch.abs((l - r)[..., 0:w, 0:h]) + torch.abs((u - d)[
..., 0:w, 0:h])
else:
return torch.sqrt(torch.pow((l - r)[..., 0:w, 0:h], 2) + torch.
pow((u - d)[..., 0:w, 0:h], 2))
class LossBasic(nn.Module):
"""
Basic loss function.
"""
def __init__(self, gradient_L1=True):
super(LossBasic, self).__init__()
self.l1_loss = nn.L1Loss()
self.l2_loss = nn.MSELoss()
self.gradient = TensorGradient(gradient_L1)
def forward(self, pred, ground_truth):
return self.l2_loss(pred, ground_truth) + self.l1_loss(self.
gradient(pred), self.gradient(ground_truth))
class LossAnneal(nn.Module):
"""
anneal loss function
"""
def __init__(self, alpha=0.9998, beta=100):
super(LossAnneal, self).__init__()
self.global_step = 0
self.loss_func = LossBasic(gradient_L1=True)
self.alpha = alpha
self.beta = beta
def forward(self, global_step, pred_i, ground_truth):
"""
:param global_step: int
:param pred_i: [batch_size, N, 3, height, width]
:param ground_truth: [batch_size, 3, height, width]
:return:
"""
loss = 0
for i in range(pred_i.size(1)):
loss += self.loss_func(pred_i[:, i, ...], ground_truth)
loss /= pred_i.size(1)
return self.beta * self.alpha ** global_step * loss
class LossFunc(nn.Module):
"""
loss function of KPN
"""
def __init__(self, coeff_basic=1.0, coeff_anneal=1.0, gradient_L1=True,
alpha=0.9998, beta=100):
super(LossFunc, self).__init__()
self.coeff_basic = coeff_basic
self.coeff_anneal = coeff_anneal
self.loss_basic = LossBasic(gradient_L1)
self.loss_anneal = LossAnneal(alpha, beta)
def forward(self, pred_img_i, pred_img, ground_truth, global_step):
"""
forward function of loss_func
:param frames: frame_1 ~ frame_N, shape: [batch, N, 3, height, width]
        :param core: a dict converted by ......
:param ground_truth: shape [batch, 3, height, width]
:param global_step: int
:return: loss
"""
return self.coeff_basic * self.loss_basic(pred_img, ground_truth
), self.coeff_anneal * self.loss_anneal(global_step, pred_img_i,
ground_truth)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
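# A minimal usage sketch (not from the source repo): per the docstrings, pred_img_i
# stacks N per-iteration predictions and global_step is an int; the shapes below are
# made up for illustration. The module returns the weighted basic and anneal losses.
if __name__ == '__main__':
    loss_fn = LossFunc()
    pred_img_i = torch.rand(2, 3, 3, 16, 16)   # [batch, N, 3, H, W]
    pred_img = torch.rand(2, 3, 16, 16)
    ground_truth = torch.rand(2, 3, 16, 16)
    basic, anneal = loss_fn(pred_img_i, pred_img, ground_truth, 1000)
    print(basic.item(), anneal.item())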
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_mean_mse_loss_mul_pow_sub_0(in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr13, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
r2 = rindex // 16 % 4
r4 = rindex % 4
r5 = rindex // 4 % 4
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp7 = tl.load(in_ptr2 + (r1 + 64 * r2), None, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr2 + (16 + r1 + 64 * r2), None, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr2 + (32 + r1 + 64 * r2), None, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + (48 + r1 + 64 * r2), None, eviction_policy=
'evict_last')
tmp145 = tl.load(in_ptr3 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp8 = tmp7 - tmp1
tmp9 = tmp8 * tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp14 = tmp13 - tmp1
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp20 = tmp19 - tmp1
tmp21 = tmp20 * tmp20
tmp22 = tl.broadcast_to(tmp21, [RBLOCK])
tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0))
tmp26 = tmp25 - tmp1
tmp27 = tmp26 * tmp26
tmp28 = tl.broadcast_to(tmp27, [RBLOCK])
tmp30 = triton_helpers.promote_to_tensor(tl.sum(tmp28, 0))
tmp31 = -1 + r4
tmp32 = tl.full([1], 0, tl.int64)
tmp33 = tmp31 >= tmp32
tmp34 = tl.load(in_ptr0 + tl.broadcast_to(-1 + r0, [RBLOCK]), tmp33,
other=0.0)
tmp35 = r4
tmp36 = tl.full([1], 4, tl.int64)
tmp37 = tmp35 < tmp36
tmp38 = tl.load(in_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp37, other=0.0)
tmp39 = tmp34 - tmp38
tmp40 = tl_math.abs(tmp39)
tmp41 = -1 + r5
tmp42 = tmp41 >= tmp32
tmp43 = tl.load(in_ptr0 + tl.broadcast_to(-4 + r0, [RBLOCK]), tmp42,
other=0.0)
tmp44 = r5
tmp45 = tmp44 < tmp36
tmp46 = tl.load(in_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp45, other=0.0)
tmp47 = tmp43 - tmp46
tmp48 = tl_math.abs(tmp47)
tmp49 = tmp40 + tmp48
tmp50 = tl.load(in_ptr1 + tl.broadcast_to(-1 + r0, [RBLOCK]), tmp33,
other=0.0)
tmp51 = tl.load(in_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp37, other=0.0)
tmp52 = tmp50 - tmp51
tmp53 = tl_math.abs(tmp52)
tmp54 = tl.load(in_ptr1 + tl.broadcast_to(-4 + r0, [RBLOCK]), tmp42,
other=0.0)
tmp55 = tl.load(in_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp45, other=0.0)
tmp56 = tmp54 - tmp55
tmp57 = tl_math.abs(tmp56)
tmp58 = tmp53 + tmp57
tmp59 = tmp49 - tmp58
tmp60 = tl.load(in_ptr2 + tl.broadcast_to(-1 + r1 + 64 * r2, [RBLOCK]),
tmp33, eviction_policy='evict_last', other=0.0)
tmp61 = tl.load(in_ptr2 + tl.broadcast_to(r1 + 64 * r2, [RBLOCK]),
tmp37, eviction_policy='evict_last', other=0.0)
tmp62 = tmp60 - tmp61
tmp63 = tl_math.abs(tmp62)
tmp64 = tl.load(in_ptr2 + tl.broadcast_to(-4 + r1 + 64 * r2, [RBLOCK]),
tmp42, eviction_policy='evict_last', other=0.0)
tmp65 = tl.load(in_ptr2 + tl.broadcast_to(r1 + 64 * r2, [RBLOCK]),
tmp45, eviction_policy='evict_last', other=0.0)
tmp66 = tmp64 - tmp65
tmp67 = tl_math.abs(tmp66)
tmp68 = tmp63 + tmp67
tmp69 = tmp68 - tmp58
tmp70 = tl.load(in_ptr2 + tl.broadcast_to(15 + r1 + 64 * r2, [RBLOCK]),
tmp33, eviction_policy='evict_last', other=0.0)
tmp71 = tl.load(in_ptr2 + tl.broadcast_to(16 + r1 + 64 * r2, [RBLOCK]),
tmp37, eviction_policy='evict_last', other=0.0)
tmp72 = tmp70 - tmp71
tmp73 = tl_math.abs(tmp72)
tmp74 = tl.load(in_ptr2 + tl.broadcast_to(12 + r1 + 64 * r2, [RBLOCK]),
tmp42, eviction_policy='evict_last', other=0.0)
tmp75 = tl.load(in_ptr2 + tl.broadcast_to(16 + r1 + 64 * r2, [RBLOCK]),
tmp45, eviction_policy='evict_last', other=0.0)
tmp76 = tmp74 - tmp75
tmp77 = tl_math.abs(tmp76)
tmp78 = tmp73 + tmp77
tmp79 = tmp78 - tmp58
tmp80 = tl.load(in_ptr2 + tl.broadcast_to(31 + r1 + 64 * r2, [RBLOCK]),
tmp33, eviction_policy='evict_last', other=0.0)
tmp81 = tl.load(in_ptr2 + tl.broadcast_to(32 + r1 + 64 * r2, [RBLOCK]),
tmp37, eviction_policy='evict_last', other=0.0)
tmp82 = tmp80 - tmp81
tmp83 = tl_math.abs(tmp82)
tmp84 = tl.load(in_ptr2 + tl.broadcast_to(28 + r1 + 64 * r2, [RBLOCK]),
tmp42, eviction_policy='evict_last', other=0.0)
tmp85 = tl.load(in_ptr2 + tl.broadcast_to(32 + r1 + 64 * r2, [RBLOCK]),
tmp45, eviction_policy='evict_last', other=0.0)
tmp86 = tmp84 - tmp85
tmp87 = tl_math.abs(tmp86)
tmp88 = tmp83 + tmp87
tmp89 = tmp88 - tmp58
tmp90 = tl.load(in_ptr2 + tl.broadcast_to(47 + r1 + 64 * r2, [RBLOCK]),
tmp33, eviction_policy='evict_last', other=0.0)
tmp91 = tl.load(in_ptr2 + tl.broadcast_to(48 + r1 + 64 * r2, [RBLOCK]),
tmp37, eviction_policy='evict_last', other=0.0)
tmp92 = tmp90 - tmp91
tmp93 = tl_math.abs(tmp92)
tmp94 = tl.load(in_ptr2 + tl.broadcast_to(44 + r1 + 64 * r2, [RBLOCK]),
tmp42, eviction_policy='evict_last', other=0.0)
tmp95 = tl.load(in_ptr2 + tl.broadcast_to(48 + r1 + 64 * r2, [RBLOCK]),
tmp45, eviction_policy='evict_last', other=0.0)
tmp96 = tmp94 - tmp95
tmp97 = tl_math.abs(tmp96)
tmp98 = tmp93 + tmp97
tmp99 = tmp98 - tmp58
tmp100 = tl_math.abs(tmp59)
tmp101 = tl.broadcast_to(tmp100, [RBLOCK])
tmp103 = triton_helpers.promote_to_tensor(tl.sum(tmp101, 0))
tmp104 = tl_math.abs(tmp69)
tmp105 = tl.broadcast_to(tmp104, [RBLOCK])
tmp107 = triton_helpers.promote_to_tensor(tl.sum(tmp105, 0))
tmp108 = tl_math.abs(tmp79)
tmp109 = tl.broadcast_to(tmp108, [RBLOCK])
tmp111 = triton_helpers.promote_to_tensor(tl.sum(tmp109, 0))
tmp112 = tl_math.abs(tmp89)
tmp113 = tl.broadcast_to(tmp112, [RBLOCK])
tmp115 = triton_helpers.promote_to_tensor(tl.sum(tmp113, 0))
tmp116 = tl_math.abs(tmp99)
tmp117 = tl.broadcast_to(tmp116, [RBLOCK])
tmp119 = triton_helpers.promote_to_tensor(tl.sum(tmp117, 0))
tmp120 = 256.0
tmp121 = tmp12 / tmp120
tmp122 = tmp107 / tmp120
tmp123 = tmp121 + tmp122
tmp124 = 0.0
tmp125 = tmp123 + tmp124
tmp126 = tmp18 / tmp120
tmp127 = tmp111 / tmp120
tmp128 = tmp126 + tmp127
tmp129 = tmp125 + tmp128
tmp130 = tmp24 / tmp120
tmp131 = tmp115 / tmp120
tmp132 = tmp130 + tmp131
tmp133 = tmp129 + tmp132
tmp134 = tmp30 / tmp120
tmp135 = tmp119 / tmp120
tmp136 = tmp134 + tmp135
tmp137 = tmp133 + tmp136
tmp138 = 0.25
tmp139 = tmp137 * tmp138
tmp140 = tmp6 / tmp120
tmp141 = tmp103 / tmp120
tmp142 = tmp140 + tmp141
tmp143 = 1.0
tmp144 = tmp142 * tmp143
tmp146 = 0.9998
tmp147 = libdevice.pow(tmp146, tmp145)
tmp148 = 100.0
tmp149 = tmp147 * tmp148
tmp150 = tmp149 * tmp139
tmp151 = tmp150 * tmp143
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp144, None)
tl.store(out_ptr13 + tl.broadcast_to(r0, [RBLOCK]), tmp151, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf17 = buf0
del buf0
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_add_div_mean_mse_loss_mul_pow_sub_0[grid(1)](buf17
, arg1_1, arg0_1, arg2_1, arg3_1, buf16, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf17, buf16
class TensorGradient(nn.Module):
"""
    the gradient of a tensor
"""
def __init__(self, L1=True):
super(TensorGradient, self).__init__()
self.L1 = L1
def forward(self, img):
w, h = img.size(-2), img.size(-1)
l = F.pad(img, [1, 0, 0, 0])
r = F.pad(img, [0, 1, 0, 0])
u = F.pad(img, [0, 0, 1, 0])
d = F.pad(img, [0, 0, 0, 1])
if self.L1:
return torch.abs((l - r)[..., 0:w, 0:h]) + torch.abs((u - d)[
..., 0:w, 0:h])
else:
return torch.sqrt(torch.pow((l - r)[..., 0:w, 0:h], 2) + torch.
pow((u - d)[..., 0:w, 0:h], 2))
class LossBasic(nn.Module):
"""
Basic loss function.
"""
def __init__(self, gradient_L1=True):
super(LossBasic, self).__init__()
self.l1_loss = nn.L1Loss()
self.l2_loss = nn.MSELoss()
self.gradient = TensorGradient(gradient_L1)
def forward(self, pred, ground_truth):
return self.l2_loss(pred, ground_truth) + self.l1_loss(self.
gradient(pred), self.gradient(ground_truth))
class LossAnneal(nn.Module):
"""
anneal loss function
"""
def __init__(self, alpha=0.9998, beta=100):
super(LossAnneal, self).__init__()
self.global_step = 0
self.loss_func = LossBasic(gradient_L1=True)
self.alpha = alpha
self.beta = beta
def forward(self, global_step, pred_i, ground_truth):
"""
:param global_step: int
:param pred_i: [batch_size, N, 3, height, width]
:param ground_truth: [batch_size, 3, height, width]
:return:
"""
loss = 0
for i in range(pred_i.size(1)):
loss += self.loss_func(pred_i[:, i, ...], ground_truth)
loss /= pred_i.size(1)
return self.beta * self.alpha ** global_step * loss
class LossFuncNew(nn.Module):
"""
loss function of KPN
"""
def __init__(self, coeff_basic=1.0, coeff_anneal=1.0, gradient_L1=True,
alpha=0.9998, beta=100):
super(LossFuncNew, self).__init__()
self.coeff_basic = coeff_basic
self.coeff_anneal = coeff_anneal
self.loss_basic = LossBasic(gradient_L1)
self.loss_anneal = LossAnneal(alpha, beta)
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0], output[1]
|
xenbaloch/efficientderain
|
LossFunc
| false
| 16,743
|
[
"MIT"
] | 109
|
d5646815fd14a5a03c859102ecd2f298db7e53be
|
https://github.com/xenbaloch/efficientderain/tree/d5646815fd14a5a03c859102ecd2f298db7e53be
|
SuperpointDescriptor
|
import torch
import torch.nn as nn
class SuperpointDescriptor(nn.Module):
""" Descriptor decoder based on the SuperPoint arcihtecture. """
def __init__(self, input_feat_dim=128):
super(SuperpointDescriptor, self).__init__()
self.relu = torch.nn.ReLU(inplace=True)
self.convPa = torch.nn.Conv2d(input_feat_dim, 256, kernel_size=3,
stride=1, padding=1)
self.convPb = torch.nn.Conv2d(256, 128, kernel_size=1, stride=1,
padding=0)
def forward(self, input_features):
feat = self.relu(self.convPa(input_features))
semi = self.convPb(feat)
return semi
def get_inputs():
return [torch.rand([4, 128, 64, 64])]
def get_init_inputs():
return [[], {}]
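# A minimal usage sketch (not from the source repo): the head keeps the spatial size
# (3x3 conv, stride 1, pad 1, then a 1x1 conv) and maps 128 input channels to 128
# descriptor channels; the batch size below is illustrative.
if __name__ == '__main__':
    head = SuperpointDescriptor(input_feat_dim=128)
    desc = head(torch.rand(1, 128, 64, 64))
    print(desc.shape)  # expected: torch.Size([1, 128, 64, 64])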
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 512
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 524288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 512
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 128
y1 = yindex // 128
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 524288 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 128, 64, 64), (524288, 4096, 64, 1))
assert_size_stride(primals_4, (128, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_5, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(32768, 9)](primals_1, buf0, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128),
torch.float32)
triton_poi_fused_1[grid(512, 4096)](primals_3, buf1, 512, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_2[grid(4194304)](buf3, primals_2,
4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 128, 64, 64), (524288, 1, 8192, 128))
buf5 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_3[grid(512, 4096)](buf4, primals_5,
buf5, 512, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf4
del primals_5
return buf5, buf0, buf1, primals_4, buf3
class SuperpointDescriptorNew(nn.Module):
""" Descriptor decoder based on the SuperPoint arcihtecture. """
def __init__(self, input_feat_dim=128):
super(SuperpointDescriptorNew, self).__init__()
self.relu = torch.nn.ReLU(inplace=True)
self.convPa = torch.nn.Conv2d(input_feat_dim, 256, kernel_size=3,
stride=1, padding=1)
self.convPb = torch.nn.Conv2d(256, 128, kernel_size=1, stride=1,
padding=0)
def forward(self, input_0):
primals_1 = self.convPa.weight
primals_2 = self.convPa.bias
primals_4 = self.convPb.weight
primals_5 = self.convPb.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
wx-b/SOLD2
|
SuperpointDescriptor
| false
| 16,744
|
[
"MIT"
] | 347
|
71c3243f9d3a695788d0a6bfd134b9849425900a
|
https://github.com/wx-b/SOLD2/tree/71c3243f9d3a695788d0a6bfd134b9849425900a
|
LossAnneal
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class TensorGradient(nn.Module):
"""
    the gradient of a tensor
"""
def __init__(self, L1=True):
super(TensorGradient, self).__init__()
self.L1 = L1
def forward(self, img):
w, h = img.size(-2), img.size(-1)
l = F.pad(img, [1, 0, 0, 0])
r = F.pad(img, [0, 1, 0, 0])
u = F.pad(img, [0, 0, 1, 0])
d = F.pad(img, [0, 0, 0, 1])
if self.L1:
return torch.abs((l - r)[..., 0:w, 0:h]) + torch.abs((u - d)[
..., 0:w, 0:h])
else:
return torch.sqrt(torch.pow((l - r)[..., 0:w, 0:h], 2) + torch.
pow((u - d)[..., 0:w, 0:h], 2))
class LossBasic(nn.Module):
"""
Basic loss function.
"""
def __init__(self, gradient_L1=True):
super(LossBasic, self).__init__()
self.l1_loss = nn.L1Loss()
self.l2_loss = nn.MSELoss()
self.gradient = TensorGradient(gradient_L1)
def forward(self, pred, ground_truth):
return self.l2_loss(pred, ground_truth) + self.l1_loss(self.
gradient(pred), self.gradient(ground_truth))
class LossAnneal(nn.Module):
"""
anneal loss function
"""
def __init__(self, alpha=0.9998, beta=100):
super(LossAnneal, self).__init__()
self.global_step = 0
self.loss_func = LossBasic(gradient_L1=True)
self.alpha = alpha
self.beta = beta
def forward(self, global_step, pred_i, ground_truth):
"""
:param global_step: int
:param pred_i: [batch_size, N, 3, height, width]
:param ground_truth: [batch_size, 3, height, width]
:return:
"""
loss = 0
for i in range(pred_i.size(1)):
loss += self.loss_func(pred_i[:, i, ...], ground_truth)
loss /= pred_i.size(1)
return self.beta * self.alpha ** global_step * loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
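# A minimal usage sketch (not from the source repo): the per-step basic losses are
# averaged over N and scaled by beta * alpha ** global_step; the shapes below are
# made up for illustration.
if __name__ == '__main__':
    anneal = LossAnneal(alpha=0.9998, beta=100)
    pred_i = torch.rand(2, 4, 3, 8, 8)   # [batch, N, 3, H, W]
    gt = torch.rand(2, 3, 8, 8)
    print(anneal(5000, pred_i, gt).item())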
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_mean_mse_loss_mul_pow_sub_0(in_ptr0,
in_ptr1, in_ptr2, out_ptr11, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16 % 4
r5 = rindex
r3 = rindex % 4
r4 = rindex // 4 % 4
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + r5, None)
tmp7 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp120 = tl.load(in_ptr2 + r5, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp8 = tmp7 - tmp1
tmp9 = tmp8 * tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp14 = tmp13 - tmp1
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp20 = tmp19 - tmp1
tmp21 = tmp20 * tmp20
tmp22 = tl.broadcast_to(tmp21, [RBLOCK])
tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0))
tmp25 = -1 + r3
tmp26 = tl.full([1], 0, tl.int64)
tmp27 = tmp25 >= tmp26
tmp28 = tl.load(in_ptr0 + tl.broadcast_to(-1 + r0 + 64 * r1, [RBLOCK]),
tmp27, eviction_policy='evict_last', other=0.0)
tmp29 = r3
tmp30 = tl.full([1], 4, tl.int64)
tmp31 = tmp29 < tmp30
tmp32 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 64 * r1, [RBLOCK]),
tmp31, eviction_policy='evict_last', other=0.0)
tmp33 = tmp28 - tmp32
tmp34 = tl_math.abs(tmp33)
tmp35 = -1 + r4
tmp36 = tmp35 >= tmp26
tmp37 = tl.load(in_ptr0 + tl.broadcast_to(-4 + r0 + 64 * r1, [RBLOCK]),
tmp36, eviction_policy='evict_last', other=0.0)
tmp38 = r4
tmp39 = tmp38 < tmp30
tmp40 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 64 * r1, [RBLOCK]),
tmp39, eviction_policy='evict_last', other=0.0)
tmp41 = tmp37 - tmp40
tmp42 = tl_math.abs(tmp41)
tmp43 = tmp34 + tmp42
tmp44 = tl.load(in_ptr1 + tl.broadcast_to(-1 + r5, [RBLOCK]), tmp27,
other=0.0)
tmp45 = tl.load(in_ptr1 + tl.broadcast_to(r5, [RBLOCK]), tmp31, other=0.0)
tmp46 = tmp44 - tmp45
tmp47 = tl_math.abs(tmp46)
tmp48 = tl.load(in_ptr1 + tl.broadcast_to(-4 + r5, [RBLOCK]), tmp36,
other=0.0)
tmp49 = tl.load(in_ptr1 + tl.broadcast_to(r5, [RBLOCK]), tmp39, other=0.0)
tmp50 = tmp48 - tmp49
tmp51 = tl_math.abs(tmp50)
tmp52 = tmp47 + tmp51
tmp53 = tmp43 - tmp52
tmp54 = tl.load(in_ptr0 + tl.broadcast_to(15 + r0 + 64 * r1, [RBLOCK]),
tmp27, eviction_policy='evict_last', other=0.0)
tmp55 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 64 * r1, [RBLOCK]),
tmp31, eviction_policy='evict_last', other=0.0)
tmp56 = tmp54 - tmp55
tmp57 = tl_math.abs(tmp56)
tmp58 = tl.load(in_ptr0 + tl.broadcast_to(12 + r0 + 64 * r1, [RBLOCK]),
tmp36, eviction_policy='evict_last', other=0.0)
tmp59 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 64 * r1, [RBLOCK]),
tmp39, eviction_policy='evict_last', other=0.0)
tmp60 = tmp58 - tmp59
tmp61 = tl_math.abs(tmp60)
tmp62 = tmp57 + tmp61
tmp63 = tmp62 - tmp52
tmp64 = tl.load(in_ptr0 + tl.broadcast_to(31 + r0 + 64 * r1, [RBLOCK]),
tmp27, eviction_policy='evict_last', other=0.0)
tmp65 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 64 * r1, [RBLOCK]),
tmp31, eviction_policy='evict_last', other=0.0)
tmp66 = tmp64 - tmp65
tmp67 = tl_math.abs(tmp66)
tmp68 = tl.load(in_ptr0 + tl.broadcast_to(28 + r0 + 64 * r1, [RBLOCK]),
tmp36, eviction_policy='evict_last', other=0.0)
tmp69 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 64 * r1, [RBLOCK]),
tmp39, eviction_policy='evict_last', other=0.0)
tmp70 = tmp68 - tmp69
tmp71 = tl_math.abs(tmp70)
tmp72 = tmp67 + tmp71
tmp73 = tmp72 - tmp52
tmp74 = tl.load(in_ptr0 + tl.broadcast_to(47 + r0 + 64 * r1, [RBLOCK]),
tmp27, eviction_policy='evict_last', other=0.0)
tmp75 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 64 * r1, [RBLOCK]),
tmp31, eviction_policy='evict_last', other=0.0)
tmp76 = tmp74 - tmp75
tmp77 = tl_math.abs(tmp76)
tmp78 = tl.load(in_ptr0 + tl.broadcast_to(44 + r0 + 64 * r1, [RBLOCK]),
tmp36, eviction_policy='evict_last', other=0.0)
tmp79 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 64 * r1, [RBLOCK]),
tmp39, eviction_policy='evict_last', other=0.0)
tmp80 = tmp78 - tmp79
tmp81 = tl_math.abs(tmp80)
tmp82 = tmp77 + tmp81
tmp83 = tmp82 - tmp52
tmp84 = tl_math.abs(tmp53)
tmp85 = tl.broadcast_to(tmp84, [RBLOCK])
tmp87 = triton_helpers.promote_to_tensor(tl.sum(tmp85, 0))
tmp88 = tl_math.abs(tmp63)
tmp89 = tl.broadcast_to(tmp88, [RBLOCK])
tmp91 = triton_helpers.promote_to_tensor(tl.sum(tmp89, 0))
tmp92 = tl_math.abs(tmp73)
tmp93 = tl.broadcast_to(tmp92, [RBLOCK])
tmp95 = triton_helpers.promote_to_tensor(tl.sum(tmp93, 0))
tmp96 = tl_math.abs(tmp83)
tmp97 = tl.broadcast_to(tmp96, [RBLOCK])
tmp99 = triton_helpers.promote_to_tensor(tl.sum(tmp97, 0))
tmp100 = 256.0
tmp101 = tmp6 / tmp100
tmp102 = tmp87 / tmp100
tmp103 = tmp101 + tmp102
tmp104 = 0.0
tmp105 = tmp103 + tmp104
tmp106 = tmp12 / tmp100
tmp107 = tmp91 / tmp100
tmp108 = tmp106 + tmp107
tmp109 = tmp105 + tmp108
tmp110 = tmp18 / tmp100
tmp111 = tmp95 / tmp100
tmp112 = tmp110 + tmp111
tmp113 = tmp109 + tmp112
tmp114 = tmp24 / tmp100
tmp115 = tmp99 / tmp100
tmp116 = tmp114 + tmp115
tmp117 = tmp113 + tmp116
tmp118 = 0.25
tmp119 = tmp117 * tmp118
tmp121 = 0.9998
tmp122 = libdevice.pow(tmp121, tmp120)
tmp123 = 100.0
tmp124 = tmp122 * tmp123
tmp125 = tmp124 * tmp119
tl.store(out_ptr11 + tl.broadcast_to(r5, [RBLOCK]), tmp125, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_add_div_mean_mse_loss_mul_pow_sub_0[grid(1)](
arg0_1, arg1_1, arg2_1, buf13, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf13,
class TensorGradient(nn.Module):
"""
    Computes the horizontal and vertical image gradients of a tensor (L1 or L2 magnitude).
"""
def __init__(self, L1=True):
super(TensorGradient, self).__init__()
self.L1 = L1
def forward(self, img):
w, h = img.size(-2), img.size(-1)
l = F.pad(img, [1, 0, 0, 0])
r = F.pad(img, [0, 1, 0, 0])
u = F.pad(img, [0, 0, 1, 0])
d = F.pad(img, [0, 0, 0, 1])
if self.L1:
return torch.abs((l - r)[..., 0:w, 0:h]) + torch.abs((u - d)[
..., 0:w, 0:h])
else:
return torch.sqrt(torch.pow((l - r)[..., 0:w, 0:h], 2) + torch.
pow((u - d)[..., 0:w, 0:h], 2))
class LossBasic(nn.Module):
"""
Basic loss function.
"""
def __init__(self, gradient_L1=True):
super(LossBasic, self).__init__()
self.l1_loss = nn.L1Loss()
self.l2_loss = nn.MSELoss()
self.gradient = TensorGradient(gradient_L1)
def forward(self, pred, ground_truth):
return self.l2_loss(pred, ground_truth) + self.l1_loss(self.
gradient(pred), self.gradient(ground_truth))
class LossAnnealNew(nn.Module):
"""
    Annealed loss function: LossBasic terms averaged and scaled by beta * alpha ** step.
"""
def __init__(self, alpha=0.9998, beta=100):
super(LossAnnealNew, self).__init__()
self.global_step = 0
self.loss_func = LossBasic(gradient_L1=True)
self.alpha = alpha
self.beta = beta
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
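# A minimal usage sketch, assuming a CUDA device; the _-prefixed names are illustrative
# only. call() expects three (4, 4, 4, 4) tensors (the shapes asserted above) and
# returns a (4, 4, 4, 4) tensor in which the averaged L2 + gradient-L1 terms are scaled
# by the annealing factor beta * alpha ** step (the 100.0 and 0.9998 constants visible
# in the fused kernel).
if __name__ == '__main__' and torch.cuda.is_available():
    _loss_fn = LossAnnealNew(alpha=0.9998, beta=100)
    _inputs = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3)]
    print(_loss_fn(*_inputs).shape)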
|
xenbaloch/efficientderain
|
LossAnneal
| false
| 16,745
|
[
"MIT"
] | 109
|
d5646815fd14a5a03c859102ecd2f298db7e53be
|
https://github.com/xenbaloch/efficientderain/tree/d5646815fd14a5a03c859102ecd2f298db7e53be
|
SuperpointDecoder
|
import torch
import torch.nn as nn
class SuperpointDecoder(nn.Module):
""" Junction decoder based on the SuperPoint architecture. """
def __init__(self, input_feat_dim=128, backbone_name='lcnn'):
super(SuperpointDecoder, self).__init__()
self.relu = torch.nn.ReLU(inplace=True)
if backbone_name == 'lcnn':
self.convPa = torch.nn.Conv2d(input_feat_dim, 256, kernel_size=
3, stride=2, padding=1)
elif backbone_name == 'superpoint':
self.convPa = torch.nn.Conv2d(input_feat_dim, 256, kernel_size=
3, stride=1, padding=1)
else:
raise ValueError('[Error] Unknown backbone option.')
self.convPb = torch.nn.Conv2d(256, 65, kernel_size=1, stride=1,
padding=0)
def forward(self, input_features):
feat = self.relu(self.convPa(input_features))
semi = self.convPb(feat)
return semi
def get_inputs():
return [torch.rand([4, 128, 64, 64])]
def get_init_inputs():
return [[], {}]
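# A minimal usage sketch: with the default 'lcnn' backbone the stride-2 convPa halves
# the spatial resolution, so a (4, 128, 64, 64) feature map produces a (4, 65, 32, 32)
# junction logit map (the 65 channels follow SuperPoint's 8x8-cell-plus-dustbin
# convention).
if __name__ == '__main__':
    _decoder = SuperpointDecoder(input_feat_dim=128, backbone_name='lcnn')
    _semi = _decoder(torch.rand(4, 128, 64, 64))
    assert _semi.shape == (4, 65, 32, 32)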
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 512
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 524288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 260
xnumel = 1024
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 65
y1 = yindex // 65
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 65 * x2 + 66560 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 1024 * y3), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 128, 64, 64), (524288, 4096, 64, 1))
assert_size_stride(primals_4, (65, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_5, (65,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(32768, 9)](primals_1, buf0, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128),
torch.float32)
triton_poi_fused_1[grid(512, 4096)](primals_3, buf1, 512, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 32, 32), (262144, 1, 8192, 256))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_2[grid(1048576)](buf3, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 65, 32, 32), (66560, 1, 2080, 65))
buf5 = empty_strided_cuda((4, 65, 32, 32), (66560, 1024, 32, 1),
torch.float32)
triton_poi_fused_convolution_3[grid(260, 1024)](buf4, primals_5,
buf5, 260, 1024, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf4
del primals_5
return buf5, buf0, buf1, primals_4, buf3
class SuperpointDecoderNew(nn.Module):
""" Junction decoder based on the SuperPoint architecture. """
def __init__(self, input_feat_dim=128, backbone_name='lcnn'):
super(SuperpointDecoderNew, self).__init__()
self.relu = torch.nn.ReLU(inplace=True)
if backbone_name == 'lcnn':
self.convPa = torch.nn.Conv2d(input_feat_dim, 256, kernel_size=
3, stride=2, padding=1)
elif backbone_name == 'superpoint':
self.convPa = torch.nn.Conv2d(input_feat_dim, 256, kernel_size=
3, stride=1, padding=1)
else:
raise ValueError('[Error] Unknown backbone option.')
self.convPb = torch.nn.Conv2d(256, 65, kernel_size=1, stride=1,
padding=0)
def forward(self, input_0):
primals_1 = self.convPa.weight
primals_2 = self.convPa.bias
primals_4 = self.convPb.weight
primals_5 = self.convPb.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
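# A minimal usage sketch, assuming a CUDA device: the compiled wrapper permutes the
# weights and activations into channels-last-style layouts for the fused kernels but
# keeps the public interface of SuperpointDecoder, so a (4, 128, 64, 64) CUDA feature
# map again yields (4, 65, 32, 32) logits.
if __name__ == '__main__' and torch.cuda.is_available():
    _decoder = SuperpointDecoderNew(input_feat_dim=128, backbone_name='lcnn').cuda()
    _semi = _decoder(torch.rand(4, 128, 64, 64, device='cuda'))
    assert _semi.shape == (4, 65, 32, 32)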
|
wx-b/SOLD2
|
SuperpointDecoder
| false
| 16,746
|
[
"MIT"
] | 347
|
71c3243f9d3a695788d0a6bfd134b9849425900a
|
https://github.com/wx-b/SOLD2/tree/71c3243f9d3a695788d0a6bfd134b9849425900a
|
WingLoss
|
import math
import torch
import torch.onnx
from torch.nn.modules.loss import _Loss
class WingLoss(_Loss):
def __init__(self, width=10, curvature=2.0, reduction='mean'):
super(WingLoss, self).__init__(reduction=reduction)
self.width = width
self.curvature = curvature
def forward(self, prediction, target):
return self.wing_loss(prediction, target, self.width, self.
curvature, self.reduction)
def wing_loss(self, prediction, target, width=10, curvature=2.0,
reduction='mean'):
diff_abs = (target - prediction).abs()
loss = diff_abs.clone()
idx_smaller = diff_abs < width
idx_bigger = diff_abs >= width
loss_smaller = width * torch.log(1 + diff_abs[idx_smaller] / curvature)
C = width - width * math.log(1 + width / curvature)
        loss_bigger = loss[idx_bigger] - C
        loss = torch.cat((loss_smaller, loss_bigger), 0)
if reduction == 'sum':
loss = loss.sum()
if reduction == 'mean':
loss = loss.mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
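# A minimal usage sketch: the wing loss uses width * log(1 + |diff| / curvature) for
# errors below width, and |diff| - C with C = width - width * log(1 + width / curvature)
# for larger errors. With inputs drawn from [0, 1) every |diff| is far below width=10,
# so only the log branch contributes here.
if __name__ == '__main__':
    _criterion = WingLoss(width=10, curvature=2.0, reduction='mean')
    print(_criterion(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)))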
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.onnx
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_clone_ge_lt_sub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 10.0
tmp5 = tmp3 < tmp4
tmp6 = tmp3 >= tmp4
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
tl.store(out_ptr2 + x0, tmp3, xmask)
tl.store(out_ptr3 + x0, tmp6, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_abs_clone_ge_lt_sub_0[grid(256)](arg0_1, arg1_1,
buf0, buf1, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0, buf1, buf2, buf3
class WingLossNew(_Loss):
def __init__(self, width=10, curvature=2.0, reduction='mean'):
super(WingLossNew, self).__init__(reduction=reduction)
self.width = width
self.curvature = curvature
def wing_loss(self, prediction, target, width=10, curvature=2.0,
reduction='mean'):
diff_abs = (target - prediction).abs()
loss = diff_abs.clone()
idx_smaller = diff_abs < width
idx_bigger = diff_abs >= width
loss_smaller = width * torch.log(1 + diff_abs[idx_smaller] / curvature)
C = width - width * math.log(1 + width / curvature)
        loss_bigger = loss[idx_bigger] - C
        loss = torch.cat((loss_smaller, loss_bigger), 0)
if reduction == 'sum':
loss = loss.sum()
if reduction == 'mean':
loss = loss.mean()
return loss
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
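# A minimal sketch of what the compiled wrapper returns, assuming a CUDA device: call()
# only fuses the element-wise |target - prediction| and the two branch masks (the
# data-dependent boolean indexing in wing_loss was not captured), so forward() yields
# the (4, 4, 4, 4) absolute-difference tensor rather than the reduced scalar that the
# wing_loss method kept on this class would produce.
if __name__ == '__main__' and torch.cuda.is_available():
    _criterion = WingLossNew()
    _out = _criterion(torch.rand(4, 4, 4, 4, device='cuda'),
                      torch.rand(4, 4, 4, 4, device='cuda'))
    assert _out.shape == (4, 4, 4, 4)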
|
xuguozhi/Peppa-Facial-Landmark-PyTorch
|
WingLoss
| false
| 16,747
|
[
"Apache-2.0"
] | 163
|
238063317fd31c4c21c5c43692e6a5d769970370
|
https://github.com/xuguozhi/Peppa-Facial-Landmark-PyTorch/tree/238063317fd31c4c21c5c43692e6a5d769970370
|
FC_Q
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FC_Q(nn.Module):
def __init__(self, state_dim, num_actions):
super(FC_Q, self).__init__()
self.q1 = nn.Linear(state_dim, 256)
self.q2 = nn.Linear(256, 256)
self.q3 = nn.Linear(256, num_actions)
self.i1 = nn.Linear(state_dim, 256)
self.i2 = nn.Linear(256, 256)
self.i3 = nn.Linear(256, num_actions)
def forward(self, state):
q = F.relu(self.q1(state))
q = F.relu(self.q2(q))
i = F.relu(self.i1(state))
i = F.relu(self.i2(i))
i = self.i3(i)
return self.q3(q), F.log_softmax(i, dim=1), i
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'num_actions': 4}]
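# A minimal usage sketch: FC_Q runs a Q-head and an imitation head over the last
# dimension of the state tensor and returns the Q-values, the log-softmax of the
# imitation logits (over dim=1), and the raw imitation logits, each matching the
# (..., num_actions) shape of the projected input.
if __name__ == '__main__':
    _net = FC_Q(state_dim=4, num_actions=4)
    _q, _log_pi, _i = _net(torch.rand(4, 4, 4, 4))
    assert _q.shape == _log_pi.shape == _i.shape == (4, 4, 4, 4)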
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 4), (4, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256), (256, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (4, 256), (256, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 256), (256, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf15 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf15, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 256), (1, 4), 0), out=buf3)
del primals_6
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf3
buf13 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf4,
primals_7, buf13, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf5 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_8, (256, 256), (1, 256), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf5
buf12 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf6,
primals_9, buf12, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf6, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_10, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf7)
del primals_11
buf8 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf14 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf8,
primals_5, buf14, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf8, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_12, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf9)
del primals_13
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf7, buf10, 256, XBLOCK
=256, num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_2[grid(256)](buf10, buf11, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf10
return (reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0),
buf11, reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(buf4, (64, 256), (256, 1), 0),
reinterpret_tensor(buf6, (64, 256), (256, 1), 0),
reinterpret_tensor(buf8, (64, 256), (256, 1), 0), buf11, primals_12,
primals_10, buf12, primals_8, buf13, buf14, primals_4, buf15)
class FC_QNew(nn.Module):
def __init__(self, state_dim, num_actions):
super(FC_QNew, self).__init__()
self.q1 = nn.Linear(state_dim, 256)
self.q2 = nn.Linear(256, 256)
self.q3 = nn.Linear(256, num_actions)
self.i1 = nn.Linear(state_dim, 256)
self.i2 = nn.Linear(256, 256)
self.i3 = nn.Linear(256, num_actions)
def forward(self, input_0):
primals_1 = self.q1.weight
primals_2 = self.q1.bias
primals_4 = self.q2.weight
primals_5 = self.q2.bias
primals_10 = self.q3.weight
primals_11 = self.q3.bias
primals_6 = self.i1.weight
primals_7 = self.i1.bias
primals_8 = self.i2.weight
primals_9 = self.i2.bias
primals_12 = self.i3.weight
primals_13 = self.i3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1], output[2]
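# A minimal usage sketch, assuming a CUDA device: the wrapper returns a triple shaped
# like FC_Q's outputs (Q-values, log-softmax logits, raw logits), with the boolean ReLU
# masks produced by call() kept only for the backward pass. A caveat: in the traced
# graph i3 is reached before q3, so primals_10..13 inside call() follow that order,
# while forward() above binds q3 to primals_10/11 -- the two heads may therefore come
# out swapped relative to the eager FC_Q.
if __name__ == '__main__' and torch.cuda.is_available():
    _net = FC_QNew(state_dim=4, num_actions=4).cuda()
    _q, _log_pi, _i = _net(torch.rand(4, 4, 4, 4, device='cuda'))
    assert _q.shape == _log_pi.shape == _i.shape == (4, 4, 4, 4)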
|
xtwentian3/BCQ
|
FC_Q
| false
| 16,748
|
[
"MIT"
] | 402
|
e114f8c474c57a36d9af78c42a06f612831afda2
|
https://github.com/xtwentian3/BCQ/tree/e114f8c474c57a36d9af78c42a06f612831afda2
|
EDCNN
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SobelConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=0, dilation=1, groups=1, bias=True, requires_grad=True):
assert kernel_size % 2 == 1, "SobelConv2d's kernel_size must be odd."
assert out_channels % 4 == 0, "SobelConv2d's out_channels must be a multiple of 4."
assert out_channels % groups == 0, "SobelConv2d's out_channels must be a multiple of groups."
super(SobelConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias if requires_grad else False
if self.bias:
self.bias = nn.Parameter(torch.zeros(size=(out_channels,),
dtype=torch.float32), requires_grad=True)
else:
self.bias = None
self.sobel_weight = nn.Parameter(torch.zeros(size=(out_channels,
int(in_channels / groups), kernel_size, kernel_size)),
requires_grad=False)
kernel_mid = kernel_size // 2
for idx in range(out_channels):
if idx % 4 == 0:
self.sobel_weight[idx, :, 0, :] = -1
self.sobel_weight[idx, :, 0, kernel_mid] = -2
self.sobel_weight[idx, :, -1, :] = 1
self.sobel_weight[idx, :, -1, kernel_mid] = 2
elif idx % 4 == 1:
self.sobel_weight[idx, :, :, 0] = -1
self.sobel_weight[idx, :, kernel_mid, 0] = -2
self.sobel_weight[idx, :, :, -1] = 1
self.sobel_weight[idx, :, kernel_mid, -1] = 2
elif idx % 4 == 2:
self.sobel_weight[idx, :, 0, 0] = -2
for i in range(0, kernel_mid + 1):
self.sobel_weight[idx, :, kernel_mid - i, i] = -1
self.sobel_weight[idx, :, kernel_size - 1 - i,
kernel_mid + i] = 1
self.sobel_weight[idx, :, -1, -1] = 2
else:
self.sobel_weight[idx, :, -1, 0] = -2
for i in range(0, kernel_mid + 1):
self.sobel_weight[idx, :, kernel_mid + i, i] = -1
self.sobel_weight[idx, :, i, kernel_mid + i] = 1
self.sobel_weight[idx, :, 0, -1] = 2
if requires_grad:
self.sobel_factor = nn.Parameter(torch.ones(size=(out_channels,
1, 1, 1), dtype=torch.float32), requires_grad=True)
else:
self.sobel_factor = nn.Parameter(torch.ones(size=(out_channels,
1, 1, 1), dtype=torch.float32), requires_grad=False)
def forward(self, x):
if torch.cuda.is_available():
self.sobel_factor = self.sobel_factor
if isinstance(self.bias, nn.Parameter):
self.bias = self.bias
sobel_weight = self.sobel_weight * self.sobel_factor
if torch.cuda.is_available():
sobel_weight = sobel_weight
out = F.conv2d(x, sobel_weight, self.bias, self.stride, self.
padding, self.dilation, self.groups)
return out
class EDCNN(nn.Module):
def __init__(self, in_ch=1, out_ch=32, sobel_ch=32):
super(EDCNN, self).__init__()
self.conv_sobel = SobelConv2d(in_ch, sobel_ch, kernel_size=3,
stride=1, padding=1, bias=True)
self.conv_p1 = nn.Conv2d(in_ch + sobel_ch, out_ch, kernel_size=1,
stride=1, padding=0)
self.conv_f1 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p2 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f2 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p3 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f3 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p4 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f4 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p5 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f5 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p6 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f6 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p7 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f7 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p8 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f8 = nn.Conv2d(out_ch, in_ch, kernel_size=3, stride=1,
padding=1)
self.relu = nn.LeakyReLU()
def forward(self, x):
out_0 = self.conv_sobel(x)
out_0 = torch.cat((x, out_0), dim=-3)
out_1 = self.relu(self.conv_p1(out_0))
out_1 = self.relu(self.conv_f1(out_1))
out_1 = torch.cat((out_0, out_1), dim=-3)
out_2 = self.relu(self.conv_p2(out_1))
out_2 = self.relu(self.conv_f2(out_2))
out_2 = torch.cat((out_0, out_2), dim=-3)
out_3 = self.relu(self.conv_p3(out_2))
out_3 = self.relu(self.conv_f3(out_3))
out_3 = torch.cat((out_0, out_3), dim=-3)
out_4 = self.relu(self.conv_p4(out_3))
out_4 = self.relu(self.conv_f4(out_4))
out_4 = torch.cat((out_0, out_4), dim=-3)
out_5 = self.relu(self.conv_p5(out_4))
out_5 = self.relu(self.conv_f5(out_5))
out_5 = torch.cat((out_0, out_5), dim=-3)
out_6 = self.relu(self.conv_p6(out_5))
out_6 = self.relu(self.conv_f6(out_6))
out_6 = torch.cat((out_0, out_6), dim=-3)
out_7 = self.relu(self.conv_p7(out_6))
out_7 = self.relu(self.conv_f7(out_7))
out_7 = torch.cat((out_0, out_7), dim=-3)
out_8 = self.relu(self.conv_p8(out_7))
out_8 = self.conv_f8(out_8)
out = self.relu(x + out_8)
return out
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
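# A minimal usage sketch: EDCNN runs an edge-enhancing Sobel convolution (fixed kernels
# scaled by a learnable factor), then eight 1x1/3x3 convolution pairs that each
# re-concatenate the Sobel block's output (a dense skip pattern), and ends with a
# residual LeakyReLU(x + out_8), so the output keeps the input shape.
if __name__ == '__main__':
    _edcnn = EDCNN(in_ch=1, out_ch=32, sobel_ch=32)
    _denoised = _edcnn(torch.rand(4, 1, 64, 64))
    assert _denoised.shape == (4, 1, 64, 64)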
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 9
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 33
x0 = xindex % 4096
x2 = xindex // 135168
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x2), tmp4, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 33, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-1 + x1) + 131072 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-1 + x1), tmp6, eviction_policy='evict_last',
other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 65
x0 = xindex % 4096
x2 = xindex // 266240
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 33, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 135168 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 65, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-33 + x1) + 131072 * x2), tmp6,
other=0.0).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (x0 + 4096 * (-33 + x1) + 131072 * x2), tmp6,
other=0.0)
tmp11 = tl.load(in_ptr3 + (-33 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.01
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp6, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp5, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_5(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_ptr1 + x0, None)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 0.01
tmp9 = tmp5 * tmp8
tmp10 = tl.where(tmp7, tmp5, tmp9)
tl.store(out_ptr0 + x0, tmp7, None)
tl.store(out_ptr1 + x0, tmp10, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_4, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_5, (32, 33, 1, 1), (33, 1, 1, 1))
assert_size_stride(primals_6, (32,), (1,))
assert_size_stride(primals_7, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_8, (32,), (1,))
assert_size_stride(primals_9, (32, 65, 1, 1), (65, 1, 1, 1))
assert_size_stride(primals_10, (32,), (1,))
assert_size_stride(primals_11, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_12, (32,), (1,))
assert_size_stride(primals_13, (32, 65, 1, 1), (65, 1, 1, 1))
assert_size_stride(primals_14, (32,), (1,))
assert_size_stride(primals_15, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_16, (32,), (1,))
assert_size_stride(primals_17, (32, 65, 1, 1), (65, 1, 1, 1))
assert_size_stride(primals_18, (32,), (1,))
assert_size_stride(primals_19, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_20, (32,), (1,))
assert_size_stride(primals_21, (32, 65, 1, 1), (65, 1, 1, 1))
assert_size_stride(primals_22, (32,), (1,))
assert_size_stride(primals_23, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_24, (32,), (1,))
assert_size_stride(primals_25, (32, 65, 1, 1), (65, 1, 1, 1))
assert_size_stride(primals_26, (32,), (1,))
assert_size_stride(primals_27, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_28, (32,), (1,))
assert_size_stride(primals_29, (32, 65, 1, 1), (65, 1, 1, 1))
assert_size_stride(primals_30, (32,), (1,))
assert_size_stride(primals_31, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_32, (32,), (1,))
assert_size_stride(primals_33, (32, 65, 1, 1), (65, 1, 1, 1))
assert_size_stride(primals_34, (32,), (1,))
assert_size_stride(primals_35, (1, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_36, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 1, 3, 3), (9, 9, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(288)](primals_3, primals_1, buf0, 288,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(primals_4, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf2 = empty_strided_cuda((4, 33, 64, 64), (135168, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_1[grid(540672)](primals_4, buf1, primals_2,
buf2, 540672, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf5 = buf1
del buf1
triton_poi_fused_convolution_leaky_relu_2[grid(524288)](buf3,
primals_6, buf4, buf5, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf3
del primals_6
buf6 = extern_kernels.convolution(buf5, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_3[grid(524288)](buf6,
primals_8, buf7, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 65, 64, 64), (266240, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_4[grid(1064960)](buf2, buf7, buf6, primals_8,
buf8, 1064960, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_8
buf9 = extern_kernels.convolution(buf8, primals_9, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf10 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf11 = buf6
del buf6
triton_poi_fused_convolution_leaky_relu_2[grid(524288)](buf9,
primals_10, buf10, buf11, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf9
del primals_10
buf12 = extern_kernels.convolution(buf11, primals_11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf13 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_3[grid(524288)](buf12,
primals_12, buf13, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
buf14 = empty_strided_cuda((4, 65, 64, 64), (266240, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_4[grid(1064960)](buf2, buf13, buf12,
primals_12, buf14, 1064960, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_12
buf15 = extern_kernels.convolution(buf14, primals_13, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf16 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf17 = buf12
del buf12
triton_poi_fused_convolution_leaky_relu_2[grid(524288)](buf15,
primals_14, buf16, buf17, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf15
del primals_14
buf18 = extern_kernels.convolution(buf17, primals_15, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf19 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_3[grid(524288)](buf18,
primals_16, buf19, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
buf20 = empty_strided_cuda((4, 65, 64, 64), (266240, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_4[grid(1064960)](buf2, buf19, buf18,
primals_16, buf20, 1064960, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_16
buf21 = extern_kernels.convolution(buf20, primals_17, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf22 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf23 = buf18
del buf18
triton_poi_fused_convolution_leaky_relu_2[grid(524288)](buf21,
primals_18, buf22, buf23, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf21
del primals_18
buf24 = extern_kernels.convolution(buf23, primals_19, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf25 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_3[grid(524288)](buf24,
primals_20, buf25, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
buf26 = empty_strided_cuda((4, 65, 64, 64), (266240, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_4[grid(1064960)](buf2, buf25, buf24,
primals_20, buf26, 1064960, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_20
buf27 = extern_kernels.convolution(buf26, primals_21, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf28 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf29 = buf24
del buf24
triton_poi_fused_convolution_leaky_relu_2[grid(524288)](buf27,
primals_22, buf28, buf29, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf27
del primals_22
buf30 = extern_kernels.convolution(buf29, primals_23, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf31 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_3[grid(524288)](buf30,
primals_24, buf31, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
buf32 = empty_strided_cuda((4, 65, 64, 64), (266240, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_4[grid(1064960)](buf2, buf31, buf30,
primals_24, buf32, 1064960, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_24
buf33 = extern_kernels.convolution(buf32, primals_25, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf34 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf35 = buf30
del buf30
triton_poi_fused_convolution_leaky_relu_2[grid(524288)](buf33,
primals_26, buf34, buf35, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf33
del primals_26
buf36 = extern_kernels.convolution(buf35, primals_27, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf37 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_3[grid(524288)](buf36,
primals_28, buf37, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
buf38 = empty_strided_cuda((4, 65, 64, 64), (266240, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_4[grid(1064960)](buf2, buf37, buf36,
primals_28, buf38, 1064960, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_28
buf39 = extern_kernels.convolution(buf38, primals_29, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf40 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf41 = buf36
del buf36
triton_poi_fused_convolution_leaky_relu_2[grid(524288)](buf39,
primals_30, buf40, buf41, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf39
del primals_30
buf42 = extern_kernels.convolution(buf41, primals_31, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf43 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_3[grid(524288)](buf42,
primals_32, buf43, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
buf44 = empty_strided_cuda((4, 65, 64, 64), (266240, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_4[grid(1064960)](buf2, buf43, buf42,
primals_32, buf44, 1064960, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_32
buf45 = extern_kernels.convolution(buf44, primals_33, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf46 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf47 = buf42
del buf42
triton_poi_fused_convolution_leaky_relu_2[grid(524288)](buf45,
primals_34, buf46, buf47, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf45
del primals_34
buf48 = extern_kernels.convolution(buf47, primals_35, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf49 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.bool)
buf50 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.float32)
triton_poi_fused_add_convolution_leaky_relu_5[grid(16384)](primals_4,
buf48, primals_36, buf49, buf50, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del buf48
del primals_36
return (buf50, primals_3, primals_4, primals_5, primals_7, primals_9,
primals_11, primals_13, primals_15, primals_17, primals_19,
primals_21, primals_23, primals_25, primals_27, primals_29,
primals_31, primals_33, primals_35, buf0, buf2, buf4, buf5, buf7,
buf8, buf10, buf11, buf13, buf14, buf16, buf17, buf19, buf20, buf22,
buf23, buf25, buf26, buf28, buf29, buf31, buf32, buf34, buf35,
buf37, buf38, buf40, buf41, buf43, buf44, buf46, buf47, buf49)
class SobelConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=0, dilation=1, groups=1, bias=True, requires_grad=True):
assert kernel_size % 2 == 1, "SobelConv2d's kernel_size must be odd."
assert out_channels % 4 == 0, "SobelConv2d's out_channels must be a multiple of 4."
assert out_channels % groups == 0, "SobelConv2d's out_channels must be a multiple of groups."
super(SobelConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias if requires_grad else False
if self.bias:
self.bias = nn.Parameter(torch.zeros(size=(out_channels,),
dtype=torch.float32), requires_grad=True)
else:
self.bias = None
self.sobel_weight = nn.Parameter(torch.zeros(size=(out_channels,
int(in_channels / groups), kernel_size, kernel_size)),
requires_grad=False)
kernel_mid = kernel_size // 2
for idx in range(out_channels):
if idx % 4 == 0:
self.sobel_weight[idx, :, 0, :] = -1
self.sobel_weight[idx, :, 0, kernel_mid] = -2
self.sobel_weight[idx, :, -1, :] = 1
self.sobel_weight[idx, :, -1, kernel_mid] = 2
elif idx % 4 == 1:
self.sobel_weight[idx, :, :, 0] = -1
self.sobel_weight[idx, :, kernel_mid, 0] = -2
self.sobel_weight[idx, :, :, -1] = 1
self.sobel_weight[idx, :, kernel_mid, -1] = 2
elif idx % 4 == 2:
self.sobel_weight[idx, :, 0, 0] = -2
for i in range(0, kernel_mid + 1):
self.sobel_weight[idx, :, kernel_mid - i, i] = -1
self.sobel_weight[idx, :, kernel_size - 1 - i,
kernel_mid + i] = 1
self.sobel_weight[idx, :, -1, -1] = 2
else:
self.sobel_weight[idx, :, -1, 0] = -2
for i in range(0, kernel_mid + 1):
self.sobel_weight[idx, :, kernel_mid + i, i] = -1
self.sobel_weight[idx, :, i, kernel_mid + i] = 1
self.sobel_weight[idx, :, 0, -1] = 2
if requires_grad:
self.sobel_factor = nn.Parameter(torch.ones(size=(out_channels,
1, 1, 1), dtype=torch.float32), requires_grad=True)
else:
self.sobel_factor = nn.Parameter(torch.ones(size=(out_channels,
1, 1, 1), dtype=torch.float32), requires_grad=False)
def forward(self, x):
if torch.cuda.is_available():
self.sobel_factor = self.sobel_factor
if isinstance(self.bias, nn.Parameter):
self.bias = self.bias
sobel_weight = self.sobel_weight * self.sobel_factor
if torch.cuda.is_available():
sobel_weight = sobel_weight
out = F.conv2d(x, sobel_weight, self.bias, self.stride, self.
padding, self.dilation, self.groups)
return out
class EDCNNNew(nn.Module):
def __init__(self, in_ch=1, out_ch=32, sobel_ch=32):
super(EDCNNNew, self).__init__()
self.conv_sobel = SobelConv2d(in_ch, sobel_ch, kernel_size=3,
stride=1, padding=1, bias=True)
self.conv_p1 = nn.Conv2d(in_ch + sobel_ch, out_ch, kernel_size=1,
stride=1, padding=0)
self.conv_f1 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p2 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f2 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p3 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f3 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p4 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f4 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p5 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f5 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p6 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f6 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p7 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f7 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1,
padding=1)
self.conv_p8 = nn.Conv2d(in_ch + sobel_ch + out_ch, out_ch,
kernel_size=1, stride=1, padding=0)
self.conv_f8 = nn.Conv2d(out_ch, in_ch, kernel_size=3, stride=1,
padding=1)
self.relu = nn.LeakyReLU()
def forward(self, input_0):
primals_2 = self.conv_sobel.bias
primals_3 = self.conv_sobel.sobel_weight
primals_1 = self.conv_sobel.sobel_factor
primals_5 = self.conv_p1.weight
primals_6 = self.conv_p1.bias
primals_7 = self.conv_f1.weight
primals_8 = self.conv_f1.bias
primals_9 = self.conv_p2.weight
primals_10 = self.conv_p2.bias
primals_11 = self.conv_f2.weight
primals_12 = self.conv_f2.bias
primals_13 = self.conv_p3.weight
primals_14 = self.conv_p3.bias
primals_15 = self.conv_f3.weight
primals_16 = self.conv_f3.bias
primals_17 = self.conv_p4.weight
primals_18 = self.conv_p4.bias
primals_19 = self.conv_f4.weight
primals_20 = self.conv_f4.bias
primals_21 = self.conv_p5.weight
primals_22 = self.conv_p5.bias
primals_23 = self.conv_f5.weight
primals_24 = self.conv_f5.bias
primals_25 = self.conv_p6.weight
primals_26 = self.conv_p6.bias
primals_27 = self.conv_f6.weight
primals_28 = self.conv_f6.bias
primals_29 = self.conv_p7.weight
primals_30 = self.conv_p7.bias
primals_31 = self.conv_f7.weight
primals_32 = self.conv_f7.bias
primals_33 = self.conv_p8.weight
primals_34 = self.conv_p8.bias
primals_35 = self.conv_f8.weight
primals_36 = self.conv_f8.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36])
return output[0]
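# A minimal usage sketch, assuming a CUDA device: EDCNNNew feeds all 36 weights, biases
# and Sobel tensors of the eager model through call(), which multiplies sobel_weight by
# sobel_factor once up front and fuses every concatenation and LeakyReLU into dedicated
# kernels; the interface and output shape match EDCNN.
if __name__ == '__main__' and torch.cuda.is_available():
    _edcnn = EDCNNNew(in_ch=1, out_ch=32, sobel_ch=32).cuda()
    _denoised = _edcnn(torch.rand(4, 1, 64, 64, device='cuda'))
    assert _denoised.shape == (4, 1, 64, 64)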
|
workingcoder/EDCNN
|
EDCNN
| false
| 16,749
|
[
"Apache-2.0"
] | 117
|
68305f465d2b731b60ce78bd0c95c7742d9f52d1
|
https://github.com/workingcoder/EDCNN/tree/68305f465d2b731b60ce78bd0c95c7742d9f52d1
|
Whitening2d
|
import torch
import torch.nn as nn
from torch.cuda.amp import custom_fwd
from torch.nn.functional import conv2d
class Whitening2d(nn.Module):
def __init__(self, output_dim: 'int', eps: 'float'=0.0):
"""Layer that computes hard whitening for W-MSE using the Cholesky decomposition.
Args:
            output_dim (int): number of dimensions of the projected features.
eps (float, optional): eps for numerical stability in Cholesky decomposition. Defaults
to 0.0.
"""
super(Whitening2d, self).__init__()
self.output_dim = output_dim
self.eps = eps
@custom_fwd(cast_inputs=torch.float32)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Performs whitening using the Cholesky decomposition.
Args:
x (torch.Tensor): a batch or slice of projected features.
Returns:
torch.Tensor: a batch or slice of whitened features.
"""
x = x.unsqueeze(2).unsqueeze(3)
m = x.mean(0).view(self.output_dim, -1).mean(-1).view(1, -1, 1, 1)
xn = x - m
T = xn.permute(1, 0, 2, 3).contiguous().view(self.output_dim, -1)
f_cov = torch.mm(T, T.permute(1, 0)) / (T.shape[-1] - 1)
eye = torch.eye(self.output_dim).type(f_cov.type())
f_cov_shrinked = (1 - self.eps) * f_cov + self.eps * eye
inv_sqrt = torch.triangular_solve(eye, torch.cholesky(
f_cov_shrinked), upper=False)[0]
inv_sqrt = inv_sqrt.contiguous().view(self.output_dim, self.
output_dim, 1, 1)
decorrelated = conv2d(xn, inv_sqrt)
return decorrelated.squeeze(2).squeeze(2)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'output_dim': 4}]
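# A minimal usage sketch: whitening maps the batch so that its feature covariance is
# (numerically) the identity -- y = L^{-1} (x - mean) with L the Cholesky factor of the
# covariance -- so the empirical covariance of the output should be close to eye(D).
# Note that torch.cholesky and torch.triangular_solve are deprecated in favour of their
# torch.linalg equivalents and emit warnings on recent PyTorch.
if __name__ == '__main__':
    _whiten = Whitening2d(output_dim=4, eps=0.0)
    _y = _whiten(torch.rand(16, 4))
    _cov = _y.t() @ _y / (_y.shape[0] - 1)
    print((_cov - torch.eye(4)).abs().max())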
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = 1.0
tmp11 = tmp9 / tmp10
tmp12 = tmp0 - tmp11
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_clone_view_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__to_copy_add_div_eye_mul_2(in_out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = 0.3333333333333333
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = x1
tmp6 = x0
tmp7 = tmp5 == tmp6
tmp8 = 0.0
tmp9 = tl.where(tmp7, tmp3, tmp8)
tmp10 = tmp9 * tmp8
tmp11 = tmp4 + tmp10
tl.store(in_out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused__to_copy_eye_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_clone_view_1[grid(4, 4)](buf0, buf1, 4, 4, XBLOCK=
4, YBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf2)
del buf1
buf3 = buf2
del buf2
triton_poi_fused__to_copy_add_div_eye_mul_2[grid(16)](buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = torch.ops.aten.cholesky.default(buf3)
buf5 = buf4
del buf4
buf6 = buf3
del buf3
triton_poi_fused__to_copy_eye_3[grid(16)](buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = torch.ops.aten.triangular_solve.default(buf6, buf5, False)
del buf5
buf8 = buf7[0]
del buf7
buf10 = buf6
del buf6
triton_poi_fused_clone_view_1[grid(4, 4)](buf8, buf10, 4, 4, XBLOCK
=4, YBLOCK=4, num_warps=1, num_stages=1)
del buf8
buf11 = extern_kernels.convolution(buf0, reinterpret_tensor(buf10,
(4, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 1, 1), (4, 1, 1, 1))
del buf0
del buf10
return reinterpret_tensor(buf11, (4, 4), (4, 1), 0),
class Whitening2dNew(nn.Module):
def __init__(self, output_dim: 'int', eps: 'float'=0.0):
"""Layer that computes hard whitening for W-MSE using the Cholesky decomposition.
Args:
            output_dim (int): number of dimensions of the projected features.
eps (float, optional): eps for numerical stability in Cholesky decomposition. Defaults
to 0.0.
"""
super(Whitening2dNew, self).__init__()
self.output_dim = output_dim
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
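# A minimal usage sketch, assuming a CUDA device. call() is specialized to the traced
# configuration: the (4, 4) input shape, the 1/(N-1) = 1/3 divisor and eps = 0.0 are
# hard-coded in the kernels above, so this wrapper only matches Whitening2d for that
# exact setting.
if __name__ == '__main__' and torch.cuda.is_available():
    _whiten = Whitening2dNew(output_dim=4, eps=0.0)
    print(_whiten(torch.rand(4, 4, device='cuda')).shape)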
|
xwyzsn/solo-learn
|
Whitening2d
| false
| 16,750
|
[
"MIT"
] | 693
|
16d021d8053439a3de205337ab2a11d191500b09
|
https://github.com/xwyzsn/solo-learn/tree/16d021d8053439a3de205337ab2a11d191500b09
|
SkipConnection
|
import torch
import torch.utils.data
import torch.nn as nn
def _init_weights(layer):
"""
Init weights of the layer
:param layer:
:return:
"""
nn.init.xavier_uniform_(layer.weight)
if layer.bias is not None:
nn.init.zeros_(layer.bias)
class SkipConnection(nn.Module):
"""
Connects the two given inputs with concatenation
:param in1: earlier input tensor of shape N x d1 x m x m
:param in2: later input tensor of shape N x d2 x m x m
:param in_features: d1+d2
:param out_features: output num of features
:return: Tensor of shape N x output_depth x m x m
"""
def __init__(self, in_features, out_features):
super().__init__()
self.conv = nn.Conv2d(in_features, out_features, kernel_size=1,
padding=0, bias=True)
_init_weights(self.conv)
def forward(self, in1, in2):
out = torch.cat((in1, in2), dim=1)
out = self.conv(out)
return out
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
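# Hedged usage sketch: SkipConnection concatenates the two inputs along the channel
# axis (d1 + d2 must equal in_features) and projects with a 1x1 convolution. The
# shapes mirror get_inputs()/get_init_inputs(); `_example_skip_connection` is an
# illustrative name only.
def _example_skip_connection():
    skip = SkipConnection(in_features=4, out_features=4)
    early = torch.rand(4, 1, 4, 4)
    late = torch.rand(4, 3, 4, 4)
    merged = skip(early, late)
    assert merged.shape == (4, 4, 4, 4)
    return merged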
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 48 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
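# Note: the kernel above fuses torch.cat((in1, in2), dim=1) -- output channel 0 is
# read from the single-channel tensor and channels 1-3 from the three-channel
# tensor, matching the shapes asserted in call() below.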
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
return buf2, primals_3, buf0
def _init_weights(layer):
"""
Init weights of the layer
:param layer:
:return:
"""
nn.init.xavier_uniform_(layer.weight)
if layer.bias is not None:
nn.init.zeros_(layer.bias)
class SkipConnectionNew(nn.Module):
"""
Connects the two given inputs with concatenation
:param in1: earlier input tensor of shape N x d1 x m x m
:param in2: later input tensor of shape N x d2 x m x m
:param in_features: d1+d2
:param out_features: output num of features
    :return: Tensor of shape N x out_features x m x m
"""
def __init__(self, in_features, out_features):
super().__init__()
self.conv = nn.Conv2d(in_features, out_features, kernel_size=1,
padding=0, bias=True)
_init_weights(self.conv)
def forward(self, input_0, input_1):
primals_3 = self.conv.weight
primals_4 = self.conv.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
xyc1207/benchmarking-gnns
|
SkipConnection
| false
| 16,751
|
[
"MIT"
] | 1,809
|
9ba25a2825e8c155a93730d6e8f8752090292942
|
https://github.com/xyc1207/benchmarking-gnns/tree/9ba25a2825e8c155a93730d6e8f8752090292942
|
SuperpointBackbone
|
import torch
import torch.nn as nn
class SuperpointBackbone(nn.Module):
""" SuperPoint backbone. """
def __init__(self):
super(SuperpointBackbone, self).__init__()
self.relu = torch.nn.ReLU(inplace=True)
self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
c1, c2, c3, c4 = 64, 64, 128, 128
self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1
)
self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1,
padding=1)
self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1,
padding=1)
self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1,
padding=1)
self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1,
padding=1)
self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1,
padding=1)
self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1,
padding=1)
self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1,
padding=1)
def forward(self, input_images):
x = self.relu(self.conv1a(input_images))
x = self.relu(self.conv1b(x))
x = self.pool(x)
x = self.relu(self.conv2a(x))
x = self.relu(self.conv2b(x))
x = self.pool(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
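# Hedged shape walk-through: each of the three MaxPool2d(kernel_size=2, stride=2)
# stages halves the spatial resolution, so the 1 x 64 x 64 input from get_inputs()
# comes out as a c4 = 128 channel map at 8 x 8. `_example_superpoint_shapes` is an
# illustrative name only.
def _example_superpoint_shapes():
    net = SuperpointBackbone()
    features = net(torch.rand(4, 1, 64, 64))
    assert features.shape == (4, 128, 8, 8)
    return features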
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp4, ymask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 32
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None)
tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 16
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 4096 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 4096 * x2), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + 128 * x1 + 4096 * x2), None)
tmp5 = tl.load(in_ptr0 + (2112 + x0 + 128 * x1 + 4096 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 8
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 4096 * x2), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 4096 * x2), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + 256 * x1 + 4096 * x2), None)
tmp5 = tl.load(in_ptr0 + (2176 + x0 + 256 * x1 + 4096 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 128
y1 = yindex // 128
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 8192 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 64 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 128 * x2 + 8192 * y1), tmp6, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(4096, 9)](primals_4, buf0, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_0[grid(4096, 9)](primals_6, buf1, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_0[grid(4096, 9)](primals_8, buf2, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_1[grid(8192, 9)](primals_10, buf3, 8192, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(16384, 9)](primals_12, buf4, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf5 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(16384, 9)](primals_14, buf5, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf6 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(16384, 9)](primals_16, buf6, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf7 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf8 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
torch.float32)
triton_poi_fused_convolution_relu_3[grid(256, 4096)](buf7,
primals_2, buf8, 256, 4096, XBLOCK=32, YBLOCK=32, num_warps=4,
num_stages=1)
del buf7
del primals_2
buf9 = extern_kernels.convolution(buf8, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_4[grid(1048576)](buf10, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf11 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.float32)
buf12 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(262144)](buf10,
buf11, buf12, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 64, 32, 32), (65536, 1, 2048, 64))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_6[grid(262144)](buf14, primals_7,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf15 = extern_kernels.convolution(buf14, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 32, 32), (65536, 1, 2048, 64))
buf16 = buf15
del buf15
triton_poi_fused_convolution_relu_6[grid(262144)](buf16, primals_9,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf17 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
torch.float32)
buf18 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_7[grid(65536)](buf16,
buf17, buf18, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf19 = extern_kernels.convolution(buf17, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 128, 16, 16), (32768, 1, 2048, 128))
buf20 = buf19
del buf19
triton_poi_fused_convolution_relu_8[grid(131072)](buf20, primals_11,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf21 = extern_kernels.convolution(buf20, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 128, 16, 16), (32768, 1, 2048, 128))
buf22 = buf21
del buf21
triton_poi_fused_convolution_relu_8[grid(131072)](buf22, primals_13,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
buf23 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.float32)
buf24 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_9[grid(32768)](buf22,
buf23, buf24, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf25 = extern_kernels.convolution(buf23, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf26 = buf25
del buf25
triton_poi_fused_convolution_relu_10[grid(32768)](buf26, primals_15,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf27 = extern_kernels.convolution(buf26, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf28 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.
float32)
buf29 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_11[grid(512, 64)](
buf27, primals_17, buf28, buf29, 512, 64, XBLOCK=32, YBLOCK=32,
num_warps=4, num_stages=1)
del buf27
del primals_17
return (buf28, primals_1, primals_3, buf0, buf1, buf2, buf3, buf4, buf5,
buf6, buf8, buf10, buf11, buf12, buf14, buf16, buf17, buf18, buf20,
buf22, buf23, buf24, buf26, buf29)
class SuperpointBackboneNew(nn.Module):
""" SuperPoint backbone. """
def __init__(self):
super(SuperpointBackboneNew, self).__init__()
self.relu = torch.nn.ReLU(inplace=True)
self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
c1, c2, c3, c4 = 64, 64, 128, 128
self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1
)
self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1,
padding=1)
self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1,
padding=1)
self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1,
padding=1)
self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1,
padding=1)
self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1,
padding=1)
self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1,
padding=1)
self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1,
padding=1)
def forward(self, input_0):
primals_1 = self.conv1a.weight
primals_2 = self.conv1a.bias
primals_4 = self.conv1b.weight
primals_5 = self.conv1b.bias
primals_6 = self.conv2a.weight
primals_7 = self.conv2a.bias
primals_8 = self.conv2b.weight
primals_9 = self.conv2b.bias
primals_10 = self.conv3a.weight
primals_11 = self.conv3a.bias
primals_12 = self.conv3b.weight
primals_13 = self.conv3b.bias
primals_14 = self.conv4a.weight
primals_15 = self.conv4a.bias
primals_16 = self.conv4b.weight
primals_17 = self.conv4b.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
|
wx-b/SOLD2
|
SuperpointBackbone
| false
| 16,752
|
[
"MIT"
] | 347
|
71c3243f9d3a695788d0a6bfd134b9849425900a
|
https://github.com/wx-b/SOLD2/tree/71c3243f9d3a695788d0a6bfd134b9849425900a
|
GateLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import *
class GateLayer(nn.Module):
def __init__(self, dim, target_dim=None, dropout=None):
super(GateLayer, self).__init__()
if target_dim is None:
target_dim = dim
self.linear_transform = False
else:
self.target_dim = target_dim
self.linear_transform = True
self.gate = nn.Conv1d(dim, target_dim, 1)
if self.linear_transform:
self.linear = nn.Conv1d(dim, target_dim, 1)
self.dropout = dropout
def forward(self, x):
tx = x.transpose(1, 2)
gate = F.sigmoid(self.gate(tx))
if self.linear_transform:
linear = self.linear(tx)
else:
linear = tx
res = (gate * linear).transpose(2, 1)
if self.dropout:
res = self.dropout(res)
return res
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
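# Hedged equivalence sketch: with target_dim left as None the layer is a pure gate,
# res = sigmoid(Conv1d(x^T)) * x^T transposed back, so the output keeps the input
# shape. `_example_gate_layer` is an illustrative name only.
def _example_gate_layer():
    layer = GateLayer(dim=4)
    x = torch.rand(4, 4, 4)
    tx = x.transpose(1, 2)
    expected = (torch.sigmoid(layer.gate(tx)) * tx).transpose(2, 1)
    assert torch.allclose(layer(x), expected, atol=1e-06)
    return layer(x)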
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_out_ptr0 + (x2 + 4 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + y0, ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
tl.store(out_ptr0 + (x2 + 4 * y3), tmp5, xmask & ymask)
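# Note: the kernel above fuses the bias add, the sigmoid gate, and the elementwise
# product with the transposed input -- it writes the biased pre-activation back in
# place (kept for backward) and the gated product to the output buffer.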
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1
del buf1
buf3 = buf0
del buf0
triton_poi_fused_convolution_mul_sigmoid_1[grid(16, 4)](buf2,
primals_3, primals_1, buf3, 16, 4, XBLOCK=4, YBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0
), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0
), buf2
class GateLayerNew(nn.Module):
def __init__(self, dim, target_dim=None, dropout=None):
super(GateLayerNew, self).__init__()
if target_dim is None:
target_dim = dim
self.linear_transform = False
else:
self.target_dim = target_dim
self.linear_transform = True
self.gate = nn.Conv1d(dim, target_dim, 1)
if self.linear_transform:
self.linear = nn.Conv1d(dim, target_dim, 1)
self.dropout = dropout
def forward(self, input_0):
primals_2 = self.gate.weight
primals_3 = self.gate.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
xycforgithub/MultiTask-MRC
|
GateLayer
| false
| 16,753
|
[
"BSD-3-Clause"
] | 105
|
6e5fe8b3cbc40058784cecad73219390e3c2a922
|
https://github.com/xycforgithub/MultiTask-MRC/tree/6e5fe8b3cbc40058784cecad73219390e3c2a922
|
C3D_td5
|
import torch
import torch.nn as nn
class Path(object):
@staticmethod
def db_dir(database):
if database == 'ucf101':
root_dir = (
'/Users/pingaowang/Google Drive/study/video_classification_research/datasets/UCF-101'
)
output_dir = DATA_PATH
return root_dir, output_dir
elif database == 'hmdb51':
root_dir = '/Path/to/hmdb-51'
output_dir = '/path/to/VAR/hmdb51'
return root_dir, output_dir
elif database == 'ucf_motion':
root_dir = (
'/Users/pingaowang/Google Drive/study/video_classification_research/datasets/UCF-101'
)
output_dir = DATA_PATH
return root_dir, output_dir
else:
raise NotImplementedError
@staticmethod
def model_dir():
return '/path/to/Models/c3d-pretrained.pth'
class C3D_td5(nn.Module):
"""
The C3D network.
"""
def __init__(self, num_classes, pretrained=False):
super(C3D_td5, self).__init__()
self.conv1 = nn.Conv3d(3, 64, kernel_size=(5, 3, 3), padding=(2, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv2 = nn.Conv3d(64, 128, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv3a = nn.Conv3d(128, 256, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.conv3b = nn.Conv3d(256, 256, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv4a = nn.Conv3d(256, 512, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.conv4b = nn.Conv3d(512, 512, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv5a = nn.Conv3d(512, 512, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.conv5b = nn.Conv3d(512, 512, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2),
padding=(0, 1, 1))
self.fc6 = nn.Linear(8192, 4096)
self.fc7 = nn.Linear(4096, 4096)
self.fc8 = nn.Linear(4096, num_classes)
self.dropout = nn.Dropout(p=0.5)
self.relu = nn.ReLU()
self.__init_weight()
if pretrained:
self.__load_pretrained_weights()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.pool1(x)
x = self.relu(self.conv2(x))
x = self.pool2(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool3(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
x = self.pool4(x)
x = self.relu(self.conv5a(x))
x = self.relu(self.conv5b(x))
x = self.pool5(x)
x = x.view(-1, 8192)
x = self.relu(self.fc6(x))
x = self.dropout(x)
x = self.relu(self.fc7(x))
x = self.dropout(x)
logits = self.fc8(x)
return logits
def __load_pretrained_weights(self):
"""Initialiaze network."""
corresp_name = {'features.0.weight': 'conv1.weight',
'features.0.bias': 'conv1.bias', 'features.3.weight':
'conv2.weight', 'features.3.bias': 'conv2.bias',
'features.6.weight': 'conv3a.weight', 'features.6.bias':
'conv3a.bias', 'features.8.weight': 'conv3b.weight',
'features.8.bias': 'conv3b.bias', 'features.11.weight':
'conv4a.weight', 'features.11.bias': 'conv4a.bias',
'features.13.weight': 'conv4b.weight', 'features.13.bias':
'conv4b.bias', 'features.16.weight': 'conv5a.weight',
'features.16.bias': 'conv5a.bias', 'features.18.weight':
'conv5b.weight', 'features.18.bias': 'conv5b.bias',
'classifier.0.weight': 'fc6.weight', 'classifier.0.bias':
'fc6.bias', 'classifier.3.weight': 'fc7.weight',
'classifier.3.bias': 'fc7.bias'}
p_dict = torch.load(Path.model_dir())
s_dict = self.state_dict()
for name in p_dict:
if name not in corresp_name:
continue
s_dict[corresp_name[name]] = p_dict[name]
self.load_state_dict(s_dict)
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def get_inputs():
return [torch.rand([4, 3, 64, 64, 64])]
def get_init_inputs():
return [[], {'num_classes': 4}]
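# Hedged shape note: for the 4 x 3 x 64 x 64 x 64 clips from get_inputs(), pool5
# leaves a 4 x 512 x 4 x 3 x 3 volume (512 * 4 * 3 * 3 = 18432 values per clip), so
# x.view(-1, 8192) regroups the 4 * 18432 = 73728 values into 9 rows instead of
# preserving the batch dimension, and the logits come out as (9, num_classes).
# `_example_c3d_td5_shapes` is an illustrative name only.
def _example_c3d_td5_shapes():
    net = C3D_td5(num_classes=4)
    logits = net(torch.rand(4, 3, 64, 64, 64))
    assert logits.shape == (9, 4)
    return logits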
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 262144 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 65536 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 8192 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 128 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 8192
x0 = xindex % 8192
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 9, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 8192 * x1), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 12, tl.int64)
tmp9 = 0.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, None)
@triton.jit
def triton_poi_fused_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22, primals_23
) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 5, 3, 3), (135, 45, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64, 64), (786432, 262144, 4096,
64, 1))
assert_size_stride(primals_4, (128, 64, 5, 3, 3), (2880, 45, 9, 3, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (256, 128, 5, 3, 3), (5760, 45, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 5, 3, 3), (11520, 45, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (512, 256, 5, 3, 3), (11520, 45, 9, 3, 1))
assert_size_stride(primals_11, (512,), (1,))
assert_size_stride(primals_12, (512, 512, 5, 3, 3), (23040, 45, 9, 3, 1))
assert_size_stride(primals_13, (512,), (1,))
assert_size_stride(primals_14, (512, 512, 5, 3, 3), (23040, 45, 9, 3, 1))
assert_size_stride(primals_15, (512,), (1,))
assert_size_stride(primals_16, (512, 512, 5, 3, 3), (23040, 45, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (4096, 8192), (8192, 1))
assert_size_stride(primals_19, (4096,), (1,))
assert_size_stride(primals_20, (4096, 4096), (4096, 1))
assert_size_stride(primals_21, (4096,), (1,))
assert_size_stride(primals_22, (4, 4096), (4096, 1))
assert_size_stride(primals_23, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1, 1), padding=(2, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64, 64), (16777216, 262144,
4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(67108864)](buf1, primals_2,
67108864, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [1, 2,
2], [1, 2, 2])
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1),
padding=(2, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 128, 64, 32, 32), (8388608, 65536,
1024, 32, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_1[grid(33554432)](buf6, primals_5,
33554432, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [2, 2,
2], [2, 2, 2])
buf8 = buf7[0]
buf9 = buf7[1]
del buf7
buf10 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1, 1
), padding=(2, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 32, 16, 16), (2097152, 8192, 256,
16, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_2[grid(8388608)](buf11, primals_7,
8388608, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1,
1), padding=(2, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 32, 16, 16), (2097152, 8192, 256,
16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_2[grid(8388608)](buf13, primals_9,
8388608, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf14 = torch.ops.aten.max_pool3d_with_indices.default(buf13, [2, 2,
2], [2, 2, 2])
buf15 = buf14[0]
buf16 = buf14[1]
del buf14
buf17 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1,
1), padding=(2, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 512, 16, 8, 8), (524288, 1024, 64, 8, 1))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_3[grid(2097152)](buf18,
primals_11, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1,
1), padding=(2, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 512, 16, 8, 8), (524288, 1024, 64, 8, 1))
buf20 = buf19
del buf19
triton_poi_fused_convolution_relu_3[grid(2097152)](buf20,
primals_13, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
buf21 = torch.ops.aten.max_pool3d_with_indices.default(buf20, [2, 2,
2], [2, 2, 2])
buf22 = buf21[0]
buf23 = buf21[1]
del buf21
buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1,
1), padding=(2, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 8, 4, 4), (65536, 128, 16, 4, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_4[grid(262144)](buf25, primals_15,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf26 = extern_kernels.convolution(buf25, primals_16, stride=(1, 1,
1), padding=(2, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 512, 8, 4, 4), (65536, 128, 16, 4, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_4[grid(262144)](buf27, primals_17,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf28 = torch.ops.aten.max_pool3d_with_indices.default(buf27, [2, 2,
2], [2, 2, 2], [0, 1, 1])
buf29 = buf28[0]
buf30 = buf28[1]
del buf28
buf31 = empty_strided_cuda((12, 8192), (8192, 1), torch.float32)
triton_poi_fused_5[grid(98304)](buf29, buf31, 98304, XBLOCK=512,
num_warps=8, num_stages=1)
buf32 = empty_strided_cuda((12, 4096), (4096, 1), torch.float32)
extern_kernels.mm(buf31, reinterpret_tensor(primals_18, (8192, 4096
), (1, 8192), 0), out=buf32)
del buf31
buf33 = empty_strided_cuda((9, 4096), (4096, 1), torch.float32)
triton_poi_fused_relu_6[grid(36864)](buf32, primals_19, buf33,
36864, XBLOCK=512, num_warps=4, num_stages=1)
del buf32
del primals_19
buf34 = empty_strided_cuda((9, 4096), (4096, 1), torch.float32)
extern_kernels.mm(buf33, reinterpret_tensor(primals_20, (4096, 4096
), (1, 4096), 0), out=buf34)
buf35 = buf34
del buf34
triton_poi_fused_relu_7[grid(36864)](buf35, primals_21, 36864,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_21
buf36 = empty_strided_cuda((9, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_23, buf35, reinterpret_tensor(
primals_22, (4096, 4), (1, 4096), 0), alpha=1, beta=1, out=buf36)
del primals_23
return (buf36, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, buf1, buf3, buf4,
buf6, buf8, buf9, buf11, buf13, buf15, buf16, buf18, buf20, buf22,
buf23, buf25, buf27, buf30, reinterpret_tensor(buf29, (9, 8192), (
8192, 1), 0), buf33, buf35, primals_22, primals_20, primals_18)
class Path(object):
@staticmethod
def db_dir(database):
if database == 'ucf101':
root_dir = (
'/Users/pingaowang/Google Drive/study/video_classification_research/datasets/UCF-101'
)
output_dir = DATA_PATH
return root_dir, output_dir
elif database == 'hmdb51':
root_dir = '/Path/to/hmdb-51'
output_dir = '/path/to/VAR/hmdb51'
return root_dir, output_dir
elif database == 'ucf_motion':
root_dir = (
'/Users/pingaowang/Google Drive/study/video_classification_research/datasets/UCF-101'
)
output_dir = DATA_PATH
return root_dir, output_dir
else:
raise NotImplementedError
@staticmethod
def model_dir():
return '/path/to/Models/c3d-pretrained.pth'
class C3D_td5New(nn.Module):
"""
The C3D network.
"""
def __init__(self, num_classes, pretrained=False):
super(C3D_td5New, self).__init__()
self.conv1 = nn.Conv3d(3, 64, kernel_size=(5, 3, 3), padding=(2, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv2 = nn.Conv3d(64, 128, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv3a = nn.Conv3d(128, 256, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.conv3b = nn.Conv3d(256, 256, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv4a = nn.Conv3d(256, 512, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.conv4b = nn.Conv3d(512, 512, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv5a = nn.Conv3d(512, 512, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.conv5b = nn.Conv3d(512, 512, kernel_size=(5, 3, 3), padding=(2,
1, 1))
self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2),
padding=(0, 1, 1))
self.fc6 = nn.Linear(8192, 4096)
self.fc7 = nn.Linear(4096, 4096)
self.fc8 = nn.Linear(4096, num_classes)
self.dropout = nn.Dropout(p=0.5)
self.relu = nn.ReLU()
self.__init_weight()
if pretrained:
self.__load_pretrained_weights()
def __load_pretrained_weights(self):
"""Initialiaze network."""
corresp_name = {'features.0.weight': 'conv1.weight',
'features.0.bias': 'conv1.bias', 'features.3.weight':
'conv2.weight', 'features.3.bias': 'conv2.bias',
'features.6.weight': 'conv3a.weight', 'features.6.bias':
'conv3a.bias', 'features.8.weight': 'conv3b.weight',
'features.8.bias': 'conv3b.bias', 'features.11.weight':
'conv4a.weight', 'features.11.bias': 'conv4a.bias',
'features.13.weight': 'conv4b.weight', 'features.13.bias':
'conv4b.bias', 'features.16.weight': 'conv5a.weight',
'features.16.bias': 'conv5a.bias', 'features.18.weight':
'conv5b.weight', 'features.18.bias': 'conv5b.bias',
'classifier.0.weight': 'fc6.weight', 'classifier.0.bias':
'fc6.bias', 'classifier.3.weight': 'fc7.weight',
'classifier.3.bias': 'fc7.bias'}
p_dict = torch.load(Path.model_dir())
s_dict = self.state_dict()
for name in p_dict:
if name not in corresp_name:
continue
s_dict[corresp_name[name]] = p_dict[name]
self.load_state_dict(s_dict)
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3a.weight
primals_7 = self.conv3a.bias
primals_8 = self.conv3b.weight
primals_9 = self.conv3b.bias
primals_10 = self.conv4a.weight
primals_11 = self.conv4a.bias
primals_12 = self.conv4b.weight
primals_13 = self.conv4b.bias
primals_14 = self.conv5a.weight
primals_15 = self.conv5a.bias
primals_16 = self.conv5b.weight
primals_17 = self.conv5b.bias
primals_18 = self.fc6.weight
primals_19 = self.fc6.bias
primals_20 = self.fc7.weight
primals_21 = self.fc7.bias
primals_22 = self.fc8.weight
primals_23 = self.fc8.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23])
return output[0]
|
pingaowang/pytorch-video-recognition
|
C3D_td5
| false
| 16,754
|
[
"MIT"
] | 946
|
096267f88d96a77a74ff743fb0115d997e2cdafd
|
https://github.com/pingaowang/pytorch-video-recognition/tree/096267f88d96a77a74ff743fb0115d997e2cdafd
|
LayerNorm
|
import torch
import torch.utils.data
import torch.nn as nn
class LayerNorm(nn.Module):
def __init__(self, d):
super().__init__()
self.a = nn.Parameter(torch.ones(d).unsqueeze(0).unsqueeze(0))
self.b = nn.Parameter(torch.zeros(d).unsqueeze(0).unsqueeze(0))
def forward(self, x):
mean = x.mean(dim=(0, 1), keepdim=True)
var = x.var(dim=(0, 1), keepdim=True, unbiased=False)
x = self.a * (x - mean) / torch.sqrt(var + 1e-06) + self.b
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d': 4}]
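# Hedged note: unlike nn.LayerNorm, the statistics here are taken jointly over the
# first two dimensions (dim=(0, 1)) while the learned scale/shift broadcast over the
# last dimension, i.e. y = a * (x - mean) / sqrt(var + 1e-6) + b. The check below is
# a sketch; `_example_layer_norm` is an illustrative name only.
def _example_layer_norm():
    ln = LayerNorm(d=4)
    x = torch.rand(4, 4, 4, 4)
    mean = x.mean(dim=(0, 1), keepdim=True)
    var = x.var(dim=(0, 1), keepdim=True, unbiased=False)
    expected = ln.a * (x - mean) / torch.sqrt(var + 1e-06) + ln.b
    assert torch.allclose(ln(x), expected)
    return ln(x)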
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = tmp18 / tmp19
tmp22 = 1e-06
tmp23 = tmp21 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp24, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex
x4 = xindex % 16
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp4 = tmp0 * tmp3
tmp6 = tmp4 / tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
buf5 = buf3
del buf3
get_raw_stream(0)
triton_per_fused_add_mean_sqrt_var_0[grid(16)](buf1, buf5,
primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_sub_1[grid(256)](primals_2, primals_1,
buf1, buf5, primals_3, buf6, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf6, primals_1, buf1, buf5
class LayerNormNew(nn.Module):
def __init__(self, d):
super().__init__()
self.a = nn.Parameter(torch.ones(d).unsqueeze(0).unsqueeze(0))
self.b = nn.Parameter(torch.zeros(d).unsqueeze(0).unsqueeze(0))
def forward(self, input_0):
primals_2 = self.a
primals_3 = self.b
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
xyc1207/benchmarking-gnns
|
LayerNorm
| false
| 16,755
|
[
"MIT"
] | 1,809
|
9ba25a2825e8c155a93730d6e8f8752090292942
|
https://github.com/xyc1207/benchmarking-gnns/tree/9ba25a2825e8c155a93730d6e8f8752090292942
|
PSA_p
|
import torch
import torch.nn as nn
import torch._utils
def kaiming_init(module, a=0, mode='fan_out', nonlinearity='relu', bias=0,
distribution='normal'):
assert distribution in ['uniform', 'normal']
if distribution == 'uniform':
nn.init.kaiming_uniform_(module.weight, a=a, mode=mode,
nonlinearity=nonlinearity)
else:
nn.init.kaiming_normal_(module.weight, a=a, mode=mode, nonlinearity
=nonlinearity)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
class PSA_p(nn.Module):
def __init__(self, inplanes, planes, kernel_size=1, stride=1):
super(PSA_p, self).__init__()
self.inplanes = inplanes
self.inter_planes = planes // 2
self.planes = planes
self.kernel_size = kernel_size
self.stride = stride
self.padding = (kernel_size - 1) // 2
self.conv_q_right = nn.Conv2d(self.inplanes, 1, kernel_size=1,
stride=stride, padding=0, bias=False)
self.conv_v_right = nn.Conv2d(self.inplanes, self.inter_planes,
kernel_size=1, stride=stride, padding=0, bias=False)
self.conv_up = nn.Conv2d(self.inter_planes, self.planes,
kernel_size=1, stride=1, padding=0, bias=False)
self.softmax_right = nn.Softmax(dim=2)
self.sigmoid = nn.Sigmoid()
self.conv_q_left = nn.Conv2d(self.inplanes, self.inter_planes,
kernel_size=1, stride=stride, padding=0, bias=False)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_v_left = nn.Conv2d(self.inplanes, self.inter_planes,
kernel_size=1, stride=stride, padding=0, bias=False)
self.softmax_left = nn.Softmax(dim=2)
self.reset_parameters()
def reset_parameters(self):
kaiming_init(self.conv_q_right, mode='fan_in')
kaiming_init(self.conv_v_right, mode='fan_in')
kaiming_init(self.conv_q_left, mode='fan_in')
kaiming_init(self.conv_v_left, mode='fan_in')
self.conv_q_right.inited = True
self.conv_v_right.inited = True
self.conv_q_left.inited = True
self.conv_v_left.inited = True
def spatial_pool(self, x):
input_x = self.conv_v_right(x)
batch, channel, height, width = input_x.size()
input_x = input_x.view(batch, channel, height * width)
context_mask = self.conv_q_right(x)
context_mask = context_mask.view(batch, 1, height * width)
context_mask = self.softmax_right(context_mask)
context = torch.matmul(input_x, context_mask.transpose(1, 2))
context = context.unsqueeze(-1)
context = self.conv_up(context)
mask_ch = self.sigmoid(context)
out = x * mask_ch
return out
def channel_pool(self, x):
g_x = self.conv_q_left(x)
batch, channel, height, width = g_x.size()
avg_x = self.avg_pool(g_x)
batch, channel, avg_x_h, avg_x_w = avg_x.size()
avg_x = avg_x.view(batch, channel, avg_x_h * avg_x_w).permute(0, 2, 1)
theta_x = self.conv_v_left(x).view(batch, self.inter_planes, height *
width)
context = torch.matmul(avg_x, theta_x)
context = self.softmax_left(context)
context = context.view(batch, 1, height, width)
mask_sp = self.sigmoid(context)
out = x * mask_sp
return out
def forward(self, x):
context_channel = self.spatial_pool(x)
context_spatial = self.channel_pool(x)
out = context_spatial + context_channel
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
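# Hedged shape walk-through for get_init_inputs() (inplanes=planes=4, so
# inter_planes=2): spatial_pool computes a (B, C/2, HW) x (B, HW, 1) matmul,
# projects it back to C channels and sigmoids it into a per-channel mask, while
# channel_pool computes a (B, 1, C/2) x (B, C/2, HW) matmul and sigmoids it into a
# per-pixel mask; both masks multiply x and the two branches are summed.
# `_example_psa_p_shapes` is an illustrative name only.
def _example_psa_p_shapes():
    psa = PSA_p(inplanes=4, planes=4)
    x = torch.rand(4, 4, 4, 4)
    out = psa(x)
    assert out.shape == x.shape
    return out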
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch._utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_per_fused_mean_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 8
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x4 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp4 / tmp5
tmp7 = tl.sigmoid(tmp6)
tmp8 = tmp0 * tmp7
tmp10 = tl.sigmoid(tmp9)
tmp11 = tmp0 * tmp10
tmp12 = tmp8 + tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, 2, 1, 1), (2, 1, 1, 1))
assert_size_stride(primals_5, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (2, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 4, 4), (32, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_2, primals_3, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf4 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(4)](buf1, buf4, 4, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf5 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 2, 16), (32, 16, 1),
0), reinterpret_tensor(buf4, (4, 16, 1), (16, 1, 16), 0), out=buf5)
buf6 = extern_kernels.convolution(reinterpret_tensor(buf5, (4, 2, 1,
1), (2, 1, 1, 1), 0), primals_4, stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 1, 1), (4, 1, 1, 1))
buf7 = extern_kernels.convolution(primals_2, primals_5, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 2, 4, 4), (32, 16, 4, 1))
buf8 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
buf10 = buf8
del buf8
triton_per_fused_mean_1[grid(8)](buf10, buf7, 8, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf7
buf9 = extern_kernels.convolution(primals_2, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 2, 4, 4), (32, 16, 4, 1))
buf11 = reinterpret_tensor(buf1, (4, 1, 16), (16, 16, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf10, (4, 1, 2), (2, 0, 1),
0), reinterpret_tensor(buf9, (4, 2, 16), (32, 16, 1), 0), out=buf11
)
buf12 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
buf13 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused__softmax_2[grid(4)](buf11, buf12, buf13, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_3[grid(256)](primals_2, buf11,
buf12, buf13, buf6, buf14, 256, XBLOCK=128, num_warps=4,
num_stages=1)
return (buf14, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, buf4, reinterpret_tensor(buf5, (4, 2, 1, 1), (2, 1, 1, 1
), 0), buf6, buf11, buf12, buf13, reinterpret_tensor(buf10, (4, 2,
1), (2, 1, 1), 0), reinterpret_tensor(buf9, (4, 16, 2), (32, 1, 16),
0), reinterpret_tensor(buf0, (4, 16, 2), (32, 1, 16), 0))
def kaiming_init(module, a=0, mode='fan_out', nonlinearity='relu', bias=0,
distribution='normal'):
assert distribution in ['uniform', 'normal']
if distribution == 'uniform':
nn.init.kaiming_uniform_(module.weight, a=a, mode=mode,
nonlinearity=nonlinearity)
else:
nn.init.kaiming_normal_(module.weight, a=a, mode=mode, nonlinearity
=nonlinearity)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
class PSA_pNew(nn.Module):
def __init__(self, inplanes, planes, kernel_size=1, stride=1):
super(PSA_pNew, self).__init__()
self.inplanes = inplanes
self.inter_planes = planes // 2
self.planes = planes
self.kernel_size = kernel_size
self.stride = stride
self.padding = (kernel_size - 1) // 2
self.conv_q_right = nn.Conv2d(self.inplanes, 1, kernel_size=1,
stride=stride, padding=0, bias=False)
self.conv_v_right = nn.Conv2d(self.inplanes, self.inter_planes,
kernel_size=1, stride=stride, padding=0, bias=False)
self.conv_up = nn.Conv2d(self.inter_planes, self.planes,
kernel_size=1, stride=1, padding=0, bias=False)
self.softmax_right = nn.Softmax(dim=2)
self.sigmoid = nn.Sigmoid()
self.conv_q_left = nn.Conv2d(self.inplanes, self.inter_planes,
kernel_size=1, stride=stride, padding=0, bias=False)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_v_left = nn.Conv2d(self.inplanes, self.inter_planes,
kernel_size=1, stride=stride, padding=0, bias=False)
self.softmax_left = nn.Softmax(dim=2)
self.reset_parameters()
def reset_parameters(self):
kaiming_init(self.conv_q_right, mode='fan_in')
kaiming_init(self.conv_v_right, mode='fan_in')
kaiming_init(self.conv_q_left, mode='fan_in')
kaiming_init(self.conv_v_left, mode='fan_in')
self.conv_q_right.inited = True
self.conv_v_right.inited = True
self.conv_q_left.inited = True
self.conv_v_left.inited = True
def spatial_pool(self, x):
input_x = self.conv_v_right(x)
batch, channel, height, width = input_x.size()
input_x = input_x.view(batch, channel, height * width)
context_mask = self.conv_q_right(x)
context_mask = context_mask.view(batch, 1, height * width)
context_mask = self.softmax_right(context_mask)
context = torch.matmul(input_x, context_mask.transpose(1, 2))
context = context.unsqueeze(-1)
context = self.conv_up(context)
mask_ch = self.sigmoid(context)
out = x * mask_ch
return out
def channel_pool(self, x):
g_x = self.conv_q_left(x)
batch, channel, height, width = g_x.size()
avg_x = self.avg_pool(g_x)
batch, channel, avg_x_h, avg_x_w = avg_x.size()
avg_x = avg_x.view(batch, channel, avg_x_h * avg_x_w).permute(0, 2, 1)
theta_x = self.conv_v_left(x).view(batch, self.inter_planes, height *
width)
context = torch.matmul(avg_x, theta_x)
context = self.softmax_left(context)
context = context.view(batch, 1, height, width)
mask_sp = self.sigmoid(context)
out = x * mask_sp
return out
def forward(self, input_0):
primals_3 = self.conv_q_right.weight
primals_1 = self.conv_v_right.weight
primals_4 = self.conv_up.weight
primals_5 = self.conv_q_left.weight
primals_6 = self.conv_v_left.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
xuewengeophysics/PSA
|
PSA_p
| false
| 16,756
|
[
"Apache-2.0"
] | 175
|
06ee556de4e88ecc2a162bd89f9dd494407e3051
|
https://github.com/xuewengeophysics/PSA/tree/06ee556de4e88ecc2a162bd89f9dd494407e3051
|
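A minimal smoke-test sketch for the record above, assuming CUDA is available and that PSA_pNew together with the get_inputs/get_init_inputs helpers from this record are in scope; the helper name and the shape check are illustrative additions, not part of the original record. The check simply reflects that the block applies channel and spatial attention multiplicatively, so the input layout is preserved.
import torch
def _smoke_test_psa_p():
    # Build the module with the init arguments this record uses (inplanes=4, planes=4).
    init_args, init_kwargs = get_init_inputs()
    model = PSA_pNew(*init_args, **init_kwargs).cuda()
    # Run the Triton-lowered forward on the record's random NCHW input.
    x, = get_inputs()
    with torch.no_grad():
        out = model(x.cuda())
    # Both attention branches multiply the input by a mask, so the shape is unchanged.
    assert out.shape == x.shape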
ZeroConv2d
|
import torch
import torch.nn as nn
import torch.utils.data
from torch.nn import init
class ZeroConv2d(nn.Module):
def __init__(self, in_channel, out_channel):
super().__init__()
self.conv = nn.Conv2d(in_channel, out_channel, 1, padding=0)
init.uniform_(self.conv.weight, -0.001, 0.001)
init.uniform_(self.conv.bias, -0.001, 0.001)
self.scale = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
def forward(self, x):
out = self.conv(x)
out = out * torch.exp(self.scale * 3)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 3.0
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_exp_mul_0[grid(256)](buf1, primals_2,
primals_4, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf2, primals_1, primals_3, primals_4, buf1
class ZeroConv2dNew(nn.Module):
def __init__(self, in_channel, out_channel):
super().__init__()
self.conv = nn.Conv2d(in_channel, out_channel, 1, padding=0)
init.uniform_(self.conv.weight, -0.001, 0.001)
init.uniform_(self.conv.bias, -0.001, 0.001)
self.scale = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
def forward(self, input_0):
primals_4 = self.scale
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
yhgon/NanoFlow
|
ZeroConv2d
| false
| 16,757
|
[
"BSD-3-Clause"
] | 62
|
73b24dfd4d607e73d6167897b83e9f61fcaaca3b
|
https://github.com/yhgon/NanoFlow/tree/73b24dfd4d607e73d6167897b83e9f61fcaaca3b
|
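A short sketch of the zero-initialised scale behaviour, assuming CUDA is available and ZeroConv2dNew from the record above is in scope; the eager reference expression and the tolerance are illustrative choices, not part of the original record. Because self.scale starts at zero, exp(scale * 3) is exactly 1 at initialisation, so the module initially reduces to a 1x1 convolution with very small weights.
import torch
def _check_zero_conv2d():
    torch.manual_seed(0)
    m = ZeroConv2dNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        out = m(x)                                # fused Triton path
        ref = m.conv(x) * torch.exp(m.scale * 3)  # eager form of the same formula
    # scale is zero-initialised, so the exp factor is 1 and both paths should agree closely.
    assert torch.allclose(out, ref, atol=1e-5)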
ManifoldPropagation
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def shift(x, direction, amount):
if direction == 'left':
ret = F.pad(x, (amount, 0, 0, 0, 0, 0, 0, 0))[:, :, :, :-amount]
elif direction == 'right':
ret = F.pad(x, (0, amount, 0, 0, 0, 0, 0, 0))[:, :, :, amount:]
elif direction == 'top':
ret = F.pad(x, (0, 0, amount, 0, 0, 0, 0, 0))[:, :, :-amount, :]
elif direction == 'bottom':
ret = F.pad(x, (0, 0, 0, amount, 0, 0, 0, 0))[:, :, amount:, :]
    else:
        raise ValueError(f'unknown shift direction: {direction}')
return ret
class ManifoldPropagation(nn.Module):
def __init__(self, ic, k_hop=3, stride=1):
super().__init__()
self.k = nn.Conv2d(ic, ic, kernel_size=1, padding=0)
self.q = nn.Conv2d(ic, ic, kernel_size=1, padding=0)
self.v = nn.Conv2d(ic, ic, kernel_size=1, padding=0)
self.k_hop = k_hop
self.stride = stride
self.normalize = nn.Softmax(dim=1)
self.aggregate = nn.Conv2d(ic, ic, kernel_size=1)
def forward(self, x):
k = self.k(x)
q = self.q(x)
v = self.v(x)
_batch_size, _channel, _h, _w = x.shape
xl = shift(k, 'left', self.stride)
xr = shift(k, 'right', self.stride)
xt = shift(k, 'top', self.stride)
xb = shift(k, 'bottom', self.stride)
l = (q * xl).sum(1, keepdim=True)
r = (q * xr).sum(1, keepdim=True)
t = (q * xt).sum(1, keepdim=True)
b = (q * xb).sum(1, keepdim=True)
m = torch.ones_like(l)
A = self.normalize(torch.cat((l, r, t, b, m), dim=1))
l = A[:, 0:1]
r = A[:, 1:2]
t = A[:, 2:3]
b = A[:, 3:4]
m = A[:, 4:5]
for _ in range(self.k_hop):
v = self.propagation(v, l, r, t, b, m)
v = self.aggregate(v)
return v
def propagation(self, x, l, r, t, b, m):
p = l * shift(x, 'right', self.stride) + r * shift(x, 'left', self.
stride) + t * shift(x, 'bottom', self.stride) + b * shift(x,
'top', self.stride) + m * x
return p
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ic': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x6 = xindex // 5
x2 = xindex // 20 % 4
x7 = xindex
x5 = xindex // 4 % 5
x8 = xindex // 20
x9 = xindex % 20
tmp0 = -1 + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + (-1 + x0 + 4 * x6), tmp2 & xmask, other=0.0)
tmp4 = tl.load(in_ptr1 + x2, tmp2 & xmask, eviction_policy='evict_last',
other=0.0)
tmp5 = tmp3 + tmp4
tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype)
tmp7 = tl.where(tmp2, tmp5, tmp6)
tmp8 = x0
tmp9 = tl.full([1], 4, tl.int64)
tmp10 = tmp8 < tmp9
tmp11 = tl.load(in_ptr0 + (x0 + 4 * x6), tmp10 & xmask, other=0.0)
tmp12 = tl.load(in_ptr1 + x2, tmp10 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp10, tmp13, tmp14)
tmp16 = -1 + x5
tmp17 = tmp16 >= tmp1
tmp18 = tl.load(in_ptr0 + (-4 + x9 + 16 * x8), tmp17 & xmask, other=0.0)
tmp19 = tl.load(in_ptr1 + x2, tmp17 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp17, tmp20, tmp21)
tmp23 = x5
tmp24 = tmp23 < tmp9
tmp25 = tl.load(in_ptr0 + (x9 + 16 * x8), tmp24 & xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + x2, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp24, tmp27, tmp28)
tl.store(out_ptr0 + x7, tmp7, xmask)
tl.store(out_ptr1 + x7, tmp15, xmask)
tl.store(out_ptr2 + x7, tmp22, xmask)
tl.store(out_ptr3 + x7, tmp29, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 5
x3 = xindex // 80
x4 = xindex % 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + 64 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0 + 5 * x1 + 80 * x3), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 * tmp6
tmp8 = tl.load(in_ptr0 + (16 + x4 + 64 * x3), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + (20 + x0 + 5 * x1 + 80 * x3), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 * tmp9
tmp11 = tmp7 + tmp10
tmp12 = tl.load(in_ptr0 + (32 + x4 + 64 * x3), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tl.load(in_ptr1 + (40 + x0 + 5 * x1 + 80 * x3), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = tl.load(in_ptr0 + (48 + x4 + 64 * x3), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr1 + (60 + x0 + 5 * x1 + 80 * x3), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp15 + tmp18
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp4, tmp19, tmp20)
tmp22 = tmp0 >= tmp3
tmp23 = tl.full([1], 2, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tl.load(in_ptr0 + (x4 + 64 * x3), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr2 + (1 + x0 + 5 * x1 + 80 * x3), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 * tmp27
tmp29 = tl.load(in_ptr0 + (16 + x4 + 64 * x3), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp30 = tl.load(in_ptr2 + (21 + x0 + 5 * x1 + 80 * x3), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp31 = tmp29 * tmp30
tmp32 = tmp28 + tmp31
tmp33 = tl.load(in_ptr0 + (32 + x4 + 64 * x3), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp34 = tl.load(in_ptr2 + (41 + x0 + 5 * x1 + 80 * x3), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tmp33 * tmp34
tmp36 = tmp32 + tmp35
tmp37 = tl.load(in_ptr0 + (48 + x4 + 64 * x3), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr2 + (61 + x0 + 5 * x1 + 80 * x3), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp39 = tmp37 * tmp38
tmp40 = tmp36 + tmp39
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp25, tmp40, tmp41)
tmp43 = tmp0 >= tmp23
tmp44 = tl.full([1], 3, tl.int64)
tmp45 = tmp0 < tmp44
tmp46 = tmp43 & tmp45
tmp47 = tl.load(in_ptr0 + (x4 + 64 * x3), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp48 = tl.load(in_ptr3 + (x4 + 80 * x3), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp49 = tmp47 * tmp48
tmp50 = tl.load(in_ptr0 + (16 + x4 + 64 * x3), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp51 = tl.load(in_ptr3 + (20 + x4 + 80 * x3), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tl.load(in_ptr0 + (32 + x4 + 64 * x3), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp55 = tl.load(in_ptr3 + (40 + x4 + 80 * x3), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp56 = tmp54 * tmp55
tmp57 = tmp53 + tmp56
tmp58 = tl.load(in_ptr0 + (48 + x4 + 64 * x3), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp59 = tl.load(in_ptr3 + (60 + x4 + 80 * x3), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp60 = tmp58 * tmp59
tmp61 = tmp57 + tmp60
tmp62 = tl.full(tmp61.shape, 0.0, tmp61.dtype)
tmp63 = tl.where(tmp46, tmp61, tmp62)
tmp64 = tmp0 >= tmp44
tmp65 = tl.full([1], 4, tl.int64)
tmp66 = tmp0 < tmp65
tmp67 = tmp64 & tmp66
tmp68 = tl.load(in_ptr0 + (x4 + 64 * x3), tmp67 & xmask,
eviction_policy='evict_last', other=0.0)
tmp69 = tl.load(in_ptr4 + (4 + x4 + 80 * x3), tmp67 & xmask,
eviction_policy='evict_last', other=0.0)
tmp70 = tmp68 * tmp69
tmp71 = tl.load(in_ptr0 + (16 + x4 + 64 * x3), tmp67 & xmask,
eviction_policy='evict_last', other=0.0)
tmp72 = tl.load(in_ptr4 + (24 + x4 + 80 * x3), tmp67 & xmask,
eviction_policy='evict_last', other=0.0)
tmp73 = tmp71 * tmp72
tmp74 = tmp70 + tmp73
tmp75 = tl.load(in_ptr0 + (32 + x4 + 64 * x3), tmp67 & xmask,
eviction_policy='evict_last', other=0.0)
tmp76 = tl.load(in_ptr4 + (44 + x4 + 80 * x3), tmp67 & xmask,
eviction_policy='evict_last', other=0.0)
tmp77 = tmp75 * tmp76
tmp78 = tmp74 + tmp77
tmp79 = tl.load(in_ptr0 + (48 + x4 + 64 * x3), tmp67 & xmask,
eviction_policy='evict_last', other=0.0)
tmp80 = tl.load(in_ptr4 + (64 + x4 + 80 * x3), tmp67 & xmask,
eviction_policy='evict_last', other=0.0)
tmp81 = tmp79 * tmp80
tmp82 = tmp78 + tmp81
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp67, tmp82, tmp83)
tmp85 = tmp0 >= tmp65
tl.full([1], 5, tl.int64)
tmp88 = 1.0
tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype)
tmp90 = tl.where(tmp85, tmp88, tmp89)
tmp91 = tl.where(tmp67, tmp84, tmp90)
tmp92 = tl.where(tmp46, tmp63, tmp91)
tmp93 = tl.where(tmp25, tmp42, tmp92)
tmp94 = tl.where(tmp4, tmp21, tmp93)
tl.store(out_ptr0 + x5, tmp94, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 80 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 80 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 80 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 80 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (64 + x0 + 80 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp1 - tmp8
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp8
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 80
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + x3, tmp5, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_5(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_6(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = -1 + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_7(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 5
x2 = xindex // 20
x3 = xindex % 20
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + x4, tmp3, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_8(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 5
x2 = xindex // 20
x3 = xindex % 20
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + x4, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_mul_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 16
x0 = xindex % 4
x5 = xindex // 4
x6 = xindex // 16
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x4 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (1 + x0 + 5 * x5), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x4 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr2 + (x0 + 5 * x5), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x4 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr3 + (4 + x4 + 20 * x6), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x4 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr4 + (x4 + 20 * x6), xmask)
tmp15 = tl.load(in_ptr0 + (64 + x4 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr5 + x7, xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tl.store(out_ptr0 + x7, tmp18, xmask)
@triton.jit
def triton_poi_fused_add_mul_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 16
x0 = xindex % 4
x5 = xindex // 4
x6 = xindex // 16
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x4 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (1 + x0 + 5 * x5), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x4 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr2 + (x0 + 5 * x5), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x4 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr3 + (4 + x4 + 20 * x6), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x4 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr4 + (x4 + 20 * x6), xmask)
tmp15 = tl.load(in_ptr0 + (64 + x4 + 80 * x3), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_out_ptr0 + x7, xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tl.store(in_out_ptr0 + x7, tmp18, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf2, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf3 = extern_kernels.convolution(primals_3, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_0[grid(256)](buf4, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
triton_poi_fused_constant_pad_nd_convolution_1[grid(320)](buf0,
primals_2, buf5, buf6, buf7, buf8, 320, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
buf9 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
triton_poi_fused_cat_2[grid(320)](buf2, buf5, buf6, buf7, buf8,
buf9, 320, XBLOCK=128, num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf9, buf10, buf11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf12 = buf9
del buf9
triton_poi_fused__softmax_4[grid(320)](buf12, buf10, buf11, 320,
XBLOCK=256, num_warps=4, num_stages=1)
del buf10
del buf11
buf13 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
triton_poi_fused_constant_pad_nd_5[grid(320)](buf4, buf13, 320,
XBLOCK=128, num_warps=4, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
triton_poi_fused_constant_pad_nd_6[grid(320)](buf4, buf14, 320,
XBLOCK=128, num_warps=4, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
triton_poi_fused_constant_pad_nd_7[grid(320)](buf4, buf15, 320,
XBLOCK=256, num_warps=4, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
triton_poi_fused_constant_pad_nd_8[grid(320)](buf4, buf16, 320,
XBLOCK=128, num_warps=4, num_stages=1)
buf17 = buf0
del buf0
triton_poi_fused_add_mul_9[grid(256)](buf12, buf13, buf14, buf15,
buf16, buf4, buf17, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
triton_poi_fused_constant_pad_nd_5[grid(320)](buf17, buf18, 320,
XBLOCK=128, num_warps=4, num_stages=1)
buf19 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
triton_poi_fused_constant_pad_nd_6[grid(320)](buf17, buf19, 320,
XBLOCK=128, num_warps=4, num_stages=1)
buf20 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
triton_poi_fused_constant_pad_nd_7[grid(320)](buf17, buf20, 320,
XBLOCK=256, num_warps=4, num_stages=1)
buf21 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
triton_poi_fused_constant_pad_nd_8[grid(320)](buf17, buf21, 320,
XBLOCK=128, num_warps=4, num_stages=1)
buf22 = buf17
del buf17
triton_poi_fused_add_mul_10[grid(256)](buf22, buf12, buf18, buf19,
buf20, buf21, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf23 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
triton_poi_fused_constant_pad_nd_5[grid(320)](buf22, buf23, 320,
XBLOCK=128, num_warps=4, num_stages=1)
buf24 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
triton_poi_fused_constant_pad_nd_6[grid(320)](buf22, buf24, 320,
XBLOCK=128, num_warps=4, num_stages=1)
buf25 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
triton_poi_fused_constant_pad_nd_7[grid(320)](buf22, buf25, 320,
XBLOCK=256, num_warps=4, num_stages=1)
buf26 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
triton_poi_fused_constant_pad_nd_8[grid(320)](buf22, buf26, 320,
XBLOCK=128, num_warps=4, num_stages=1)
buf27 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_9[grid(256)](buf12, buf23, buf24, buf25,
buf26, buf22, buf27, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 4, 4, 4), (64, 16, 4, 1))
buf29 = buf28
del buf28
triton_poi_fused_convolution_0[grid(256)](buf29, primals_9, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
return (buf29, primals_1, primals_3, primals_4, primals_6, primals_8,
buf2, buf4, buf5, buf6, buf7, buf8, buf12, buf13, buf14, buf15,
buf16, buf18, buf19, buf20, buf21, buf22, buf23, buf24, buf25,
buf26, buf27)
def shift(x, direction, amount):
if direction == 'left':
ret = F.pad(x, (amount, 0, 0, 0, 0, 0, 0, 0))[:, :, :, :-amount]
elif direction == 'right':
ret = F.pad(x, (0, amount, 0, 0, 0, 0, 0, 0))[:, :, :, amount:]
elif direction == 'top':
ret = F.pad(x, (0, 0, amount, 0, 0, 0, 0, 0))[:, :, :-amount, :]
elif direction == 'bottom':
ret = F.pad(x, (0, 0, 0, amount, 0, 0, 0, 0))[:, :, amount:, :]
    else:
        raise ValueError(f'unknown shift direction: {direction}')
return ret
class ManifoldPropagationNew(nn.Module):
def __init__(self, ic, k_hop=3, stride=1):
super().__init__()
self.k = nn.Conv2d(ic, ic, kernel_size=1, padding=0)
self.q = nn.Conv2d(ic, ic, kernel_size=1, padding=0)
self.v = nn.Conv2d(ic, ic, kernel_size=1, padding=0)
self.k_hop = k_hop
self.stride = stride
self.normalize = nn.Softmax(dim=1)
self.aggregate = nn.Conv2d(ic, ic, kernel_size=1)
def propagation(self, x, l, r, t, b, m):
p = l * shift(x, 'right', self.stride) + r * shift(x, 'left', self.
stride) + t * shift(x, 'bottom', self.stride) + b * shift(x,
'top', self.stride) + m * x
return p
def forward(self, input_0):
primals_1 = self.k.weight
primals_2 = self.k.bias
primals_4 = self.q.weight
primals_5 = self.q.bias
primals_6 = self.v.weight
primals_7 = self.v.bias
primals_8 = self.aggregate.weight
primals_9 = self.aggregate.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
wonkyunglee/MPNet
|
ManifoldPropagation
| false
| 16,758
|
[
"MIT"
] | 1,280
|
3a6821a88a5e3db5bd97121761dbb361d9518bc2
|
https://github.com/wonkyunglee/MPNet/tree/3a6821a88a5e3db5bd97121761dbb361d9518bc2
|
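A small sketch of what the shift helper in the record above actually does, assuming that helper is in scope; the tensor sizes are arbitrary illustration values. For 'left' the helper pads a zero column on the left of the width axis and drops the last column, so each output pixel takes the value of its left neighbour and the first column becomes zero.
import torch
def _check_shift_left():
    x = torch.arange(2 * 3 * 4 * 4, dtype=torch.float32).view(2, 3, 4, 4)
    y = shift(x, 'left', 1)
    # The newly padded border column is zero ...
    assert torch.equal(y[..., 0], torch.zeros_like(y[..., 0]))
    # ... and the remaining columns are the input moved over by one.
    assert torch.equal(y[..., 1:], x[..., :-1])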
Model
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(Model, self).__init__()
h_size_1 = 100
h_size_2 = 100
self.p_fc1 = nn.Linear(num_inputs, h_size_1)
self.p_fc2 = nn.Linear(h_size_1, h_size_2)
self.v_fc1 = nn.Linear(num_inputs, h_size_1 * 5)
self.v_fc2 = nn.Linear(h_size_1 * 5, h_size_2)
self.mu = nn.Linear(h_size_2, num_outputs)
self.log_std = nn.Parameter(torch.zeros(1, num_outputs))
self.v = nn.Linear(h_size_2, 1)
for name, p in self.named_parameters():
if 'bias' in name:
p.data.fill_(0)
"""
if 'mu.weight' in name:
p.data.normal_()
p.data /= torch.sum(p.data**2,0).expand_as(p.data)"""
self.train()
def forward(self, inputs):
x = F.tanh(self.p_fc1(inputs))
x = F.tanh(self.p_fc2(x))
mu = self.mu(x)
sigma_sq = torch.exp(self.log_std)
x = F.tanh(self.v_fc1(inputs))
x = F.tanh(self.v_fc2(x))
v = self.v(x)
return mu, sigma_sq, v
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_outputs': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.exp(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_tanh_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 500
x2 = xindex // 2000
x4 = xindex % 2000
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + (x4 + 2016 * x2), tmp3, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 500
x1 = xindex // 500
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 500 * (x1 % 4) + 2016 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (100, 4), (4, 1))
assert_size_stride(primals_2, (100,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (100, 100), (100, 1))
assert_size_stride(primals_5, (100,), (1,))
assert_size_stride(primals_6, (4, 100), (100, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (500, 4), (4, 1))
assert_size_stride(primals_10, (500,), (1,))
assert_size_stride(primals_11, (100, 500), (500, 1))
assert_size_stride(primals_12, (100,), (1,))
assert_size_stride(primals_13, (1, 100), (100, 1))
assert_size_stride(primals_14, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(6400)](buf1, primals_2, 6400, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0),
reinterpret_tensor(primals_4, (100, 100), (1, 100), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(6400)](buf3, primals_5, 6400, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 100),
(100, 1), 0), reinterpret_tensor(primals_6, (100, 4), (1, 100),
0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
triton_poi_fused_exp_1[grid(4)](primals_8, buf5, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_8
buf6 = empty_strided_cuda((64, 500), (500, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 500), (1, 4), 0), out=buf6)
del primals_9
buf7 = empty_strided_cuda((4, 4, 4, 500), (8064, 2016, 500, 1),
torch.float32)
triton_poi_fused_tanh_2[grid(32000)](buf6, primals_10, buf7, 32000,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_10
buf8 = buf6
del buf6
triton_poi_fused_3[grid(32000)](buf7, buf8, 32000, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (500, 100),
(1, 500), 0), out=buf9)
del buf8
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 100), (1600, 400, 100, 1), 0
)
del buf9
triton_poi_fused_tanh_0[grid(6400)](buf10, primals_12, 6400, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_12
buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_14, reinterpret_tensor(buf10, (64, 100
), (100, 1), 0), reinterpret_tensor(primals_13, (100, 1), (1,
100), 0), alpha=1, beta=1, out=buf12)
del primals_14
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf5,
reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5,
buf7, buf10, primals_13, primals_11, primals_6, primals_4)
class ModelNew(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(ModelNew, self).__init__()
h_size_1 = 100
h_size_2 = 100
self.p_fc1 = nn.Linear(num_inputs, h_size_1)
self.p_fc2 = nn.Linear(h_size_1, h_size_2)
self.v_fc1 = nn.Linear(num_inputs, h_size_1 * 5)
self.v_fc2 = nn.Linear(h_size_1 * 5, h_size_2)
self.mu = nn.Linear(h_size_2, num_outputs)
self.log_std = nn.Parameter(torch.zeros(1, num_outputs))
self.v = nn.Linear(h_size_2, 1)
for name, p in self.named_parameters():
if 'bias' in name:
p.data.fill_(0)
"""
if 'mu.weight' in name:
p.data.normal_()
p.data /= torch.sum(p.data**2,0).expand_as(p.data)"""
self.train()
def forward(self, input_0):
primals_8 = self.log_std
primals_1 = self.p_fc1.weight
primals_2 = self.p_fc1.bias
primals_4 = self.p_fc2.weight
primals_5 = self.p_fc2.bias
primals_9 = self.v_fc1.weight
primals_10 = self.v_fc1.bias
primals_11 = self.v_fc2.weight
primals_12 = self.v_fc2.bias
primals_6 = self.mu.weight
primals_7 = self.mu.bias
primals_13 = self.v.weight
primals_14 = self.v.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1], output[2]
|
yanjiajia-september/Pytorch-DPPO
|
Model
| false
| 16,759
|
[
"MIT"
] | 179
|
5e1a75b6dfc6a170270253a35d10109718240e97
|
https://github.com/yanjiajia-september/Pytorch-DPPO/tree/5e1a75b6dfc6a170270253a35d10109718240e97
|
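A shape-check sketch for the actor-critic record above, assuming CUDA is available and that ModelNew plus the record's get_inputs/get_init_inputs helpers are in scope; the expected shapes are read off the tensors returned by call() (a mean per action dimension, a broadcastable sigma_sq derived from log_std, and one value estimate per flattened input row).
import torch
def _check_model_heads():
    init_args, init_kwargs = get_init_inputs()
    net = ModelNew(*init_args, **init_kwargs).cuda()
    x, = get_inputs()
    with torch.no_grad():
        mu, sigma_sq, v = net(x.cuda())
    assert mu.shape == (4, 4, 4, 4)    # one mean per action dimension
    assert sigma_sq.shape == (1, 4)    # exp(log_std), shared across the batch
    assert v.shape == (4, 4, 4, 1)     # scalar value head per input row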
PoolFormerBlock
|
import math
import torch
import warnings
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class GroupNorm(nn.GroupNorm):
"""
Group Normalization with 1 group.
Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, **kwargs):
super().__init__(1, num_channels, **kwargs)
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
class Mlp(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class PoolFormerBlock(nn.Module):
"""
Implementation of one PoolFormer block.
--dim: embedding dim
--pool_size: pooling size
--mlp_ratio: mlp expansion ratio
--act_layer: activation
--norm_layer: normalization
--drop: dropout rate
    --drop_path: Stochastic Depth,
refer to https://arxiv.org/abs/1603.09382
--use_layer_scale, --layer_scale_init_value: LayerScale,
refer to https://arxiv.org/abs/2103.17239
"""
def __init__(self, dim, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU,
norm_layer=GroupNorm, drop=0.0, drop_path=0.0, use_layer_scale=True,
layer_scale_init_value=1e-05):
super().__init__()
self.norm1 = norm_layer(dim)
self.token_mixer = Pooling(pool_size=pool_size)
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = use_layer_scale
if use_layer_scale:
self.layer_scale_1 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
self.layer_scale_2 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
def forward(self, x):
if self.use_layer_scale:
x = x + self.drop_path(self.layer_scale_1.unsqueeze(-1).
unsqueeze(-1) * self.token_mixer(self.norm1(x)))
x = x + self.drop_path(self.layer_scale_2.unsqueeze(-1).
unsqueeze(-1) * self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.token_mixer(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
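The PoolFormerBlock source above references DropPath without defining or importing it; with the default drop_path=0.0 the nn.Identity branch is taken, so the missing symbol is never executed by this record. Upstream PoolFormer code typically takes DropPath from timm, but that is an assumption here; the minimal stochastic-depth stand-in below is a sketch that would make the drop_path > 0.0 branch runnable, not the author's implementation.
import torch
import torch.nn as nn
class DropPath(nn.Module):
    """Minimal stochastic-depth sketch: randomly drops whole residual branches per sample."""
    def __init__(self, drop_prob=0.0):
        super().__init__()
        self.drop_prob = drop_prob
    def forward(self, x):
        if self.drop_prob == 0.0 or not self.training:
            return x
        keep_prob = 1.0 - self.drop_prob
        # One Bernoulli draw per sample, broadcast across channel and spatial dims.
        mask = x.new_empty((x.shape[0],) + (1,) * (x.dim() - 1)).bernoulli_(keep_prob)
        return x * mask / keep_prob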
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import warnings
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tl.store(out_ptr2 + (r1 + 64 * x0), tmp27, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_avg_pool2d_native_group_norm_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr3, out_ptr4,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex // 4 % 4
r1 = rindex % 4
r6 = rindex
x0 = xindex
r3 = rindex // 16
tmp54 = tl.load(in_ptr1 + (r6 + 64 * x0), xmask, other=0.0)
tmp55 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr0 + (r6 + 64 * x0), xmask, other=0.0)
tmp83 = tl.load(in_ptr3 + r3, None, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr4 + r3, None, eviction_policy='evict_last')
tmp0 = -1 + r2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + r1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + r6 + 64 * x0), tmp10 & xmask, other=0.0)
tmp12 = r1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + r6 + 64 * x0), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + r1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + r6 + 64 * x0), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = r2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + r6 + 64 * x0), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (r6 + 64 * x0), tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + r6 + 64 * x0), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + r2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + r6 + 64 * x0), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + r6 + 64 * x0), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + r6 + 64 * x0), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = (0 * (0 >= -1 + r1) + (-1 + r1) * (-1 + r1 > 0)) * (0 * (0 >= -
1 + r2) + (-1 + r2) * (-1 + r2 > 0)) + (4 * (4 <= 2 + r1) + (2 + r1
) * (2 + r1 < 4)) * (4 * (4 <= 2 + r2) + (2 + r2) * (2 + r2 < 4)
) + -1 * (0 * (0 >= -1 + r1) + (-1 + r1) * (-1 + r1 > 0)) * (4 * (4 <=
2 + r2) + (2 + r2) * (2 + r2 < 4)) + -1 * (0 * (0 >= -1 + r2) + (-1 +
r2) * (-1 + r2 > 0)) * (4 * (4 <= 2 + r1) + (2 + r1) * (2 + r1 < 4))
tmp53 = tmp51 / tmp52
tmp57 = tmp53 - tmp56
tmp58 = tmp55 * tmp57
tmp59 = tmp54 + tmp58
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tl.where(xmask, tmp60, 0)
tmp63 = tl.broadcast_to(tmp60, [XBLOCK, RBLOCK])
tmp65 = tl.where(xmask, tmp63, 0)
tmp66 = tl.sum(tmp65, 1)[:, None]
tmp67 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp68 = tmp67.to(tl.float32)
tmp69 = tmp66 / tmp68
tmp70 = tmp60 - tmp69
tmp71 = tmp70 * tmp70
tmp72 = tl.broadcast_to(tmp71, [XBLOCK, RBLOCK])
tmp74 = tl.where(xmask, tmp72, 0)
tmp75 = tl.sum(tmp74, 1)[:, None]
tmp76 = tmp59 - tmp69
tmp77 = 64.0
tmp78 = tmp75 / tmp77
tmp79 = 1e-05
tmp80 = tmp78 + tmp79
tmp81 = libdevice.rsqrt(tmp80)
tmp82 = tmp76 * tmp81
tmp84 = tmp82 * tmp83
tmp86 = tmp84 + tmp85
tl.store(out_ptr0 + (r6 + 64 * x0), tmp53, xmask)
tl.store(out_ptr3 + (r6 + 64 * x0), tmp86, xmask)
tl.store(out_ptr4 + x0, tmp81, xmask)
tl.store(out_ptr1 + x0, tmp69, xmask)
@triton.jit
def triton_poi_fused_convolution_gelu_2(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_sub_3(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x3, xmask)
tmp6 = tl.load(in_ptr4 + x3, xmask)
tmp10 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp7 = tmp5 - tmp6
tmp8 = tmp4 * tmp7
tmp9 = tmp3 + tmp8
tmp11 = tmp10 * tmp2
tmp12 = tmp9 + tmp11
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_0[grid(4)](primals_4, primals_2,
primals_3, buf0, buf3, buf16, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_2
del primals_3
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_avg_pool2d_native_group_norm_1[grid(4)](buf3,
primals_4, primals_1, primals_6, primals_7, buf4, buf5, buf8,
buf9, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 4, 4), (256, 16, 4, 1))
buf11 = buf10
del buf10
buf12 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_gelu_2[grid(1024)](buf11, primals_9,
buf12, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4, 4), (64, 16, 4, 1))
buf14 = buf13
del buf13
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_mul_sub_3[grid(256)](buf14,
primals_11, primals_4, primals_1, buf4, buf3, primals_5, buf15,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
return (buf15, primals_1, primals_4, primals_5, primals_6, primals_8,
primals_10, buf3, buf4, buf8, reinterpret_tensor(buf5, (4, 1), (1,
1), 0), reinterpret_tensor(buf9, (4, 1), (1, 1), 0), buf11, buf12,
buf14, reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0),
reinterpret_tensor(buf16, (4, 1, 1), (1, 1, 1), 0))
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.'
, stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Copy & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class GroupNorm(nn.GroupNorm):
"""
Group Normalization with 1 group.
Input: tensor in shape [B, C, H, W]
"""
def __init__(self, num_channels, **kwargs):
super().__init__(1, num_channels, **kwargs)
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size //
2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
class Mlp(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class PoolFormerBlockNew(nn.Module):
"""
Implementation of one PoolFormer block.
--dim: embedding dim
--pool_size: pooling size
--mlp_ratio: mlp expansion ratio
--act_layer: activation
--norm_layer: normalization
--drop: dropout rate
    --drop_path: Stochastic Depth,
refer to https://arxiv.org/abs/1603.09382
--use_layer_scale, --layer_scale_init_value: LayerScale,
refer to https://arxiv.org/abs/2103.17239
"""
def __init__(self, dim, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU,
norm_layer=GroupNorm, drop=0.0, drop_path=0.0, use_layer_scale=True,
layer_scale_init_value=1e-05):
super().__init__()
self.norm1 = norm_layer(dim)
self.token_mixer = Pooling(pool_size=pool_size)
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = use_layer_scale
if use_layer_scale:
self.layer_scale_1 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
self.layer_scale_2 = nn.Parameter(layer_scale_init_value *
torch.ones(dim), requires_grad=True)
def forward(self, input_0):
primals_1 = self.layer_scale_1
primals_2 = self.layer_scale_2
primals_3 = self.norm1.weight
primals_5 = self.norm1.bias
primals_6 = self.norm2.weight
primals_7 = self.norm2.bias
primals_8 = self.mlp.fc1.weight
primals_9 = self.mlp.fc1.bias
primals_10 = self.mlp.fc2.weight
primals_11 = self.mlp.fc2.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
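# Usage sketch: a minimal smoke test of the compiled block, assuming a CUDA
# device (the generated call() allocates CUDA buffers and launches Triton
# kernels directly).
if __name__ == '__main__':
    block = PoolFormerBlockNew(dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = block(x)
    print(y.shape)  # expected: torch.Size([4, 4, 4, 4])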
|
xwyzsn/solo-learn
|
PoolFormerBlock
| false
| 16,760
|
[
"MIT"
] | 693
|
16d021d8053439a3de205337ab2a11d191500b09
|
https://github.com/xwyzsn/solo-learn/tree/16d021d8053439a3de205337ab2a11d191500b09
|
EqualLinear
|
import torch
import torch.nn.functional as F
from torch import nn
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, lr_mul=1, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim))
self.lr_mul = lr_mul
def forward(self, input):
return F.linear(input, self.weight * self.lr_mul, bias=self.bias *
self.lr_mul)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (
4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf2)
del buf0
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class EqualLinearNew(nn.Module):
def __init__(self, in_dim, out_dim, lr_mul=1, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim))
self.lr_mul = lr_mul
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
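# Usage sketch: a minimal check that the compiled module matches the eager
# formulation F.linear(x, weight * lr_mul, bias * lr_mul); assumes a CUDA device.
if __name__ == '__main__':
    import torch.nn.functional as F
    torch.manual_seed(0)
    m = EqualLinearNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = F.linear(x, m.weight * m.lr_mul, m.bias * m.lr_mul)
    print(torch.allclose(m(x), ref, atol=1e-5))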
|
yoona-ai/stylegan2-pytorch
|
EqualLinear
| false
| 16,761
|
[
"MIT"
] | 2,954
|
eceb8aacb669f19b79cc74c7160a85252b1086d6
|
https://github.com/yoona-ai/stylegan2-pytorch/tree/eceb8aacb669f19b79cc74c7160a85252b1086d6
|
KeypointsMSESmoothLoss
|
import torch
import torch.utils.data
import torch
import torch.nn as nn
class KeypointsMSESmoothLoss(nn.Module):
def __init__(self, threshold=400):
super().__init__()
self.threshold = threshold
def forward(self, output, target, target_weight):
batch_size = output.size(0)
num_joints = output.size(1)
heatmaps_pred = output.reshape((batch_size, num_joints, -1))
heatmaps_gt = target.reshape((batch_size, num_joints, -1))
dimension = heatmaps_pred.shape[-1]
diff = (heatmaps_gt - heatmaps_pred) ** 2 * target_weight[..., None]
diff[diff > self.threshold] = torch.pow(diff[diff > self.threshold],
0.1) * self.threshold ** 0.9
loss = torch.sum(diff) / (dimension * max(1, torch.sum(target_weight)))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_gt_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex % 256
x5 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + x4, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x5, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp3 * tmp4
tmp6 = 400.0
tmp7 = tmp5 > tmp6
tl.store(out_ptr0 + x3, tmp5, None)
tl.store(out_ptr1 + x3, tmp7, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 16), (1024, 256, 64, 16, 1),
torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4, 16), (1024, 256, 64, 16, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_gt_mul_pow_sub_0[grid(4096)](arg1_1, arg0_1,
arg2_1, buf0, buf1, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0, buf1
class KeypointsMSESmoothLossNew(nn.Module):
def __init__(self, threshold=400):
super().__init__()
self.threshold = threshold
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
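# Note: the generated call() above only fuses the weighted squared difference
# and the `diff > threshold` mask; the data-dependent masked assignment and the
# final sum/normalisation of the eager forward are not part of this graph, so
# the compiled module appears to return the broadcast element-wise term rather
# than the reduced scalar loss.
# Minimal smoke test of the compiled path (assumes a CUDA device):
if __name__ == '__main__':
    crit = KeypointsMSESmoothLossNew()
    out = crit(torch.rand(4, 4, 4, 4, device='cuda'),
               torch.rand(4, 4, 4, 4, device='cuda'),
               torch.rand(4, 4, 4, 4, device='cuda'))
    print(out.shape)  # element-wise term, not the scalar loss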
|
yihui-he2020/epipolar-transformers
|
KeypointsMSESmoothLoss
| false
| 16,762
|
[
"MIT"
] | 360
|
6824f4345b2998500fbacd0f4e30f67f8e3da7b8
|
https://github.com/yihui-he2020/epipolar-transformers/tree/6824f4345b2998500fbacd0f4e30f67f8e3da7b8
|
Conv2D
|
import torch
import torch.nn as nn
import torch.utils.data
class Conv2D(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, dilation_h
=1, dilation_w=1, causal=True, use_wn_bias=True):
super(Conv2D, self).__init__()
self.causal = causal
self.use_wn_bias = use_wn_bias
self.dilation_h, self.dilation_w = dilation_h, dilation_w
if self.causal:
self.padding_h = dilation_h * (kernel_size - 1)
else:
self.padding_h = dilation_h * (kernel_size - 1) // 2
self.padding_w = dilation_w * (kernel_size - 1) // 2
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
dilation=(dilation_h, dilation_w), padding=(self.padding_h,
self.padding_w), bias=use_wn_bias)
self.conv = nn.utils.weight_norm(self.conv)
nn.init.kaiming_normal_(self.conv.weight)
def forward(self, tensor):
out = self.conv(tensor)
if self.causal and self.padding_h != 0:
out = out[:, :, :-self.padding_h, :]
return out
def reverse_fast(self, tensor):
self.conv.padding = 0, self.padding_w
out = self.conv(tensor)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 36
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 36 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 36 * x0), tmp9, rmask & xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 24 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__weight_norm_interface_0[grid(4)](buf1, primals_2,
primals_1, buf2, 4, 36, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1, 1),
padding=(2, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 6, 4), (96, 24, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(384)](buf4, primals_3, 384,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (96, 24, 4, 1), 0
), buf2, primals_1, primals_2, primals_4, buf1, buf2
class Conv2DNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, dilation_h
=1, dilation_w=1, causal=True, use_wn_bias=True):
super(Conv2DNew, self).__init__()
self.causal = causal
self.use_wn_bias = use_wn_bias
self.dilation_h, self.dilation_w = dilation_h, dilation_w
if self.causal:
self.padding_h = dilation_h * (kernel_size - 1)
else:
self.padding_h = dilation_h * (kernel_size - 1) // 2
self.padding_w = dilation_w * (kernel_size - 1) // 2
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
dilation=(dilation_h, dilation_w), padding=(self.padding_h,
self.padding_w), bias=use_wn_bias)
self.conv = nn.utils.weight_norm(self.conv)
nn.init.kaiming_normal_(self.conv.weight)
def reverse_fast(self, tensor):
self.conv.padding = 0, self.padding_w
out = self.conv(tensor)
return out
def forward(self, input_0):
primals_3 = self.conv.bias
primals_1 = self.conv.weight_g
primals_2 = self.conv.weight_v
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
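# Usage sketch: with the default causal=True the layer pads the height
# dimension by dilation_h * (kernel_size - 1) and the trailing padded rows are
# cropped, so the output keeps the input's spatial size. Minimal shape check on
# the compiled module (assumes a CUDA device):
if __name__ == '__main__':
    conv = Conv2DNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    print(conv(x).shape)  # expected: torch.Size([4, 4, 4, 4])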
|
yhgon/NanoFlow
|
Conv2D
| false
| 16,763
|
[
"BSD-3-Clause"
] | 62
|
73b24dfd4d607e73d6167897b83e9f61fcaaca3b
|
https://github.com/yhgon/NanoFlow/tree/73b24dfd4d607e73d6167897b83e9f61fcaaca3b
|
LayerNorm
|
import torch
import torch.nn as nn
import torch.utils.data
class LayerNorm(nn.Module):
"""
LayerNorm that supports inputs of size B, C, T
"""
def __init__(self, num_channels, eps=1e-05, affine=True, device=None,
dtype=None):
super().__init__()
factory_kwargs = {'device': device, 'dtype': dtype}
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = nn.Parameter(torch.ones([1, num_channels, 1], **
factory_kwargs))
self.bias = nn.Parameter(torch.zeros([1, num_channels, 1], **
factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
assert x.dim() == 3
assert x.shape[1] == self.num_channels
mu = torch.mean(x, dim=1, keepdim=True)
res_x = x - mu
sigma = torch.mean(res_x ** 2, dim=1, keepdim=True)
out = res_x / torch.sqrt(sigma + self.eps)
if self.affine:
out *= self.weight
out += self.bias
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-05
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp0 / tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x3, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (1, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(64)](primals_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(64)](buf0,
primals_2, primals_3, buf1, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf0
del primals_2
del primals_3
return buf1, primals_1
class LayerNormNew(nn.Module):
"""
LayerNorm that supports inputs of size B, C, T
"""
def __init__(self, num_channels, eps=1e-05, affine=True, device=None,
dtype=None):
super().__init__()
factory_kwargs = {'device': device, 'dtype': dtype}
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = nn.Parameter(torch.ones([1, num_channels, 1], **
factory_kwargs))
self.bias = nn.Parameter(torch.zeros([1, num_channels, 1], **
factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
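# Usage sketch: a reference check against the channel-wise normalisation
# (x - mean_C) / sqrt(mean_C(res^2) + eps) * weight + bias; assumes a CUDA device.
if __name__ == '__main__':
    torch.manual_seed(0)
    ln = LayerNormNew(4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    mu = x.mean(dim=1, keepdim=True)
    sigma = ((x - mu) ** 2).mean(dim=1, keepdim=True)
    ref = (x - mu) / torch.sqrt(sigma + 1e-05) * ln.weight + ln.bias
    print(torch.allclose(ln(x), ref, atol=1e-6))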
|
yjh0410/actionformer_release
|
LayerNorm
| false
| 16,764
|
[
"MIT"
] | 61
|
7a97422111d3e29c8d2e14088c850c6975855ea7
|
https://github.com/yjh0410/actionformer_release/tree/7a97422111d3e29c8d2e14088c850c6975855ea7
|
AffineDropPath
|
import torch
import torch.nn as nn
import torch.utils.data
def drop_path(x, drop_prob=0.0, training=False):
"""
Stochastic Depth per sample.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
mask = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
mask.floor_()
output = x.div(keep_prob) * mask
return output
class AffineDropPath(nn.Module):
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks) with a per channel scaling factor (and zero init)
See: https://arxiv.org/pdf/2103.17239.pdf
"""
def __init__(self, num_dim, drop_prob=0.0, init_scale_value=0.0001):
super().__init__()
self.scale = nn.Parameter(init_scale_value * torch.ones((1, num_dim,
1)), requires_grad=True)
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(self.scale * x, self.drop_prob, self.training)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
return buf0, primals_2
def drop_path(x, drop_prob=0.0, training=False):
"""
Stochastic Depth per sample.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
mask = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
mask.floor_()
output = x.div(keep_prob) * mask
return output
class AffineDropPathNew(nn.Module):
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks) with a per channel scaling factor (and zero init)
See: https://arxiv.org/pdf/2103.17239.pdf
"""
def __init__(self, num_dim, drop_prob=0.0, init_scale_value=0.0001):
super().__init__()
self.scale = nn.Parameter(init_scale_value * torch.ones((1, num_dim,
1)), requires_grad=True)
self.drop_prob = drop_prob
def forward(self, input_0):
primals_1 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
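# Usage sketch: with the default drop_prob=0.0 drop_path is the identity, so
# the compiled forward reduces to the elementwise scaling `self.scale * x`.
# Assumes a CUDA device.
if __name__ == '__main__':
    m = AffineDropPathNew(4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    print(torch.allclose(m(x), m.scale * x))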
|
yjh0410/actionformer_release
|
AffineDropPath
| false
| 16,765
|
[
"MIT"
] | 61
|
7a97422111d3e29c8d2e14088c850c6975855ea7
|
https://github.com/yjh0410/actionformer_release/tree/7a97422111d3e29c8d2e14088c850c6975855ea7
|
IA_gate
|
import torch
import torch.nn as nn
class IA_gate(nn.Module):
def __init__(self, in_dim, out_dim):
super(IA_gate, self).__init__()
self.IA = nn.Linear(in_dim, out_dim)
def forward(self, x, IA_head):
a = self.IA(IA_head)
a = 1.0 + torch.tanh(a)
a = a.unsqueeze(-1).unsqueeze(-1)
x = a * x
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex // 16
x4 = xindex % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last')
tmp1 = libdevice.tanh(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp5 = tmp3 * tmp4
tl.store(out_ptr0 + x5, tmp5, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4,
1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(4096)](buf0, primals_4, buf1, 4096,
XBLOCK=128, num_warps=4, num_stages=1)
return buf1, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0
class IA_gateNew(nn.Module):
def __init__(self, in_dim, out_dim):
super(IA_gateNew, self).__init__()
self.IA = nn.Linear(in_dim, out_dim)
def forward(self, input_0, input_1):
primals_1 = self.IA.weight
primals_2 = self.IA.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
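# Usage sketch: following the data flow of the generated call(), the first
# argument feeds the Linear gate and the second is scaled by (1 + tanh(.)),
# broadcast over two trailing dimensions. Assumes a CUDA device.
if __name__ == '__main__':
    torch.manual_seed(0)
    gate = IA_gateNew(4, 4).cuda()
    t0 = torch.rand(4, 4, 4, 4, device='cuda')
    t1 = torch.rand(4, 4, 4, 4, device='cuda')
    a = (1.0 + torch.tanh(gate.IA(t0))).unsqueeze(-1).unsqueeze(-1)
    print(torch.allclose(gate(t0, t1), a * t1, atol=1e-6))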
|
yoxu515/CFBI
|
IA_gate
| false
| 16,766
|
[
"BSD-3-Clause"
] | 312
|
0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586
|
https://github.com/yoxu515/CFBI/tree/0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586
|
FocalLossSigmoid
|
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
class FocalLossSigmoid(nn.Module):
"""
sigmoid version focal loss
"""
def __init__(self, alpha=0.25, gamma=2, size_average=False):
super(FocalLossSigmoid, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.size_average = size_average
def forward(self, inputs, targets):
inputs.size(0)
inputs.size(1)
P = torch.sigmoid(inputs)
alpha_mask = self.alpha * targets
loss_pos = -1.0 * torch.pow(1 - P, self.gamma) * torch.log(P
) * targets * alpha_mask
loss_neg = -1.0 * torch.pow(P, self.gamma) * torch.log(1 - P) * (1 -
targets) * (1 - alpha_mask)
batch_loss = loss_neg + loss_pos
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp1 * tmp1
tmp3 = -1.0
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp1
tmp7 = tl_math.log(tmp6)
tmp8 = tmp4 * tmp7
tmp10 = tmp5 - tmp9
tmp11 = tmp8 * tmp10
tmp12 = 0.25
tmp13 = tmp9 * tmp12
tmp14 = tmp5 - tmp13
tmp15 = tmp11 * tmp14
tmp16 = tmp6 * tmp6
tmp17 = tmp16 * tmp3
tmp18 = tl_math.log(tmp1)
tmp19 = tmp17 * tmp18
tmp20 = tmp19 * tmp9
tmp21 = tmp20 * tmp13
tmp22 = tmp15 + tmp21
tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp25, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0[grid(1)](arg0_1,
arg1_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class FocalLossSigmoidNew(nn.Module):
"""
sigmoid version focal loss
"""
def __init__(self, alpha=0.25, gamma=2, size_average=False):
super(FocalLossSigmoidNew, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.size_average = size_average
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
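# Usage sketch: a reference check against the eager sigmoid focal loss with
# sum reduction (alpha=0.25, gamma=2); assumes a CUDA device.
if __name__ == '__main__':
    torch.manual_seed(0)
    crit = FocalLossSigmoidNew()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    t = torch.rand(4, 4, 4, 4, device='cuda')
    p = torch.sigmoid(x)
    am = crit.alpha * t
    loss_pos = -1.0 * (1 - p) ** crit.gamma * torch.log(p) * t * am
    loss_neg = -1.0 * p ** crit.gamma * torch.log(1 - p) * (1 - t) * (1 - am)
    print(torch.allclose(crit(x, t), (loss_neg + loss_pos).sum(), atol=1e-4))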
|
yqyao/SSD_Pytorch
|
FocalLossSigmoid
| false
| 16,767
|
[
"MIT"
] | 163
|
6060bbb650e7a1df7c12d7c9650a38eaba4ab6a8
|
https://github.com/yqyao/SSD_Pytorch/tree/6060bbb650e7a1df7c12d7c9650a38eaba4ab6a8
|
WeightMseLoss
|
import torch
import torch.nn as nn
class WeightMseLoss(nn.Module):
def __init__(self, size_average=True):
super(WeightMseLoss, self).__init__()
self.size_average = size_average
def forward(self, inputs, targets, weights):
""" inputs is N * C
targets is N * C
weights is N * C
"""
N = inputs.size(0)
C = inputs.size(1)
out = targets - inputs
out = weights * torch.pow(out, 2)
loss = out.sum()
if self.size_average:
loss = loss / (N * C)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tl.load(in_ptr2 + r0, None)
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp5 = tmp0 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 0.0625
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_pow_sub_sum_0[grid(1)](buf1, arg2_1,
arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class WeightMseLossNew(nn.Module):
def __init__(self, size_average=True):
super(WeightMseLossNew, self).__init__()
self.size_average = size_average
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
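# Usage sketch: a reference check against
# sum(weights * (targets - inputs) ** 2) / (N * C) with size_average=True;
# assumes a CUDA device.
if __name__ == '__main__':
    torch.manual_seed(0)
    crit = WeightMseLossNew()
    inp, tgt, w = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    ref = (w * (tgt - inp) ** 2).sum() / (inp.size(0) * inp.size(1))
    print(torch.allclose(crit(inp, tgt, w), ref, atol=1e-5))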
|
yqyao/YOLOv3_Pytorch
|
WeightMseLoss
| false
| 16,768
|
[
"MIT"
] | 55
|
ea392f7d418be94605f86ba2b5d167ec30611def
|
https://github.com/yqyao/YOLOv3_Pytorch/tree/ea392f7d418be94605f86ba2b5d167ec30611def
|
DownsampleA
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.init
class DownsampleA(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleA, self).__init__()
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, x):
return torch.cat((self.avg(x), x.mul(0)), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nIn': 4, 'nOut': 4, 'stride': 1}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp10 &
xmask, other=0.0)
tmp14 = 0.0
tmp15 = tmp13 * tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tl.store(out_ptr0 + x3, tmp18, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class DownsampleANew(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleANew, self).__init__()
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
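# Usage sketch: with kernel_size=1 and stride=1 the average pool is the
# identity, so the result is x concatenated with zeros along the channel dim.
# Assumes a CUDA device for the Triton kernel.
if __name__ == '__main__':
    m = DownsampleANew(4, 4, 1)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = torch.cat((x, torch.zeros_like(x)), dim=1)
    print(torch.allclose(m(x), ref))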
|
yuanjef/imagenet-fast
|
DownsampleA
| false
| 16,769
|
[
"Apache-2.0"
] | 298
|
4c1cb1ec11c3444982913fc6526720a0d29b97c5
|
https://github.com/yuanjef/imagenet-fast/tree/4c1cb1ec11c3444982913fc6526720a0d29b97c5
|
ImgSenRanking
|
import torch
import numpy as np
import torch.utils.data
def l2norm(input, p=2.0, dim=1, eps=1e-12):
"""
Compute L2 norm, row-wise
"""
l2_inp = input / input.norm(p, dim, keepdim=True).clamp(min=eps)
return l2_inp.expand_as(input)
def xavier_weight(tensor):
nin, nout = tensor.size()[0], tensor.size()[1]
r = np.sqrt(6.0) / np.sqrt(nin + nout)
return tensor.normal_(0, r)
class ImgSenRanking(torch.nn.Module):
def __init__(self, dim_image, sent_dim, hid_dim):
super(ImgSenRanking, self).__init__()
self.register_buffer('device_id', torch.IntTensor(1))
self.linear_img = torch.nn.Linear(dim_image, hid_dim)
self.linear_sent = torch.nn.Linear(sent_dim, hid_dim)
self.init_weights()
def init_weights(self):
xavier_weight(self.linear_img.weight.data)
xavier_weight(self.linear_sent.weight.data)
self.linear_img.bias.data.fill_(0)
self.linear_sent.bias.data.fill_(0)
def forward(self, sent, img):
img_vec = self.linear_img(img)
sent_vec = self.linear_sent(sent)
return l2norm(sent_vec), l2norm(img_vec)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_image': 4, 'sent_dim': 4, 'hid_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_div_expand_linalg_vector_norm_0(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_expand_linalg_vector_norm_0[grid(256)](buf1,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clamp_div_expand_linalg_vector_norm_0[grid(256)](buf0,
buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf1
def l2norm(input, p=2.0, dim=1, eps=1e-12):
"""
Compute L2 norm, row-wise
"""
l2_inp = input / input.norm(p, dim, keepdim=True).clamp(min=eps)
return l2_inp.expand_as(input)
def xavier_weight(tensor):
nin, nout = tensor.size()[0], tensor.size()[1]
r = np.sqrt(6.0) / np.sqrt(nin + nout)
return tensor.normal_(0, r)
class ImgSenRankingNew(torch.nn.Module):
def __init__(self, dim_image, sent_dim, hid_dim):
super(ImgSenRankingNew, self).__init__()
self.register_buffer('device_id', torch.IntTensor(1))
self.linear_img = torch.nn.Linear(dim_image, hid_dim)
self.linear_sent = torch.nn.Linear(sent_dim, hid_dim)
self.init_weights()
def init_weights(self):
xavier_weight(self.linear_img.weight.data)
xavier_weight(self.linear_sent.weight.data)
self.linear_img.bias.data.fill_(0)
self.linear_sent.bias.data.fill_(0)
def forward(self, input_0, input_1):
primals_1 = self.linear_img.weight
primals_2 = self.linear_img.bias
primals_4 = self.linear_sent.weight
primals_5 = self.linear_sent.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
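# Usage sketch: following the data flow of the generated call(), the first
# output is the linear_sent projection of the second argument divided by its
# L2 norm over dim=1 (clamped at 1e-12). Assumes a CUDA device.
if __name__ == '__main__':
    torch.manual_seed(0)
    m = ImgSenRankingNew(4, 4, 4).cuda()
    t0 = torch.rand(4, 4, 4, 4, device='cuda')
    t1 = torch.rand(4, 4, 4, 4, device='cuda')
    out0, _out1 = m(t0, t1)
    ref = m.linear_sent(t1)
    ref = ref / ref.norm(2.0, 1, keepdim=True).clamp(min=1e-12)
    print(torch.allclose(out0, ref, atol=1e-6))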
|
ypxie/HDGan
|
ImgSenRanking
| false
| 16,770
|
[
"MIT"
] | 160
|
d98e2a85f7ae6ce7bfacd1c15e519558d97cb931
|
https://github.com/ypxie/HDGan/tree/d98e2a85f7ae6ce7bfacd1c15e519558d97cb931
|
Seedloss
|
import torch
import torch.nn as nn
from torch.nn import functional as F
class Seedloss(nn.Module):
def __init__(self, ignore_label=21):
super(Seedloss, self).__init__()
self.ignore_label = ignore_label
self.eps = 1e-05
def my_softmax(self, score, dim=1):
probs = torch.clamp(F.softmax(score, dim), self.eps, 1)
probs = probs / torch.sum(probs, dim=dim, keepdim=True)
return probs
def forward(self, predict, target):
"""
compute balanced seed loss
:param predict: (n, c, h, w)
:param target: (n, c, h, w)
:return:
"""
assert not target.requires_grad
target = target
assert torch.sum(torch.isinf(predict)) == 0
assert torch.sum(torch.isnan(predict)) == 0
input_log_prob = torch.log(self.my_softmax(predict, dim=1))
assert torch.sum(torch.isnan(input_log_prob)) == 0
fg_prob = input_log_prob[:, 1:, :, :]
fg_label = target[:, 1:, :, :]
fg_count = torch.sum(fg_label, dim=(1, 2, 3)) + self.eps
bg_prob = input_log_prob[:, 0:1, :, :]
bg_label = target[:, 0:1, :, :]
bg_count = torch.sum(bg_label, dim=(1, 2, 3)) + self.eps
loss_fg = torch.sum(fg_label * fg_prob, dim=(1, 2, 3))
loss_fg = -1 * torch.mean(loss_fg / fg_count)
loss_bg = torch.sum(bg_label * bg_prob, dim=(1, 2, 3))
loss_bg = -1 * torch.mean(loss_bg / bg_count)
total_loss = loss_bg + loss_fg
assert torch.sum(torch.isnan(total_loss)
) == 0, 'fg_loss: {} fg_count: {} bg_loss: {} bg_count: {}'.format(
loss_fg, fg_count, loss_bg, bg_count)
return total_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_clamp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 1e-05
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = 1.0
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_div_log_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = tl_math.log(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_per_fused_add_mul_sum_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp12, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_per_fused_add_mul_sum_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 48
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), rmask & xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp12, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_per_fused_add_div_eq_isnan_mean_mul_sum_5(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp7 = tl.load(in_ptr3 + r0, None)
tmp2 = tmp0 / tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = -1.0
tmp15 = tmp13 * tmp14
tmp16 = tmp5 / tmp12
tmp17 = tmp16 * tmp14
tmp18 = tmp15 + tmp17
tmp19 = libdevice.isnan(tmp18).to(tl.int1)
tmp20 = tmp19.to(tl.int64)
tmp21 = tl.full([1, 1], 0, tl.int64)
tmp22 = tmp20 == tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None)
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_clamp_1[grid(256)](buf0, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused_div_log_sum_2[grid(256)](buf1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
buf4 = empty_strided_cuda((4,), (1,), torch.float32)
buf5 = buf4
del buf4
triton_per_fused_add_mul_sum_3[grid(4)](buf5, arg0_1, buf2, buf3, 4,
16, XBLOCK=1, num_warps=2, num_stages=1)
buf8 = empty_strided_cuda((4,), (1,), torch.float32)
buf9 = empty_strided_cuda((4,), (1,), torch.float32)
buf10 = buf9
del buf9
triton_per_fused_add_mul_sum_4[grid(4)](buf10, arg0_1, buf2, buf8,
4, 48, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf2
buf11 = empty_strided_cuda((), (), torch.float32)
buf6 = empty_strided_cuda((), (), torch.float32)
buf7 = buf6
del buf6
buf12 = buf11
del buf11
buf13 = empty_strided_cuda((), (), torch.float32)
buf14 = empty_strided_cuda((), (), torch.bool)
triton_per_fused_add_div_eq_isnan_mean_mul_sum_5[grid(1)](buf7,
buf12, buf8, buf10, buf3, buf5, buf13, buf14, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf3
del buf8
return buf13, buf7, buf12, buf5, buf10, buf14
class SeedlossNew(nn.Module):
def __init__(self, ignore_label=21):
super(SeedlossNew, self).__init__()
self.ignore_label = ignore_label
self.eps = 1e-05
def my_softmax(self, score, dim=1):
probs = torch.clamp(F.softmax(score, dim), self.eps, 1)
probs = probs / torch.sum(probs, dim=dim, keepdim=True)
return probs
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
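# Usage sketch: a minimal smoke test of the compiled balanced seed loss; the
# result is a scalar combining the background and foreground terms. Assumes a
# CUDA device.
if __name__ == '__main__':
    torch.manual_seed(0)
    crit = SeedlossNew()
    loss = crit(torch.rand(4, 4, 4, 4, device='cuda'),
                torch.rand(4, 4, 4, 4, device='cuda'))
    print(loss.shape, loss.item())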
|
yaoqi-zd/SGAN
|
Seedloss
| false
| 16,771
|
[
"MIT"
] | 48
|
43d8a859b03967e2423a73ef1ba332ee71714ba4
|
https://github.com/yaoqi-zd/SGAN/tree/43d8a859b03967e2423a73ef1ba332ee71714ba4
|
PrimaryCaps
|
import torch
import torch.nn as nn
class PrimaryCaps(nn.Module):
"""Creates a primary convolutional capsule layer
that outputs a pose matrix and an activation.
Note that for computation convenience, pose matrix
are stored in first part while the activations are
stored in the second part.
Args:
A: output of the normal conv layer
B: number of types of capsules
K: kernel size of convolution
P: size of pose matrix is P*P
stride: stride of convolution
Shape:
input: (*, A, h, w)
output: (*, h', w', B*(P*P+1))
h', w' is computed the same way as convolution layer
parameter size is: K*K*A*B*P*P + B*P*P
"""
def __init__(self, A=32, B=32, K=1, P=4, stride=1):
super(PrimaryCaps, self).__init__()
self.pose = nn.Conv2d(in_channels=A, out_channels=B * P * P,
kernel_size=K, stride=stride, bias=True)
self.a = nn.Conv2d(in_channels=A, out_channels=B, kernel_size=K,
stride=stride, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
p = self.pose(x)
a = self.a(x)
a = self.sigmoid(a)
out = torch.cat([p, a], dim=1)
out = out.permute(0, 2, 3, 1)
return out
def get_inputs():
return [torch.rand([4, 32, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 131072 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 544
x0 = xindex % 4096
x2 = xindex // 2228224
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (512 * x0 + 2097152 * x2 + x1), tmp4,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 544, tl.int64)
tmp13 = tl.load(in_ptr2 + (32 * x0 + 131072 * x2 + (-512 + x1)), tmp10,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.sigmoid(tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp10, tmp14, tmp15)
tmp17 = tl.where(tmp4, tmp9, tmp16)
tl.store(out_ptr0 + x3, tmp17, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (512, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_2, (512,), (1,))
assert_size_stride(primals_3, (4, 32, 64, 64), (131072, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_5, (32,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 32, 64, 64), (131072, 1, 2048, 32),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(128, 4096)](primals_3, buf0, 128, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 1, 2048, 32))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(524288)](buf3, primals_5,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 544, 64, 64), (2228224, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_2[grid(8912896)](buf1, primals_2, buf3, buf4,
8912896, XBLOCK=512, num_warps=8, num_stages=1)
del buf1
del primals_2
return reinterpret_tensor(buf4, (4, 64, 64, 544), (2228224, 64, 1, 4096), 0
), primals_1, buf0, primals_4, buf3
class PrimaryCapsNew(nn.Module):
"""Creates a primary convolutional capsule layer
that outputs a pose matrix and an activation.
Note that for computation convenience, pose matrix
are stored in first part while the activations are
stored in the second part.
Args:
A: output of the normal conv layer
B: number of types of capsules
K: kernel size of convolution
P: size of pose matrix is P*P
stride: stride of convolution
Shape:
input: (*, A, h, w)
output: (*, h', w', B*(P*P+1))
h', w' is computed the same way as convolution layer
parameter size is: K*K*A*B*P*P + B*P*P
"""
def __init__(self, A=32, B=32, K=1, P=4, stride=1):
super(PrimaryCapsNew, self).__init__()
self.pose = nn.Conv2d(in_channels=A, out_channels=B * P * P,
kernel_size=K, stride=stride, bias=True)
self.a = nn.Conv2d(in_channels=A, out_channels=B, kernel_size=K,
stride=stride, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.pose.weight
primals_2 = self.pose.bias
primals_4 = self.a.weight
primals_5 = self.a.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
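# A minimal, hedged sketch (the helper name below is illustrative, not from the repo):
# it only shows how the concatenated output described in the docstring splits back into
# pose matrices and activations, assuming the constructor defaults B=32, P=4.
def _split_primary_caps_output(out, B=32, P=4):
    # out: (batch, h', w', B * (P*P + 1)); the first B*P*P channels hold the poses,
    # the last B channels hold the sigmoid activations.
    pose, activation = out.split([B * P * P, B], dim=-1)
    return pose.reshape(*out.shape[:-1], B, P, P), activation
# e.g. pose, a = _split_primary_caps_output(torch.rand(4, 64, 64, 544))
# gives pose of shape (4, 64, 64, 32, 4, 4) and a of shape (4, 64, 64, 32).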
|
yl-1993/Matrix-Capsules-EM-PyTorch
|
PrimaryCaps
| false
| 16,772
|
[
"MIT"
] | 97
|
ca4cd7f45a4234ddf49efe9db34c9ff645378437
|
https://github.com/yl-1993/Matrix-Capsules-EM-PyTorch/tree/ca4cd7f45a4234ddf49efe9db34c9ff645378437
|
MaskedMHA
|
import math
import torch
import torch.nn as nn
import torch.utils.data
from torch.nn import functional as F
class MaskedMHA(nn.Module):
"""
Multi Head Attention with mask
Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
def __init__(self, n_embd, n_head, attn_pdrop=0.0, proj_pdrop=0.0):
super().__init__()
assert n_embd % n_head == 0
self.n_embd = n_embd
self.n_head = n_head
self.n_channels = n_embd // n_head
self.scale = 1.0 / math.sqrt(self.n_channels)
self.key = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.query = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.value = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.attn_drop = nn.Dropout(attn_pdrop)
self.proj_drop = nn.Dropout(proj_pdrop)
self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1)
def forward(self, x, mask):
B, C, _T = x.size()
k = self.key(x)
q = self.query(x)
v = self.value(x)
k = k.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
q = q.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
v = v.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
att = q * self.scale @ k.transpose(-2, -1)
att = att.masked_fill(torch.logical_not(mask[:, :, None, :]), float
('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
out = att @ (v * mask[:, :, :, None].float())
out = out.transpose(2, 3).contiguous().view(B, C, -1)
out = self.proj_drop(self.proj(out)) * mask.float()
return out, mask
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_embd': 4, 'n_head': 4}]
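# A minimal, hedged usage sketch (the function name is illustrative, not from the repo):
# x is channel-first (B, C, T) and the mask is broadcast over the heads, so a boolean
# (B, 1, T) tensor marking the valid time steps is a natural companion input.
def _masked_mha_demo():
    mha = MaskedMHA(n_embd=4, n_head=4)
    x = torch.rand(2, 4, 6)                       # (B, C, T)
    mask = torch.ones(2, 1, 6, dtype=torch.bool)  # True = valid position
    mask[:, :, 4:] = False                        # pad out the last two time steps
    out, out_mask = mha(x, mask)
    assert out.shape == x.shape and out_mask is mask
    return out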
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_logical_not_masked_fill_2(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tmp0 != 0
tmp2 = tmp1 == 0
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 != 0
tmp8 = tmp7 == 0
tmp10 = tl.where(tmp8, tmp4, tmp9)
tmp11 = triton_helpers.maximum(tmp5, tmp10)
tmp13 = tmp12 != 0
tmp14 = tmp13 == 0
tmp16 = tl.where(tmp14, tmp4, tmp15)
tmp17 = triton_helpers.maximum(tmp11, tmp16)
tmp19 = tmp18 != 0
tmp20 = tmp19 == 0
tmp22 = tl.where(tmp20, tmp4, tmp21)
tmp23 = triton_helpers.maximum(tmp17, tmp22)
tmp24 = tmp5 - tmp23
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp10 - tmp23
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp16 - tmp23
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tmp22 - tmp23
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tl.store(out_ptr0 + x2, tmp23, xmask)
tl.store(out_ptr1 + x2, tmp34, xmask)
@triton.jit
def triton_poi_fused__softmax_logical_not_masked_fill_3(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
x4 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_out_ptr0 + x3, xmask)
tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp1 = tmp0 != 0
tmp2 = tmp1 == 0
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_mul_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = extern_kernels.convolution(primals_1, primals_6, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf4 = buf0
del buf0
triton_poi_fused_convolution_1[grid(64)](buf4, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_logical_not_masked_fill_2[grid(64)](primals_8
, buf5, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_logical_not_masked_fill_3[grid(256)](buf8,
primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
buf9 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf2
triton_poi_fused_mul_4[grid(64)](buf9, primals_7, primals_8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
buf10 = reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 1), 0)
del buf7
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (4, 4,
4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf11, (4, 4, 4), (16, 4, 1))
buf12 = buf11
del buf11
triton_poi_fused_mul_4[grid(64)](buf12, primals_10, primals_8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
return (buf12, primals_1, primals_2, primals_4, primals_6, primals_8,
primals_9, buf8, reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
class MaskedMHANew(nn.Module):
"""
Multi Head Attention with mask
Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
def __init__(self, n_embd, n_head, attn_pdrop=0.0, proj_pdrop=0.0):
super().__init__()
assert n_embd % n_head == 0
self.n_embd = n_embd
self.n_head = n_head
self.n_channels = n_embd // n_head
self.scale = 1.0 / math.sqrt(self.n_channels)
self.key = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.query = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.value = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.attn_drop = nn.Dropout(attn_pdrop)
self.proj_drop = nn.Dropout(proj_pdrop)
self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1)
def forward(self, input_0, input_1):
primals_2 = self.key.weight
primals_3 = self.key.bias
primals_4 = self.query.weight
primals_5 = self.query.bias
primals_6 = self.value.weight
primals_7 = self.value.bias
primals_9 = self.proj.weight
primals_10 = self.proj.bias
primals_1 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1]
|
yjh0410/actionformer_release
|
MaskedMHA
| false
| 16,773
|
[
"MIT"
] | 61
|
7a97422111d3e29c8d2e14088c850c6975855ea7
|
https://github.com/yjh0410/actionformer_release/tree/7a97422111d3e29c8d2e14088c850c6975855ea7
|
FocalLoss
|
import torch
import numpy as np
import torch.utils.data
import torch.nn as nn
from torch.nn import functional as F
class FocalLoss(nn.Module):
def __init__(self, weight=None, gamma=1.0, num_classes=80):
super(FocalLoss, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
self.num_classes = num_classes
prior = np.array([0.119217, 0.15927, 0.570566, 0.1045, 0.04089,
0.005522])
self.prior = torch.tensor(prior).float()
self.weight_b = torch.from_numpy(np.array([1.11, 1.06, 1.01, 1.16,
1.84, 10.0, 1.0])).float()
def forward(self, input, target):
CE = F.cross_entropy(input, target, reduction='none')
p = torch.exp(-CE)
loss = (1 - p) ** self.gamma * CE
return loss.sum() / CE.shape[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
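# A hedged worked example (integer class targets are an assumption; the harness above
# feeds soft 4D targets). It makes the focal modulation visible: gamma = 0 reduces to
# plain cross-entropy averaged over the batch, while larger gamma shrinks the
# contribution of the confident, easy prediction relative to the hard one.
def _focal_demo():
    logits = torch.tensor([[4.0, 0.0], [0.1, 0.0]])  # one easy, one hard prediction
    target = torch.tensor([0, 0])
    ce = F.cross_entropy(logits, target, reduction='none')
    p = torch.exp(-ce)
    for gamma in (0.0, 1.0, 2.0):
        focal = ((1 - p) ** gamma * ce).sum() / ce.shape[0]
        print(gamma, round(focal.item(), 4))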
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = -tmp27
tmp29 = tl_math.exp(tmp28)
tmp30 = 1.0
tmp31 = tmp30 - tmp29
tmp32 = tmp31 * tmp27
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp36 = 0.25
tmp37 = tmp35 * tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1[grid(1)](
buf3, buf0, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf3,
class FocalLossNew(nn.Module):
def __init__(self, weight=None, gamma=1.0, num_classes=80):
super(FocalLossNew, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
self.num_classes = num_classes
prior = np.array([0.119217, 0.15927, 0.570566, 0.1045, 0.04089,
0.005522])
self.prior = torch.tensor(prior).float()
self.weight_b = torch.from_numpy(np.array([1.11, 1.06, 1.01, 1.16,
1.84, 10.0, 1.0])).float()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
yulonghui/yingying_boss
|
FocalLoss
| false
| 16,774
|
[
"MIT"
] | 306
|
f9cf956cb6507ef43f8005c61027f6b54f418224
|
https://github.com/yulonghui/yingying_boss/tree/f9cf956cb6507ef43f8005c61027f6b54f418224
|
GCT
|
import torch
import torch.nn as nn
class GCT(nn.Module):
def __init__(self, num_channels, epsilon=1e-05, mode='l2', after_relu=False
):
super(GCT, self).__init__()
self.alpha = nn.Parameter(torch.ones(1, num_channels, 1, 1))
self.gamma = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.beta = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.epsilon = epsilon
self.mode = mode
self.after_relu = after_relu
def forward(self, x):
if self.mode == 'l2':
embedding = (x.pow(2).sum((2, 3), keepdim=True) + self.epsilon
).pow(0.5) * self.alpha
norm = self.gamma / (embedding.pow(2).mean(dim=1, keepdim=True) +
self.epsilon).pow(0.5)
elif self.mode == 'l1':
if not self.after_relu:
_x = torch.abs(x)
else:
_x = x
embedding = _x.sum((2, 3), keepdim=True) * self.alpha
norm = self.gamma / (torch.abs(embedding).mean(dim=1, keepdim=
True) + self.epsilon)
        else:
            raise ValueError('Unknown mode: {}'.format(self.mode))
gate = 1.0 + torch.tanh(embedding * norm + self.beta)
return x * gate
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
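# A hedged sanity check (the function name is illustrative): gamma is initialised to
# zero, so the gate is 1 + tanh(0) = 1 and a freshly constructed GCT acts as the
# identity mapping; training only moves it away from that starting point.
def _gct_identity_check():
    m = GCT(num_channels=4)
    x = torch.rand(2, 4, 8, 8)
    assert torch.allclose(m(x), x)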
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_pow_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + 1)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + 2)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + 3)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp4 = tmp3 * tmp3
tmp8 = tmp5 * tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp14 = tmp11 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp20 = tmp17 * tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = 4.0
tmp24 = tmp22 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tl.store(out_ptr0 + x0, tmp27, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 / tmp4
tmp6 = tmp2 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = libdevice.tanh(tmp8)
tmp10 = 1.0
tmp11 = tmp9 + tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_3(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_add_pow_sum_0[grid(16)](buf1, primals_1, 16, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_poi_fused_add_mean_mul_pow_1[grid(4)](buf1, primals_2, buf2,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_tanh_2[grid(16)](buf1,
primals_2, primals_3, buf2, primals_4, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_tanh_3[grid(256)](primals_1,
buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
return buf4, primals_1, primals_2, primals_3, primals_4, buf1
class GCTNew(nn.Module):
def __init__(self, num_channels, epsilon=1e-05, mode='l2', after_relu=False
):
super(GCTNew, self).__init__()
self.alpha = nn.Parameter(torch.ones(1, num_channels, 1, 1))
self.gamma = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.beta = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.epsilon = epsilon
self.mode = mode
self.after_relu = after_relu
def forward(self, input_0):
primals_2 = self.alpha
primals_3 = self.gamma
primals_4 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
yoxu515/CFBI
|
GCT
| false
| 16,775
|
[
"BSD-3-Clause"
] | 312
|
0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586
|
https://github.com/yoxu515/CFBI/tree/0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586
|
MyEntLoss
|
import torch
import torch.nn as nn
class MyEntLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
x = torch.nn.Softmax(dim=1)(x)
p = x / torch.repeat_interleave(x.sum(dim=1).unsqueeze(-1), repeats
=20, dim=1)
logp = torch.log2(p)
ent = -torch.mul(p, logp)
entloss = torch.sum(ent, dim=1)
return entloss
def get_inputs():
return [torch.rand([4, 80, 4, 4])]
def get_init_inputs():
return [[], {}]
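# A hedged sanity check (the function name is illustrative): after the softmax the class
# dimension already sums to one, so the extra normalisation is effectively a no-op and
# the result is the per-pixel entropy in bits; a uniform prediction over the 80 classes
# therefore gives log2(80) everywhere.
def _entropy_demo():
    ent = MyEntLoss()(torch.zeros(2, 80, 4, 4))   # equal logits -> uniform softmax
    assert ent.shape == (2, 4, 4)
    assert torch.allclose(ent, torch.full_like(ent, 80.0).log2())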
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused__softmax_sum_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2,
xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 64
rnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 16
x1 = xindex // 16
_tmp2 = tl.full([XBLOCK, RBLOCK], float('-inf'), tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1280 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.maximum(_tmp2, tmp1)
_tmp2 = tl.where(rmask & xmask, tmp3, _tmp2)
tmp2 = triton_helpers.max2(_tmp2, 1)[:, None]
tl.store(out_ptr0 + x3, tmp2, xmask)
_tmp8 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp4 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1280 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp5 = tmp4 - tmp2
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = _tmp8 + tmp7
_tmp8 = tl.where(rmask & xmask, tmp9, _tmp8)
tmp8 = tl.sum(_tmp8, 1)[:, None]
tl.store(out_ptr1 + x3, tmp8, xmask)
_tmp15 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp10 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1280 * x1), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp12 / tmp8
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = _tmp15 + tmp14
_tmp15 = tl.where(rmask & xmask, tmp16, _tmp15)
tmp15 = tl.sum(_tmp15, 1)[:, None]
tl.store(out_ptr2 + x3, tmp15, xmask)
@triton.jit
def triton_red_fused__softmax_div_log2_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.
constexpr):
xnumel = 64
rnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x2 = xindex // 16
x4 = xindex % 16
x5 = xindex
tmp1 = tl.load(in_out_ptr0 + x5, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
x1 = xindex // 4 % 4
_tmp12 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_ptr0 + (x4 + 16 * r3 + 1280 * x2), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp6 = tl.load(in_ptr2 + (x1 + 4 * (r3 // 20) + 16 * x2), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tmp7 = tmp5 / tmp6
tmp8 = libdevice.log2(tmp7)
tmp9 = tmp7 * tmp8
tmp10 = -tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = _tmp12 + tmp11
_tmp12 = tl.where(rmask & xmask, tmp13, _tmp12)
tmp12 = tl.sum(_tmp12, 1)[:, None]
tl.store(in_out_ptr0 + x5, tmp12, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 80, 4, 4), (1280, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_red_fused__softmax_sum_0[grid(64)](arg0_1, buf0, buf1, buf2,
64, 80, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
triton_red_fused__softmax_div_log2_mul_neg_sum_1[grid(64)](buf3,
arg0_1, buf1, buf2, 64, 80, XBLOCK=64, RBLOCK=8, num_warps=4,
num_stages=1)
del arg0_1
del buf1
del buf2
return buf3,
class MyEntLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
yuantn/MI-AOD
|
MyEntLoss
| false
| 16,776
|
[
"Apache-2.0"
] | 188
|
e57114d60f9ce5e43839cdf7068a90ee58092ec8
|
https://github.com/yuantn/MI-AOD/tree/e57114d60f9ce5e43839cdf7068a90ee58092ec8
|
ActorNetwork
|
import torch
import torch.nn.functional as func
class ActorNetwork(torch.nn.Module):
def __init__(self, s_space, a_space):
super(ActorNetwork, self).__init__()
self.first_dense = torch.nn.Linear(s_space, 50)
self.second_dense = torch.nn.Linear(50, a_space)
def forward(self, s):
phi_s = func.relu(self.first_dense(s))
        prb_a = torch.sigmoid(self.second_dense(phi_s))
return prb_a
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'s_space': 4, 'a_space': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (50, 4), (4, 1))
assert_size_stride(primals_2, (50,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 50), (50, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf1,
primals_2, buf4, 3200, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 50), (50, 1), 0),
reinterpret_tensor(primals_4, (50, 4), (1, 50), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_sigmoid_1[grid(256)](buf3, primals_5, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 50), (50, 1), 0
), buf3, primals_4, buf4
class ActorNetworkNew(torch.nn.Module):
def __init__(self, s_space, a_space):
super(ActorNetworkNew, self).__init__()
self.first_dense = torch.nn.Linear(s_space, 50)
self.second_dense = torch.nn.Linear(50, a_space)
def forward(self, input_0):
primals_1 = self.first_dense.weight
primals_2 = self.first_dense.bias
primals_4 = self.second_dense.weight
primals_5 = self.second_dense.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
yutiansut/Personae
|
ActorNetwork
| false
| 16,777
|
[
"MIT"
] | 1,046
|
e5e89cbaaf2c4708952d25fdb25e99837aecdb4e
|
https://github.com/yutiansut/Personae/tree/e5e89cbaaf2c4708952d25fdb25e99837aecdb4e
|
CriticNetwork
|
import torch
import torch.nn.functional as func
class CriticNetwork(torch.nn.Module):
def __init__(self, s_space, a_space):
super(CriticNetwork, self).__init__()
self.s_dense = torch.nn.Linear(s_space, 50)
self.a_dense = torch.nn.Linear(a_space, 50)
self.q_dense = torch.nn.Linear(50, 1)
def forward(self, s, a):
phi_s = self.s_dense(s)
phi_a = self.a_dense(a)
pre_q = func.relu(phi_s + phi_a)
q_value = self.q_dense(pre_q)
return q_value
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'s_space': 4, 'a_space': 4}]
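# A hedged usage sketch (names are illustrative): state and action are embedded
# separately and merged by addition before the Q head, so the two inputs only have to
# agree in their leading (batch) dimensions.
def _critic_demo():
    critic = CriticNetwork(s_space=8, a_space=2)
    q = critic(torch.rand(32, 8), torch.rand(32, 2))
    assert q.shape == (32, 1)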
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = 0.0
tmp10 = tmp8 <= tmp9
tl.store(in_out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (50, 4), (4, 1))
assert_size_stride(primals_2, (50,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (50, 4), (4, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 50), (50, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 50), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0[grid(3200)](buf2,
primals_2, buf1, primals_5, buf5, 3200, XBLOCK=128, num_warps=4,
num_stages=1)
del buf1
del primals_2
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (64, 50),
(50, 1), 0), reinterpret_tensor(primals_7, (50, 1), (1, 50), 0),
alpha=1, beta=1, out=buf4)
del primals_8
return reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (64, 50), (50, 1), 0), primals_7, buf5
class CriticNetworkNew(torch.nn.Module):
def __init__(self, s_space, a_space):
super(CriticNetworkNew, self).__init__()
self.s_dense = torch.nn.Linear(s_space, 50)
self.a_dense = torch.nn.Linear(a_space, 50)
self.q_dense = torch.nn.Linear(50, 1)
def forward(self, input_0, input_1):
primals_1 = self.s_dense.weight
primals_2 = self.s_dense.bias
primals_4 = self.a_dense.weight
primals_5 = self.a_dense.bias
primals_7 = self.q_dense.weight
primals_8 = self.q_dense.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
yutiansut/Personae
|
CriticNetwork
| false
| 16,778
|
[
"MIT"
] | 1,046
|
e5e89cbaaf2c4708952d25fdb25e99837aecdb4e
|
https://github.com/yutiansut/Personae/tree/e5e89cbaaf2c4708952d25fdb25e99837aecdb4e
|
GELU
|
import torch
import numpy as np
import torch.nn as nn
class GELU(nn.Module):
def forward(self, x):
cdf = 0.5 * (1.0 + torch.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
return x * cdf
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
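# A hedged cross-check (assumes a PyTorch recent enough for F.gelu's approximate='tanh'
# argument, i.e. 1.12 or newer): the hand-written cdf above is exactly that tanh
# approximation, so the two should agree to float precision.
def _gelu_check():
    import torch.nn.functional as F
    x = torch.randn(1024)
    assert torch.allclose(GELU()(x), F.gelu(x, approximate='tanh'), atol=1e-6)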
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp0
tmp3 = 0.044715
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp6 = 0.7978845608028654
tmp7 = tmp6 * tmp5
tmp8 = libdevice.tanh(tmp7)
tmp9 = 1.0
tmp10 = tmp8 + tmp9
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp0 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GELUNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
yyht/Funnel_Transformer
|
GELU
| false
| 16,779
|
[
"MIT"
] | 193
|
4b35a794d5e122a8054471863a52d4eac1c39dcd
|
https://github.com/yyht/Funnel_Transformer/tree/4b35a794d5e122a8054471863a52d4eac1c39dcd
|
DynamicPreHead
|
import torch
import torch.nn as nn
class DynamicPreHead(nn.Module):
def __init__(self, in_dim=3, embed_dim=100, kernel_size=1):
super(DynamicPreHead, self).__init__()
self.conv = nn.Conv2d(in_dim, embed_dim, kernel_size=kernel_size,
stride=1, padding=int((kernel_size - 1) / 2))
self.bn = nn.GroupNorm(int(embed_dim / 4), embed_dim)
self.relu = nn.ReLU(True)
nn.init.kaiming_normal_(self.conv.weight, mode='fan_out',
nonlinearity='relu')
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_per_fused_native_group_norm_2(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 12800
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x0 = xindex % 50
x1 = xindex // 50 % 64
x2 = xindex // 3200
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 100 * ((r3 + 128 * x1) % 4096) +
409600 * x2 + (r3 + 128 * x1) // 4096), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 128, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tl.store(out_ptr0 + x4, tmp10, xmask)
tl.store(out_ptr1 + x4, tmp16, xmask)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_per_fused_native_group_norm_3(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 200
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 50
x1 = xindex // 50
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 50 * r2 + 3200 * x1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 50 * r2 + 3200 * x1), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (x0 + 50 * r2 + 3200 * x1), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp15 = tmp12[:, None]
tl.store(out_ptr0 + x3, tmp13, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
tl.store(out_ptr2 + x3, tmp15, xmask)
@triton.jit
def triton_per_fused_native_group_norm_4(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 100
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 2 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 2 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 2 * x0), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp12[:, None]
tmp16 = 16384.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(out_ptr2 + x0, tmp20, xmask)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_threshold_backward_5(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 400
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 100
y1 = yindex // 100
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 100 * x2 + 409600 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y3 // 4, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y3 // 4, ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 16384.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1, 1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = 0.0
tmp17 = tmp15 <= tmp16
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp15, ymask)
tl.store(out_ptr1 + (y0 + 100 * x2 + 409600 * y1), tmp17, ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (100, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_2, (100,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (100,), (1,))
assert_size_stride(primals_5, (100,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(12, 4096)](primals_3, buf0, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 100, 64, 64), (409600, 1, 6400, 100))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(1638400)](buf2, primals_2,
1638400, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 25, 1, 1, 2, 64), (3200, 2, 12800,
12800, 1, 50), torch.float32)
buf4 = empty_strided_cuda((4, 25, 1, 1, 2, 64), (3200, 2, 12800,
12800, 1, 50), torch.float32)
buf5 = empty_strided_cuda((4, 25, 1, 1, 2, 64), (3200, 2, 12800,
12800, 1, 50), torch.float32)
triton_per_fused_native_group_norm_2[grid(12800)](buf2, buf3, buf4,
buf5, 12800, 128, XBLOCK=8, num_warps=8, num_stages=1)
buf6 = empty_strided_cuda((4, 25, 1, 1, 2), (50, 2, 200, 200, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 25, 1, 1, 2), (50, 2, 200, 200, 1),
torch.float32)
buf8 = empty_strided_cuda((4, 25, 1, 1, 2), (50, 2, 200, 200, 1),
torch.float32)
triton_per_fused_native_group_norm_3[grid(200)](buf3, buf4, buf5,
buf6, buf7, buf8, 200, 64, XBLOCK=8, num_warps=4, num_stages=1)
del buf3
del buf4
del buf5
buf9 = empty_strided_cuda((4, 25, 1, 1), (25, 1, 100, 100), torch.
float32)
buf10 = empty_strided_cuda((4, 25, 1, 1), (25, 1, 100, 100), torch.
float32)
buf12 = empty_strided_cuda((4, 25, 1, 1), (25, 1, 100, 100), torch.
float32)
triton_per_fused_native_group_norm_4[grid(100)](buf6, buf7, buf8,
buf9, buf10, buf12, 100, 2, XBLOCK=128, num_warps=2, num_stages=1)
del buf6
del buf7
del buf8
buf13 = empty_strided_cuda((4, 100, 64, 64), (409600, 4096, 64, 1),
torch.float32)
buf14 = empty_strided_cuda((4, 100, 64, 64), (409600, 1, 6400, 100),
torch.bool)
triton_poi_fused_native_group_norm_relu_threshold_backward_5[grid(
400, 4096)](buf2, buf9, buf10, primals_4, primals_5, buf13,
buf14, 400, 4096, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1)
del buf10
del primals_5
return buf13, primals_1, buf0, primals_4, buf2, reinterpret_tensor(buf9,
(4, 25), (25, 1), 0), reinterpret_tensor(buf12, (4, 25), (25, 1), 0
), buf14
class DynamicPreHeadNew(nn.Module):
def __init__(self, in_dim=3, embed_dim=100, kernel_size=1):
super(DynamicPreHeadNew, self).__init__()
self.conv = nn.Conv2d(in_dim, embed_dim, kernel_size=kernel_size,
stride=1, padding=int((kernel_size - 1) / 2))
self.bn = nn.GroupNorm(int(embed_dim / 4), embed_dim)
self.relu = nn.ReLU(True)
nn.init.kaiming_normal_(self.conv.weight, mode='fan_out',
nonlinearity='relu')
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.bn.weight
primals_5 = self.bn.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
yoxu515/CFBI
|
DynamicPreHead
| false
| 16,780
|
[
"BSD-3-Clause"
] | 312
|
0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586
|
https://github.com/yoxu515/CFBI/tree/0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586
|
Dense
|
import torch
import numpy as np
import torch.nn as nn
def get_einsum_string(ndims, einsum_symbols=None):
if einsum_symbols is None:
einsum_symbols = ['u', 'v', 'w', 'x', 'y', 'z']
assert ndims <= len(einsum_symbols)
einsum_prefix = ''
for i in range(ndims):
einsum_prefix += einsum_symbols[i]
return einsum_prefix
def maybe_convert_to_list(x):
if isinstance(x, (int, float)):
return [x]
elif isinstance(x, (list, tuple)):
return list(x)
class Dense(nn.Module):
"""Dense layer."""
def __init__(self, inp_shape, out_shape, bias=True, reverse_order=False):
super(Dense, self).__init__()
self.inp_shape = maybe_convert_to_list(inp_shape)
self.out_shape = maybe_convert_to_list(out_shape)
self.reverse_order = reverse_order
if self.reverse_order:
self.einsum_str = '...{0},{1}{0}->...{1}'.format(get_einsum_string
(len(self.inp_shape), ['a', 'b', 'c', 'd']),
get_einsum_string(len(self.out_shape), ['e', 'f', 'g', 'h']))
weight_shape = self.out_shape + self.inp_shape
else:
self.einsum_str = '...{0},{0}{1}->...{1}'.format(get_einsum_string
(len(self.inp_shape), ['a', 'b', 'c', 'd']),
get_einsum_string(len(self.out_shape), ['e', 'f', 'g', 'h']))
weight_shape = self.inp_shape + self.out_shape
self.weight = nn.Parameter(torch.zeros(weight_shape))
if bias:
self.bias = nn.Parameter(torch.zeros(self.out_shape))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
fan_in = np.prod(self.inp_shape)
fan_out = np.prod(self.out_shape)
std = np.sqrt(1.0 / float(fan_in + fan_out))
nn.init.normal_(self.weight, std=std)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
def forward(self, inputs):
output = torch.einsum(self.einsum_str, inputs, self.weight)
if self.bias is not None:
output = output + self.bias
return output
def extra_repr(self):
return 'inp_shape={}, out_shape={}, bias={}'.format(self.inp_shape,
self.out_shape, self.bias is not None)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inp_shape': 4, 'out_shape': 4}]
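# A hedged sketch (names are illustrative): for scalar inp_shape/out_shape the layer
# reduces to a bias-added matmul, which is visible from the generated einsum string.
def _dense_vs_matmul():
    d = Dense(inp_shape=4, out_shape=3)
    assert d.einsum_str == '...a,ae->...e'
    x = torch.rand(5, 4)
    assert torch.allclose(d(x), x @ d.weight + d.bias, atol=1e-6)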
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 64, 4), (256,
4, 1), 0), reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1),
0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf1, primals_3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
return buf1, reinterpret_tensor(primals_2, (1, 4, 64), (256, 1, 4), 0)
def get_einsum_string(ndims, einsum_symbols=None):
if einsum_symbols is None:
einsum_symbols = ['u', 'v', 'w', 'x', 'y', 'z']
assert ndims <= len(einsum_symbols)
einsum_prefix = ''
for i in range(ndims):
einsum_prefix += einsum_symbols[i]
return einsum_prefix
def maybe_convert_to_list(x):
if isinstance(x, (int, float)):
return [x]
elif isinstance(x, (list, tuple)):
return list(x)
class DenseNew(nn.Module):
"""Dense layer."""
def __init__(self, inp_shape, out_shape, bias=True, reverse_order=False):
super(DenseNew, self).__init__()
self.inp_shape = maybe_convert_to_list(inp_shape)
self.out_shape = maybe_convert_to_list(out_shape)
self.reverse_order = reverse_order
if self.reverse_order:
self.einsum_str = '...{0},{1}{0}->...{1}'.format(get_einsum_string
(len(self.inp_shape), ['a', 'b', 'c', 'd']),
get_einsum_string(len(self.out_shape), ['e', 'f', 'g', 'h']))
weight_shape = self.out_shape + self.inp_shape
else:
self.einsum_str = '...{0},{0}{1}->...{1}'.format(get_einsum_string
(len(self.inp_shape), ['a', 'b', 'c', 'd']),
get_einsum_string(len(self.out_shape), ['e', 'f', 'g', 'h']))
weight_shape = self.inp_shape + self.out_shape
self.weight = nn.Parameter(torch.zeros(weight_shape))
if bias:
self.bias = nn.Parameter(torch.zeros(self.out_shape))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
fan_in = np.prod(self.inp_shape)
fan_out = np.prod(self.out_shape)
std = np.sqrt(1.0 / float(fan_in + fan_out))
nn.init.normal_(self.weight, std=std)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
def extra_repr(self):
return 'inp_shape={}, out_shape={}, bias={}'.format(self.inp_shape,
self.out_shape, self.bias is not None)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
yyht/Funnel_Transformer
|
Dense
| false
| 16,781
|
[
"MIT"
] | 193
|
4b35a794d5e122a8054471863a52d4eac1c39dcd
|
https://github.com/yyht/Funnel_Transformer/tree/4b35a794d5e122a8054471863a52d4eac1c39dcd
|
InteractionLayer
|
import math
import torch
from torch import nn
import torch.nn.functional as F
class InteractionLayer(nn.Module):
def __init__(self, d_model, d_feature, dropout=0.1):
super().__init__()
self.d_feature = d_feature
self.det_tfm = nn.Linear(d_model, d_feature)
self.rel_tfm = nn.Linear(d_model, d_feature)
self.det_value_tfm = nn.Linear(d_model, d_feature)
self.rel_norm = nn.LayerNorm(d_model)
if dropout is not None:
self.dropout = dropout
self.det_dropout = nn.Dropout(dropout)
self.rel_add_dropout = nn.Dropout(dropout)
else:
self.dropout = None
def forward(self, det_in, rel_in):
det_attn_in = self.det_tfm(det_in)
rel_attn_in = self.rel_tfm(rel_in)
det_value = self.det_value_tfm(det_in)
scores = torch.matmul(det_attn_in.transpose(0, 1), rel_attn_in.
permute(1, 2, 0)) / math.sqrt(self.d_feature)
det_weight = F.softmax(scores.transpose(1, 2), dim=-1)
if self.dropout is not None:
det_weight = self.det_dropout(det_weight)
        rel_add = torch.matmul(det_weight, det_value.transpose(0, 1))
        if self.dropout is not None:
            rel_add = self.rel_add_dropout(rel_add)
        rel_out = rel_add + rel_in.transpose(0, 1)
rel_out = self.rel_norm(rel_out)
return det_in, rel_out.transpose(0, 1)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_feature': 4}]
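# A hedged usage sketch (names are illustrative): both inputs are sequence-first, i.e.
# det_in is (num_det, batch, d_model) and rel_in is (num_rel, batch, d_model); det_in is
# returned untouched while rel_in is updated by attending over the detection tokens.
def _interaction_demo():
    layer = InteractionLayer(d_model=4, d_feature=4).eval()  # eval() silences the dropouts
    det_in, rel_in = torch.rand(6, 2, 4), torch.rand(5, 2, 4)
    det_out, rel_out = layer(det_in, rel_in)
    assert det_out is det_in and rel_out.shape == rel_in.shape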
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
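# The two kernels below implement a numerically stable softmax over the last
# dimension of the attention scores: the first fuses the 1/sqrt(d_feature)
# scaling (= 0.5 for the d_feature=4 test case), subtracts the row maximum and
# exponentiates; the second divides by the row sum.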
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (y0 + 16 * y1), ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + y0 + 16 * y1), ymask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + y0 + 16 * y1), ymask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + y0 + 16 * y1), ymask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 * x1 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1 + 16 * x0), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x1 + 16 * x0), xmask, eviction_policy
='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x1 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x4 = xindex // 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (16,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_7
del primals_8
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (4, 16, 1),
0), reinterpret_tensor(buf1, (4, 4, 4), (4, 1, 16), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_1[grid(16, 4)](buf4, buf5, 16, 4, XBLOCK=
4, YBLOCK=16, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 4), (4, 16,
1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(16)](buf6, primals_6,
buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_3[grid(64)](buf6, primals_6,
buf7, buf8, primals_9, primals_10, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf7
del buf8
del primals_10
return reinterpret_tensor(buf9, (4, 4, 4), (4, 16, 1), 0
), primals_6, primals_9, reinterpret_tensor(primals_3, (16, 4), (4,
1), 0), buf5, buf6, reinterpret_tensor(buf2, (4, 4, 4), (4, 1, 16), 0
), reinterpret_tensor(buf0, (4, 4, 4), (4, 1, 16), 0
), reinterpret_tensor(buf1, (4, 4, 4), (4, 16, 1), 0)
class InteractionLayerNew(nn.Module):
def __init__(self, d_model, d_feature, dropout=0.1):
super().__init__()
self.d_feature = d_feature
self.det_tfm = nn.Linear(d_model, d_feature)
self.rel_tfm = nn.Linear(d_model, d_feature)
self.det_value_tfm = nn.Linear(d_model, d_feature)
self.rel_norm = nn.LayerNorm(d_model)
if dropout is not None:
self.dropout = dropout
self.det_dropout = nn.Dropout(dropout)
self.rel_add_dropout = nn.Dropout(dropout)
else:
self.dropout = None
def forward(self, input_0, input_1):
primals_1 = self.det_tfm.weight
primals_2 = self.det_tfm.bias
primals_4 = self.rel_tfm.weight
primals_5 = self.rel_tfm.bias
primals_7 = self.det_value_tfm.weight
primals_8 = self.det_value_tfm.bias
primals_9 = self.rel_norm.weight
primals_10 = self.rel_norm.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1]
|
yoyomimi/AS-Net
|
InteractionLayer
| false
| 16,782
|
[
"MIT"
] | 49
|
85ce753707c6d1838c3983111ccbba4b1861f438
|
https://github.com/yoyomimi/AS-Net/tree/85ce753707c6d1838c3983111ccbba4b1861f438
|
Biaffine
|
import torch
import torch.nn as nn
class Biaffine(nn.Module):
"""
Biaffine layer for first-order scoring :cite:`dozat-etal-2017-biaffine`.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y)` of the vector pair :math:`(x, y)` is computed as :math:`x^T W y / d^s`,
where `d` and `s` are vector dimension and scaling factor respectively.
:math:`x` and :math:`y` can be concatenated with bias terms.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
scale (float):
Factor to scale the scores. Default: 0.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``True``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``True``.
"""
def __init__(self, n_in, n_out=1, scale=0, bias_x=True, bias_y=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.scale = scale
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.n_out > 1:
s += f', n_out={self.n_out}'
if self.scale != 0:
s += f', scale={self.scale}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y):
"""
Args:
x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len]``.
If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically.
"""
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y
) / self.n_in ** self.scale
s = s.squeeze(1)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
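# A minimal usage sketch (illustrative, not part of the repo).  With the default
# bias terms the weight has shape (n_out, n_in + 1, n_in + 1) and the einsum
# yields one seq_len x seq_len score matrix per batch element (n_out squeezed).
if __name__ == '__main__':
    biaffine = Biaffine(n_in=4)
    x = torch.rand(2, 5, 4)
    y = torch.rand(2, 5, 4)
    s = biaffine(x, y)
    print(s.shape)  # torch.Size([2, 5, 5])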
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
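# The kernel below fuses the bias concatenation: it copies each (4, 4, 4) input
# into a (4, 4, 5) buffer, keeping the first four columns and filling the fifth
# with ones, i.e. torch.cat((t, torch.ones_like(t[..., :1])), -1).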
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_div_squeeze_1(in_ptr0, out_ptr1, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr1 + (y0 + 4 * x2 + 16 * y1), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](primals_1, buf0, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1),
0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(80)](primals_2, buf2, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1,
5), 0), out=buf3)
del buf1
buf5 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_div_squeeze_1[grid(16, 4)](buf3, buf5, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf3
return buf5, reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0
), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0)
class BiaffineNew(nn.Module):
"""
Biaffine layer for first-order scoring :cite:`dozat-etal-2017-biaffine`.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y)` of the vector pair :math:`(x, y)` is computed as :math:`x^T W y / d^s`,
where `d` and `s` are vector dimension and scaling factor respectively.
:math:`x` and :math:`y` can be concatenated with bias terms.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
scale (float):
Factor to scale the scores. Default: 0.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``True``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``True``.
"""
def __init__(self, n_in, n_out=1, scale=0, bias_x=True, bias_y=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.scale = scale
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.n_out > 1:
s += f', n_out={self.n_out}'
if self.scale != 0:
s += f', scale={self.scale}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
yzhangcs/parser
|
Biaffine
| false
| 16,783
|
[
"MIT"
] | 439
|
3abebde1c9fe0bf2e99adce845aaf2a04b194f8a
|
https://github.com/yzhangcs/parser/tree/3abebde1c9fe0bf2e99adce845aaf2a04b194f8a
|
SAN
|
import torch
import torch.nn as nn
class SAN(nn.Module):
def __init__(self, d_model, nhead, dropout=0.1):
super(SAN, self).__init__()
self.d_model = d_model
self.nhead = nhead
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.dropout = nn.Dropout(p=dropout)
self.norm = nn.LayerNorm(d_model)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
"""
:param src:
:param src_mask:
:param src_key_padding_mask:
:return:
"""
src2, _ = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)
src = src + self.dropout(src2)
src = self.norm(src)
return src
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
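# A minimal usage sketch (illustrative, not part of the repo): one self-attention
# block with a residual connection and LayerNorm.  nn.MultiheadAttention defaults
# to the [seq_len, batch, d_model] layout, so the shapes below are assumptions
# chosen to match that convention.
if __name__ == '__main__':
    san = SAN(d_model=4, nhead=2)
    src = torch.rand(5, 2, 4)
    out = san(src)
    print(out.shape)  # torch.Size([5, 2, 4])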
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4,
1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(4)](primals_1, buf9,
buf10, buf11, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf9,
buf10, buf11, primals_6, primals_7, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf10
del buf11
del primals_7
return buf12, primals_1, primals_6, buf6, reinterpret_tensor(buf8, (4,
4), (4, 1), 0), buf9, primals_4, reinterpret_tensor(buf2, (4, 1, 4),
(1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0)
class SANNew(nn.Module):
def __init__(self, d_model, nhead, dropout=0.1):
super(SANNew, self).__init__()
self.d_model = d_model
self.nhead = nhead
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.dropout = nn.Dropout(p=dropout)
self.norm = nn.LayerNorm(d_model)
def forward(self, input_0):
primals_2 = self.self_attn.in_proj_weight
primals_3 = self.self_attn.in_proj_bias
primals_1 = self.self_attn.out_proj.weight
primals_5 = self.self_attn.out_proj.bias
primals_6 = self.norm.weight
primals_7 = self.norm.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
yuriy-os/russian-reviews-bert-e2e-absa
|
SAN
| false
| 16,784
|
[
"Apache-2.0"
] | 293
|
369a6179353e3bf28643e8e9347b624078e38bf4
|
https://github.com/yuriy-os/russian-reviews-bert-e2e-absa/tree/369a6179353e3bf28643e8e9347b624078e38bf4
|
Triaffine
|
import torch
import torch.nn as nn
class Triaffine(nn.Module):
"""
Triaffine layer for second-order scoring :cite:`zhang-etal-2020-efficient,wang-etal-2019-second`.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y, z)` of the vector triple :math:`(x, y, z)` is computed as :math:`x^T z^T W y / d^s`,
where `d` and `s` are vector dimension and scaling factor respectively.
:math:`x` and :math:`y` can be concatenated with bias terms.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
scale (float):
Factor to scale the scores. Default: 0.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``False``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``False``.
"""
def __init__(self, n_in, n_out=1, scale=0, bias_x=False, bias_y=False):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.scale = scale
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in,
n_in + bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.n_out > 1:
s += f', n_out={self.n_out}'
if self.scale != 0:
s += f', scale={self.scale}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y, z):
"""
Args:
x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
z (torch.Tensor): ``[batch_size, seq_len, n_in]``.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len, seq_len]``.
If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically.
"""
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
w = torch.einsum('bzk,oikj->bozij', z, self.weight)
s = torch.einsum('bxi,bozij,byj->bozxy', x, w, y
) / self.n_in ** self.scale
s = s.squeeze(1)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'n_in': 4}]
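# A minimal usage sketch (illustrative, not part of the repo): second-order
# scoring over triples of token representations.  With the defaults (n_out=1,
# no bias terms) the result is a [batch_size, seq_len, seq_len, seq_len] tensor.
if __name__ == '__main__':
    triaffine = Triaffine(n_in=4)
    x, y, z = torch.rand(2, 5, 4), torch.rand(2, 5, 4), torch.rand(2, 5, 4)
    s = triaffine(x, y, z)
    print(s.shape)  # torch.Size([2, 5, 5, 5])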
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = xindex // 4
y0 = yindex % 4
y1 = yindex // 4
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x3 + 16 * x2 + 64 * y1), xmask & ymask)
tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_div_squeeze_3(in_ptr0, out_ptr1, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr1 + (y0 + 16 * x2 + 64 * y1), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 1, 1), (16, 16, 4, 1, 1, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4,
1), 0), reinterpret_tensor(buf0, (1, 4, 16), (0, 16, 1), 0),
out=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 4, 1, 4, 1), (64, 16, 16, 4, 4,
1, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0)
del buf1
extern_kernels.bmm(primals_4, reinterpret_tensor(buf2, (4, 4, 16),
(64, 16, 1), 0), out=buf3)
buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4, 4, 1, 1), (64, 16, 16,
4, 1, 1, 1), 0)
del buf2
triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf5 = buf3
del buf3
extern_kernels.bmm(primals_3, reinterpret_tensor(buf4, (4, 4, 16),
(64, 16, 1), 0), out=buf5)
buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 4, 1, 16), 0)
del buf4
triton_poi_fused_div_squeeze_3[grid(64, 4)](buf5, buf7, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del buf5
return buf7, reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_2, (1, 4, 16), (64, 1, 4), 0)
class TriaffineNew(nn.Module):
"""
Triaffine layer for second-order scoring :cite:`zhang-etal-2020-efficient,wang-etal-2019-second`.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y, z)` of the vector triple :math:`(x, y, z)` is computed as :math:`x^T z^T W y / d^s`,
where `d` and `s` are vector dimension and scaling factor respectively.
:math:`x` and :math:`y` can be concatenated with bias terms.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
scale (float):
Factor to scale the scores. Default: 0.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``False``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``False``.
"""
def __init__(self, n_in, n_out=1, scale=0, bias_x=False, bias_y=False):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.scale = scale
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in,
n_in + bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.n_out > 1:
s += f', n_out={self.n_out}'
if self.scale != 0:
s += f', scale={self.scale}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, input_0, input_1, input_2):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
yzhangcs/parser
|
Triaffine
| false
| 16,785
|
[
"MIT"
] | 439
|
3abebde1c9fe0bf2e99adce845aaf2a04b194f8a
|
https://github.com/yzhangcs/parser/tree/3abebde1c9fe0bf2e99adce845aaf2a04b194f8a
|
GlobalMaxPooling
|
import torch
import torch.nn as nn
class GlobalMaxPooling(nn.Module):
def __init__(self):
super(GlobalMaxPooling, self).__init__()
def forward(self, x):
res, _ = torch.max(x, dim=1)
return res
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
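# The kernel below reduces the (4, 4, 4, 4) input over dim=1 with pairwise
# maximums, matching torch.max(x, dim=1) in GlobalMaxPooling.forward.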
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class GlobalMaxPoolingNew(nn.Module):
def __init__(self):
super(GlobalMaxPoolingNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
zake7749/Sequence-to-Sequence-101
|
GlobalMaxPooling
| false
| 16,786
|
[
"MIT"
] | 64
|
f9e9a8e836dc1cb3b35d6e148f6378fcd2736951
|
https://github.com/zake7749/Sequence-to-Sequence-101/tree/f9e9a8e836dc1cb3b35d6e148f6378fcd2736951
|
PositionalEmbedding
|
import torch
import torch.nn as nn
class PositionalEmbedding(nn.Module):
def __init__(self, n_model, max_len=1024):
super().__init__()
self.embed = nn.Embedding(max_len, n_model)
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
w = self.embed.weight
max_len, n_model = w.shape
w = w.new_tensor(range(max_len)).unsqueeze(-1) / 10000 ** (w.
new_tensor(range(n_model)) // 2 * 2 / n_model)
w[:, 0::2], w[:, 1::2] = w[:, 0::2].sin(), w[:, 1::2].cos()
self.embed.weight.copy_(w)
def forward(self, x):
return self.embed(x.new_tensor(range(x.shape[1])).long())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_model': 4}]
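# A minimal usage sketch (illustrative, not part of the repo): the embedding
# table is filled with the standard sinusoidal encoding at construction time,
# and forward() looks up one row per position along dim 1 of the input, so only
# x.shape[1] matters.  Shapes below are assumptions for illustration.
if __name__ == '__main__':
    pos = PositionalEmbedding(n_model=8, max_len=16)
    x = torch.rand(2, 5, 8)
    pe = pos(x)
    print(pe.shape)  # torch.Size([5, 8])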
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_lift_fresh_0(out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = tmp13.to(tl.int32)
tl.store(out_ptr0 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_embedding_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = tmp13.to(tl.int32)
tmp15 = tl.load(in_ptr0 + (x0 + 4 * tmp14), xmask)
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_lift_fresh_0[grid(4)](buf0, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_embedding_1[grid(16)](primals_2, buf1, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del primals_2
return buf1, buf0
class PositionalEmbeddingNew(nn.Module):
def __init__(self, n_model, max_len=1024):
super().__init__()
self.embed = nn.Embedding(max_len, n_model)
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
w = self.embed.weight
max_len, n_model = w.shape
w = w.new_tensor(range(max_len)).unsqueeze(-1) / 10000 ** (w.
new_tensor(range(n_model)) // 2 * 2 / n_model)
w[:, 0::2], w[:, 1::2] = w[:, 0::2].sin(), w[:, 1::2].cos()
self.embed.weight.copy_(w)
def forward(self, input_0):
primals_2 = self.embed.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
yzhangcs/parser
|
PositionalEmbedding
| false
| 16,787
|
[
"MIT"
] | 439
|
3abebde1c9fe0bf2e99adce845aaf2a04b194f8a
|
https://github.com/yzhangcs/parser/tree/3abebde1c9fe0bf2e99adce845aaf2a04b194f8a
|
KernelConv
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class KernelConv(nn.Module):
"""
the class of computing prediction
"""
def __init__(self, kernel_size=[5], sep_conv=False, core_bias=False):
super(KernelConv, self).__init__()
self.kernel_size = sorted(kernel_size)
self.sep_conv = sep_conv
self.core_bias = core_bias
def _sep_conv_core(self, core, batch_size, N, color, height, width):
"""
convert the sep_conv core to conv2d core
2p --> p^2
:param core: shape: batch*(N*2*K)*height*width
:return:
"""
kernel_total = sum(self.kernel_size)
core = core.view(batch_size, N, -1, color, height, width)
if not self.core_bias:
core_1, core_2 = torch.split(core, kernel_total, dim=2)
else:
core_1, core_2, core_3 = torch.split(core, kernel_total, dim=2)
core_out = {}
cur = 0
for K in self.kernel_size:
t1 = core_1[:, :, cur:cur + K, ...].view(batch_size, N, K, 1, 3,
height, width)
t2 = core_2[:, :, cur:cur + K, ...].view(batch_size, N, 1, K, 3,
height, width)
core_out[K] = torch.einsum('ijklno,ijlmno->ijkmno', [t1, t2]).view(
batch_size, N, K * K, color, height, width)
cur += K
return core_out, None if not self.core_bias else core_3.squeeze()
def _convert_dict(self, core, batch_size, N, color, height, width):
"""
make sure the core to be a dict, generally, only one kind of kernel size is suitable for the func.
:param core: shape: batch_size*(N*K*K)*height*width
:return: core_out, a dict
"""
core_out = {}
core = core.view(batch_size, N, -1, color, height, width)
core_out[self.kernel_size[0]] = core[:, :, 0:self.kernel_size[0] **
2, ...]
bias = None if not self.core_bias else core[:, :, -1, ...]
return core_out, bias
def forward(self, frames, core, white_level=1.0, rate=1):
"""
compute the pred image according to core and frames
:param frames: [batch_size, N, 3, height, width]
:param core: [batch_size, N, dict(kernel), 3, height, width]
:return:
"""
if len(frames.size()) == 5:
batch_size, N, color, height, width = frames.size()
else:
batch_size, N, height, width = frames.size()
color = 1
frames = frames.view(batch_size, N, color, height, width)
if self.sep_conv:
core, bias = self._sep_conv_core(core, batch_size, N, color,
height, width)
else:
core, bias = self._convert_dict(core, batch_size, N, color,
height, width)
img_stack = []
pred_img = []
kernel = self.kernel_size[::-1]
for index, K in enumerate(kernel):
if not img_stack:
padding_num = K // 2 * rate
frame_pad = F.pad(frames, [padding_num, padding_num,
padding_num, padding_num])
for i in range(0, K):
for j in range(0, K):
img_stack.append(frame_pad[..., i * rate:i * rate +
height, j * rate:j * rate + width])
img_stack = torch.stack(img_stack, dim=2)
else:
k_diff = (kernel[index - 1] - kernel[index]) // 2
img_stack = img_stack[:, :, k_diff:-k_diff, ...]
pred_img.append(torch.sum(core[K].mul(img_stack), dim=2,
keepdim=False))
pred_img = torch.stack(pred_img, dim=0)
pred_img_i = torch.mean(pred_img, dim=0, keepdim=False)
pred_img_i = pred_img_i.squeeze(2)
if self.core_bias:
if bias is None:
raise ValueError('The bias should not be None.')
pred_img_i += bias
pred_img_i = pred_img_i / white_level
return pred_img_i
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
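# A minimal usage sketch (illustrative, not part of the repo): a single 3x3
# kernel per pixel (kernel_size=[3], no separable cores, no bias).  The core is
# reshaped internally to [batch, N, K*K, color, H, W], so it is passed here as
# [batch, N*K*K*color, H, W]; all shapes are assumptions for illustration.
if __name__ == '__main__':
    kconv = KernelConv(kernel_size=[3])
    frames = torch.rand(1, 2, 3, 8, 8)
    core = torch.rand(1, 2 * 3 * 3 * 3, 8, 8)
    pred = kconv(frames, core)
    print(pred.shape)  # torch.Size([1, 2, 3, 8, 8])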
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-10 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-9 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
x2 = xindex // 16
x4 = xindex % 16
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-8 + x3), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-7 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-6 + x4), tmp10 & xmask, other=0.0)
tmp12 = -1 + x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = -2 + x0
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = tmp13 & tmp14
tmp19 = tmp18 & tmp16
tmp20 = tmp19 & tmp17
tmp21 = tl.load(in_ptr0 + (-6 + x4), tmp20 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
tl.store(out_ptr1 + (x5 + 400 * x2), tmp21, xmask)
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
x2 = xindex // 16
x4 = xindex % 16
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-4 + x3), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-3 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-2 + x4), tmp10 & xmask, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = -2 + x0
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = tmp13 & tmp14
tmp19 = tmp18 & tmp16
tmp20 = tmp19 & tmp17
tmp21 = tl.load(in_ptr0 + (-2 + x4), tmp20 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
tl.store(out_ptr1 + (x5 + 400 * x2), tmp21, xmask)
@triton.jit
def triton_poi_fused_stack_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-1 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
x2 = xindex // 16
x4 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + x3, tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (1 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (2 + x4), tmp10 & xmask, other=0.0)
tmp12 = 1 + x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = -2 + x0
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = tmp13 & tmp14
tmp19 = tmp18 & tmp16
tmp20 = tmp19 & tmp17
tmp21 = tl.load(in_ptr0 + (2 + x4), tmp20 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
tl.store(out_ptr1 + (x5 + 400 * x2), tmp21, xmask)
@triton.jit
def triton_poi_fused_stack_13(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = 1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (3 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_14(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
x2 = xindex // 16
x4 = xindex % 16
tmp0 = 1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (4 + x3), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_15(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = 1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (5 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_16(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = 1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (6 + x4), tmp10 & xmask, other=0.0)
tmp12 = 2 + x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = -2 + x0
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = tmp13 & tmp14
tmp19 = tmp18 & tmp16
tmp20 = tmp19 & tmp17
tmp21 = tl.load(in_ptr0 + (6 + x4), tmp20 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
tl.store(out_ptr1 + (x5 + 400 * x2), tmp21, xmask)
@triton.jit
def triton_poi_fused_stack_17(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = 2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (7 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_18(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
x2 = xindex // 16
x4 = xindex % 16
tmp0 = 2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (8 + x3), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_19(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = 2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (9 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_20(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = 2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (10 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_per_fused_div_mul_sum_21(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 256
rnumel = 25
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
x3 = xindex
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 16 * r2 + 400 * x1), rmask & xmask,
other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp6 / tmp7
tmp9 = tmp8 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp9, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf25 = empty_strided_cuda((4, 4, 25, 4, 4), (1600, 400, 16, 4, 1),
torch.float32)
buf0 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 0)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf1 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 16)
triton_poi_fused_stack_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 32)
triton_poi_fused_stack_2[grid(256)](arg0_1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 48)
triton_poi_fused_stack_3[grid(256)](arg0_1, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 64)
buf5 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 80)
triton_poi_fused_stack_4[grid(256)](arg0_1, buf4, buf5, 256, XBLOCK
=128, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 96)
triton_poi_fused_stack_5[grid(256)](arg0_1, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 112)
triton_poi_fused_stack_6[grid(256)](arg0_1, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 128)
triton_poi_fused_stack_7[grid(256)](arg0_1, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 144)
buf10 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 160)
triton_poi_fused_stack_8[grid(256)](arg0_1, buf9, buf10, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 176)
triton_poi_fused_stack_9[grid(256)](arg0_1, buf11, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 192)
triton_poi_fused_stack_10[grid(256)](arg0_1, buf12, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 208)
triton_poi_fused_stack_11[grid(256)](arg0_1, buf13, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf14 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 224)
buf15 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 240)
triton_poi_fused_stack_12[grid(256)](arg0_1, buf14, buf15, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf16 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 256)
triton_poi_fused_stack_13[grid(256)](arg0_1, buf16, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf17 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 272)
triton_poi_fused_stack_14[grid(256)](arg0_1, buf17, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf18 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 288)
triton_poi_fused_stack_15[grid(256)](arg0_1, buf18, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf19 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 304)
buf20 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 320)
triton_poi_fused_stack_16[grid(256)](arg0_1, buf19, buf20, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf21 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 336)
triton_poi_fused_stack_17[grid(256)](arg0_1, buf21, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf22 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 352)
triton_poi_fused_stack_18[grid(256)](arg0_1, buf22, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf23 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 368)
triton_poi_fused_stack_19[grid(256)](arg0_1, buf23, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf24 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 384)
triton_poi_fused_stack_20[grid(256)](arg0_1, buf24, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf26 = empty_strided_cuda((4, 4, 1, 4, 4), (64, 16, 16, 4, 1),
torch.float32)
buf27 = reinterpret_tensor(buf26, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf26
triton_per_fused_div_mul_sum_21[grid(256)](buf27, arg1_1, buf25,
256, 25, XBLOCK=8, num_warps=2, num_stages=1)
del arg1_1
del buf0
del buf1
del buf10
del buf11
del buf12
del buf13
del buf14
del buf15
del buf16
del buf17
del buf18
del buf19
del buf2
del buf20
del buf21
del buf22
del buf23
del buf24
del buf25
del buf3
del buf4
del buf5
del buf6
del buf7
del buf8
del buf9
return buf27,
class KernelConvNew(nn.Module):
"""
the class of computing prediction
"""
def __init__(self, kernel_size=[5], sep_conv=False, core_bias=False):
super(KernelConvNew, self).__init__()
self.kernel_size = sorted(kernel_size)
self.sep_conv = sep_conv
self.core_bias = core_bias
def _sep_conv_core(self, core, batch_size, N, color, height, width):
"""
convert the sep_conv core to conv2d core
2p --> p^2
:param core: shape: batch*(N*2*K)*height*width
:return:
"""
kernel_total = sum(self.kernel_size)
core = core.view(batch_size, N, -1, color, height, width)
if not self.core_bias:
core_1, core_2 = torch.split(core, kernel_total, dim=2)
else:
core_1, core_2, core_3 = torch.split(core, kernel_total, dim=2)
core_out = {}
cur = 0
for K in self.kernel_size:
t1 = core_1[:, :, cur:cur + K, ...].view(batch_size, N, K, 1, 3,
height, width)
t2 = core_2[:, :, cur:cur + K, ...].view(batch_size, N, 1, K, 3,
height, width)
core_out[K] = torch.einsum('ijklno,ijlmno->ijkmno', [t1, t2]).view(
batch_size, N, K * K, color, height, width)
cur += K
return core_out, None if not self.core_bias else core_3.squeeze()
def _convert_dict(self, core, batch_size, N, color, height, width):
"""
make sure the core to be a dict, generally, only one kind of kernel size is suitable for the func.
:param core: shape: batch_size*(N*K*K)*height*width
:return: core_out, a dict
"""
core_out = {}
core = core.view(batch_size, N, -1, color, height, width)
core_out[self.kernel_size[0]] = core[:, :, 0:self.kernel_size[0] **
2, ...]
bias = None if not self.core_bias else core[:, :, -1, ...]
return core_out, bias
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
xenbaloch/efficientderain
|
KernelConv
| false
| 16,788
|
[
"MIT"
] | 109
|
d5646815fd14a5a03c859102ecd2f298db7e53be
|
https://github.com/xenbaloch/efficientderain/tree/d5646815fd14a5a03c859102ecd2f298db7e53be
|
SinusoidPositionalEmbedding
|
import torch
import torch.nn as nn
class SinusoidPositionalEmbedding(nn.Module):
def forward(self, x):
seq_len, n_model = x[0].shape
pos = x.new_tensor(range(seq_len)).unsqueeze(-1) / 10000 ** (x.
new_tensor(range(n_model)) // 2 * 2 / n_model)
pos[:, 0::2], pos[:, 1::2] = pos[:, 0::2].sin(), pos[:, 1::2].cos()
return pos
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
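# A minimal, hypothetical usage sketch: the positional table is derived only
# from the shape of x[0], so the input values themselves are irrelevant.
# (The shapes below are assumptions chosen to match get_inputs().)
emb = SinusoidPositionalEmbedding()
x = torch.rand(4, 4, 4)   # (batch, seq_len, n_model)
pos = emb(x)              # positional table of shape (seq_len, n_model)
assert pos.shape == (4, 4)
assert torch.isclose(pos[0, 0], torch.tensor(0.0))  # sin(0) at position 0, even dim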
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = x0
tmp15 = tmp14 < tmp1
tmp16 = tmp14 < tmp3
tmp17 = tl.where(tmp16, tmp5, tmp6)
tmp18 = tmp14 < tmp8
tmp19 = tl.where(tmp18, tmp10, tmp11)
tmp20 = tl.where(tmp15, tmp17, tmp19)
tmp21 = 0.5
tmp22 = tmp20 * tmp21
tmp23 = libdevice.floor(tmp22)
tmp24 = tmp23 * tmp10
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp27 = 10000.0
tmp28 = libdevice.pow(tmp27, tmp26)
tmp29 = tmp13 / tmp28
tl.store(out_ptr0 + x2, tmp29, xmask)
@triton.jit
def triton_poi_fused_copy_cos_sin_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = x2 % 2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 == tmp1
tmp3 = tl.load(in_ptr0 + (2 * (x0 // 2) + 4 * x1), tmp2 & xmask,
eviction_policy='evict_last', other=0.0)
tmp4 = tl_math.sin(tmp3)
tmp5 = tl.full(tmp4.shape, 0.0, tmp4.dtype)
tmp6 = tl.where(tmp2, tmp4, tmp5)
tmp7 = x1
tmp8 = tl.full([1], 2, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp7 < tmp10
tmp12 = 0.0
tmp13 = 1.0
tmp14 = tl.where(tmp11, tmp12, tmp13)
tmp15 = tl.full([1], 3, tl.int64)
tmp16 = tmp7 < tmp15
tmp17 = 2.0
tmp18 = 3.0
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp14, tmp19)
tmp21 = x0
tmp22 = tmp21 < tmp8
tmp23 = tmp21 < tmp10
tmp24 = tl.where(tmp23, tmp12, tmp13)
tmp25 = tmp21 < tmp15
tmp26 = tl.where(tmp25, tmp17, tmp18)
tmp27 = tl.where(tmp22, tmp24, tmp26)
tmp28 = 0.5
tmp29 = tmp27 * tmp28
tmp30 = libdevice.floor(tmp29)
tmp31 = tmp30 * tmp17
tmp32 = 0.25
tmp33 = tmp31 * tmp32
tmp34 = 10000.0
tmp35 = libdevice.pow(tmp34, tmp33)
tmp36 = tmp20 / tmp35
tmp37 = tl.where(tmp2, tmp6, tmp36)
tmp38 = tmp21 >= tmp10
tmp39 = (-1 + x0) % 2
tmp40 = tmp39 == tmp1
tmp41 = tmp38 & tmp40
tmp42 = tl.load(in_ptr0 + (1 + 2 * triton_helpers.div_floor_integer(-1 +
x0, 2) + 4 * x1), tmp41 & xmask, eviction_policy='evict_last',
other=0.0)
tmp43 = tl_math.cos(tmp42)
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp41, tmp43, tmp44)
tmp46 = tl.where(tmp41, tmp45, tmp37)
tl.store(in_out_ptr0 + x2, tmp46, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0[grid(16)](buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = buf1
del buf1
triton_poi_fused_copy_cos_sin_1[grid(16)](buf2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
return buf2,
class SinusoidPositionalEmbeddingNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
yzhangcs/parser
|
SinusoidPositionalEmbedding
| false
| 16,789
|
[
"MIT"
] | 439
|
3abebde1c9fe0bf2e99adce845aaf2a04b194f8a
|
https://github.com/yzhangcs/parser/tree/3abebde1c9fe0bf2e99adce845aaf2a04b194f8a
|
OfflineTripletLoss
|
import torch
import torch.nn.functional as F
from torch import nn
class OfflineTripletLoss(nn.Module):
"""
Triplet loss
Takes embeddings of an anchor sample, a positive sample and a negative sample
"""
def __init__(self, margin=0.1):
super(OfflineTripletLoss, self).__init__()
self.margin = margin
def forward(self, inputs, size_average=True):
batchsize = inputs[0].size(0)
anchor = inputs[0][0:int(batchsize / 3)]
positive = inputs[0][int(batchsize / 3):int(batchsize * 2 / 3)]
negative = inputs[0][int(batchsize * 2 / 3):]
anchor = anchor.view(int(batchsize / 3), -1)
positive = positive.view(int(batchsize / 3), -1)
negative = negative.view(int(batchsize / 3), -1)
distance_positive = (anchor - positive).pow(2).sum(1)
distance_negative = (anchor - negative).pow(2).sum(1)
losses = F.relu(distance_positive - distance_negative + self.margin)
return losses.mean() if size_average else losses.sum()
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
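# A minimal, hypothetical usage sketch. The batch layout (anchors, then
# positives, then negatives stacked along dim 0) is an assumption read off
# the slicing in forward() above.
criterion = OfflineTripletLoss(margin=0.1)
anchors = torch.randn(2, 8)
positives = anchors + 0.01 * torch.randn(2, 8)   # near the anchors
negatives = torch.randn(2, 8)                    # unrelated embeddings
batch = torch.cat([anchors, positives, negatives], dim=0)   # shape (6, 8)
loss = criterion([batch])   # scalar mean hinge over the 2 triplets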
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2 = tl.load(in_ptr0 + (2 + r0), None)
tmp8 = tl.broadcast_to(tmp0, [XBLOCK, 1])
tmp9 = tl.load(in_ptr0 + 1)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, 1])
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp11 = tmp8 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp12 - tmp7
tmp14 = 0.1
tmp15 = tmp13 + tmp14
tmp16 = tl.full([1, 1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp18 = 1.0
tmp19 = tmp17 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1,), (1,), torch.float32)
buf1 = reinterpret_tensor(buf0, (), (), 0)
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_pow_relu_sub_sum_0[grid(1)](buf1, arg0_1,
1, 2, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class OfflineTripletLossNew(nn.Module):
"""
Triplet loss
Takes embeddings of an anchor sample, a positive sample and a negative sample
"""
def __init__(self, margin=0.1):
super(OfflineTripletLossNew, self).__init__()
self.margin = margin
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
zhangxinyu-tj/PAST
|
OfflineTripletLoss
| false
| 16,790
|
[
"MIT"
] | 112
|
67f1f7a780e869aa7867167538edb03faa96dec5
|
https://github.com/zhangxinyu-tj/PAST/tree/67f1f7a780e869aa7867167538edb03faa96dec5
|
SinusoidRelativePositionalEmbedding
|
import torch
import torch.nn as nn
class SinusoidRelativePositionalEmbedding(nn.Module):
def forward(self, x):
seq_len, n_model = x[0].shape
pos = x.new_tensor(range(seq_len))
pos = (pos - pos.unsqueeze(-1)).unsqueeze(-1) / 10000 ** (x.
new_tensor(range(n_model)) // 2 * 2 / n_model)
pos[..., 0::2], pos[..., 1::2] = pos[..., 0::2].sin(), pos[..., 1::2
].cos()
return pos
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
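# A minimal, hypothetical usage sketch: unlike the absolute variant, the
# relative table carries one embedding per (query, key) offset, so the
# output is (seq_len, seq_len, n_model).
emb = SinusoidRelativePositionalEmbedding()
x = torch.rand(4, 4, 4)
pos = emb(x)
assert pos.shape == (4, 4, 4)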
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x0 = xindex % 4
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = x2
tmp15 = tmp14 < tmp1
tmp16 = tmp14 < tmp3
tmp17 = tl.where(tmp16, tmp5, tmp6)
tmp18 = tmp14 < tmp8
tmp19 = tl.where(tmp18, tmp10, tmp11)
tmp20 = tl.where(tmp15, tmp17, tmp19)
tmp21 = tmp13 - tmp20
tmp22 = x0
tmp23 = tmp22 < tmp1
tmp24 = tmp22 < tmp3
tmp25 = tl.where(tmp24, tmp5, tmp6)
tmp26 = tmp22 < tmp8
tmp27 = tl.where(tmp26, tmp10, tmp11)
tmp28 = tl.where(tmp23, tmp25, tmp27)
tmp29 = 0.5
tmp30 = tmp28 * tmp29
tmp31 = libdevice.floor(tmp30)
tmp32 = tmp31 * tmp10
tmp33 = 0.25
tmp34 = tmp32 * tmp33
tmp35 = 10000.0
tmp36 = libdevice.pow(tmp35, tmp34)
tmp37 = tmp21 / tmp36
tl.store(out_ptr0 + x3, tmp37, xmask)
@triton.jit
def triton_poi_fused_copy_cos_sin_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = xindex // 4
x1 = xindex // 4 % 4
x2 = xindex // 16
tmp0 = x4 % 2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 == tmp1
tmp3 = tl.load(in_ptr0 + (2 * (x0 // 2) + 4 * x3), tmp2 & xmask,
eviction_policy='evict_last', other=0.0)
tmp4 = tl_math.sin(tmp3)
tmp5 = tl.full(tmp4.shape, 0.0, tmp4.dtype)
tmp6 = tl.where(tmp2, tmp4, tmp5)
tmp7 = x1
tmp8 = tl.full([1], 2, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp7 < tmp10
tmp12 = 0.0
tmp13 = 1.0
tmp14 = tl.where(tmp11, tmp12, tmp13)
tmp15 = tl.full([1], 3, tl.int64)
tmp16 = tmp7 < tmp15
tmp17 = 2.0
tmp18 = 3.0
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp14, tmp19)
tmp21 = x2
tmp22 = tmp21 < tmp8
tmp23 = tmp21 < tmp10
tmp24 = tl.where(tmp23, tmp12, tmp13)
tmp25 = tmp21 < tmp15
tmp26 = tl.where(tmp25, tmp17, tmp18)
tmp27 = tl.where(tmp22, tmp24, tmp26)
tmp28 = tmp20 - tmp27
tmp29 = x0
tmp30 = tmp29 < tmp8
tmp31 = tmp29 < tmp10
tmp32 = tl.where(tmp31, tmp12, tmp13)
tmp33 = tmp29 < tmp15
tmp34 = tl.where(tmp33, tmp17, tmp18)
tmp35 = tl.where(tmp30, tmp32, tmp34)
tmp36 = 0.5
tmp37 = tmp35 * tmp36
tmp38 = libdevice.floor(tmp37)
tmp39 = tmp38 * tmp17
tmp40 = 0.25
tmp41 = tmp39 * tmp40
tmp42 = 10000.0
tmp43 = libdevice.pow(tmp42, tmp41)
tmp44 = tmp28 / tmp43
tmp45 = tl.where(tmp2, tmp6, tmp44)
tmp46 = tmp29 >= tmp10
tmp47 = (-1 + x0) % 2
tmp48 = tmp47 == tmp1
tmp49 = tmp46 & tmp48
tmp50 = tl.load(in_ptr0 + (1 + 2 * triton_helpers.div_floor_integer(-1 +
x0, 2) + 4 * x3), tmp49 & xmask, eviction_policy='evict_last',
other=0.0)
tmp51 = tl_math.cos(tmp50)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp49, tmp51, tmp52)
tmp54 = tl.where(tmp49, tmp53, tmp45)
tl.store(in_out_ptr0 + x4, tmp54, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0[grid(64)](buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf2 = buf1
del buf1
triton_poi_fused_copy_cos_sin_1[grid(64)](buf2, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
return buf2,
class SinusoidRelativePositionalEmbeddingNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
yzhangcs/parser
|
SinusoidRelativePositionalEmbedding
| false
| 16,791
|
[
"MIT"
] | 439
|
3abebde1c9fe0bf2e99adce845aaf2a04b194f8a
|
https://github.com/yzhangcs/parser/tree/3abebde1c9fe0bf2e99adce845aaf2a04b194f8a
|
SoftCrossEntropy
|
import torch
from torch import nn
import torch.nn.functional as F
class SoftCrossEntropy(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs, target):
log_likelihood = -F.log_softmax(inputs, dim=1)
sample_num, _class_num = target.shape
loss = torch.sum(torch.mul(log_likelihood, target)) / sample_num
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
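# A hypothetical sanity check: with 2-D logits and one-hot targets the soft
# cross entropy above reduces to the standard batch-averaged cross entropy.
criterion = SoftCrossEntropy()
logits = torch.randn(3, 5)
hard = torch.tensor([0, 2, 4])
soft = F.one_hot(hard, num_classes=5).float()
assert torch.allclose(criterion(logits, soft), F.cross_entropy(logits, hard))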
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp14 = -tmp13
tmp16 = tmp14 * tmp15
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 0.25
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, buf0,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
class SoftCrossEntropyNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
zake7749/WSDM-Cup-2019
|
SoftCrossEntropy
| false
| 16,792
|
[
"Apache-2.0"
] | 64
|
5e9c9ae4197a5dedf6dbccc712bb2bbaae99edee
|
https://github.com/zake7749/WSDM-Cup-2019/tree/5e9c9ae4197a5dedf6dbccc712bb2bbaae99edee
|
Quantization
|
import torch
import torch.nn as nn
class Quant(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
input = torch.clamp(input, 0, 1)
output = (input * 255.0).round() / 255.0
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output
class Quantization(nn.Module):
def __init__(self):
super(Quantization, self).__init__()
def forward(self, input):
return Quant.apply(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
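# A hypothetical sanity check: the custom backward returns grad_output
# unchanged, so the rounding acts as a straight-through estimator.
quant = Quantization()
x = torch.rand(2, 3, requires_grad=True)
y = quant(x)
assert torch.allclose(y, (x.clamp(0, 1) * 255.0).round() / 255.0)
y.sum().backward()
assert torch.equal(x.grad, torch.ones_like(x))   # gradient passes through unchanged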
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_mul_round_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 1.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = 255.0
tmp6 = tmp4 * tmp5
tmp7 = libdevice.nearbyint(tmp6)
tmp8 = 0.00392156862745098
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_mul_round_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class Quant(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
input = torch.clamp(input, 0, 1)
output = (input * 255.0).round() / 255.0
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output
class QuantizationNew(nn.Module):
def __init__(self):
super(QuantizationNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
yzxing87/Invertible-ISP
|
Quantization
| false
| 16,793
|
[
"MIT"
] | 246
|
344dd333dd2a075f6a9e4ffc445dc387ca3014c4
|
https://github.com/yzxing87/Invertible-ISP/tree/344dd333dd2a075f6a9e4ffc445dc387ca3014c4
|
SoftMarginTriplet
|
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
class SoftMarginTriplet(_Loss):
__constants__ = ['reduction']
"""
inputs `x1`, `x2`, two 1D mini-batch `Tensor`s,
and a label 1D mini-batch tensor `y` with values (`1` or `-1`).
    If `y == 1` then it is assumed that the first input should be ranked higher
(have a larger value) than the second input, and vice-versa for `y == -1`.
The loss function for each sample in the mini-batch is:
loss(x, y) = max(0, -y * (x1 - x2) + margin)
reduction='elementwise_mean'|'none'|'sum'
"""
def __init__(self, margin=0.0, size_average=None, reduce=None,
reduction='elementwise_mean'):
super(SoftMarginTriplet, self).__init__(size_average, reduce, reduction
)
self.margin = margin
def forward(self, dist_ap, dist_an, softmargin):
loss = F.relu(dist_ap - dist_an + softmargin * self.margin)
if self.reduction == 'elementwise_mean':
loss = loss.mean()
else:
loss = loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
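# A hypothetical worked example: dist_ap / dist_an are assumed to be
# precomputed anchor-positive / anchor-negative distances, and softmargin
# scales the fixed margin per element before the hinge.
criterion = SoftMarginTriplet(margin=0.3)
dist_ap = torch.tensor([0.2, 0.9])
dist_an = torch.tensor([0.5, 0.4])
softmargin = torch.ones(2)
loss = criterion(dist_ap, dist_an, softmargin)
# elementwise: max(0, 0.2 - 0.5 + 0.3) = 0.0 and max(0, 0.9 - 0.4 + 0.3) = 0.8
assert torch.isclose(loss, torch.tensor(0.4))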
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_relu_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp3 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 - tmp1
tmp4 = 0.0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 256.0
tmp13 = tmp11 / tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_relu_sub_0[grid(1)](buf1, arg0_1,
arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class SoftMarginTripletNew(_Loss):
__constants__ = ['reduction']
"""
inputs `x1`, `x2`, two 1D mini-batch `Tensor`s,
and a label 1D mini-batch tensor `y` with values (`1` or `-1`).
    If `y == 1` then it is assumed that the first input should be ranked higher
(have a larger value) than the second input, and vice-versa for `y == -1`.
The loss function for each sample in the mini-batch is:
loss(x, y) = max(0, -y * (x1 - x2) + margin)
reduction='elementwise_mean'|'none'|'sum'
"""
def __init__(self, margin=0.0, size_average=None, reduce=None,
reduction='elementwise_mean'):
super(SoftMarginTripletNew, self).__init__(size_average, reduce,
reduction)
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
zhangxinyu-tj/PAST
|
SoftMarginTriplet
| false
| 16,794
|
[
"MIT"
] | 112
|
67f1f7a780e869aa7867167538edb03faa96dec5
|
https://github.com/zhangxinyu-tj/PAST/tree/67f1f7a780e869aa7867167538edb03faa96dec5
|
BCELoss
|
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def bce_loss(pred, target, use_sigmoid=True):
"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted joint representation of classification
and quality (IoU) estimation with shape (N, C), C is the number of
classes.
target (tuple([torch.Tensor])): Target category label with shape (N,)
and target quality label with shape (N,).
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
if use_sigmoid:
func = F.binary_cross_entropy_with_logits
else:
func = F.binary_cross_entropy
pred_sigmoid = pred.sigmoid() if use_sigmoid else pred
loss = func(pred_sigmoid, target, reduction='none')
return loss.flatten()
class BCELoss(nn.Module):
"""
Args:
use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
Defaults to True.
"""
def __init__(self, use_sigmoid=True):
super(BCELoss, self).__init__()
self.use_sigmoid = use_sigmoid
def forward(self, pred, target):
"""Forward function.
Args:
pred (torch.Tensor): Predicted joint representation of
classification and quality (IoU) estimation with shape (N, C),
C is the number of classes.
target (tuple([torch.Tensor])): Target category label with shape
(N,) and target quality label with shape (N,).
"""
return bce_loss(pred, target, use_sigmoid=self.use_sigmoid)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
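# A hypothetical sanity check: the loss is returned flattened and unreduced.
# Note that pred.sigmoid() is fed to binary_cross_entropy_with_logits, so the
# sigmoid is effectively applied twice; the reference below mirrors that.
criterion = BCELoss(use_sigmoid=True)
pred = torch.randn(2, 3)
target = torch.rand(2, 3)
loss = criterion(pred, target)
ref = F.binary_cross_entropy_with_logits(
    pred.sigmoid(), target, reduction='none').flatten()
assert loss.shape == (6,)
assert torch.allclose(loss, ref)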
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_sigmoid_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp2 * tmp4
tmp6 = 0.0
tmp7 = triton_helpers.minimum(tmp6, tmp4)
tmp8 = tl_math.abs(tmp4)
tmp9 = -tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = libdevice.log1p(tmp10)
tmp12 = tmp7 - tmp11
tmp13 = tmp5 - tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_with_logits_sigmoid_0[grid(256)](
arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return reinterpret_tensor(buf0, (256,), (1,), 0),
def bce_loss(pred, target, use_sigmoid=True):
"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted joint representation of classification
and quality (IoU) estimation with shape (N, C), C is the number of
classes.
target (tuple([torch.Tensor])): Target category label with shape (N,)
and target quality label with shape (N,).
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
if use_sigmoid:
func = F.binary_cross_entropy_with_logits
else:
func = F.binary_cross_entropy
pred_sigmoid = pred.sigmoid() if use_sigmoid else pred
loss = func(pred_sigmoid, target, reduction='none')
return loss.flatten()
class BCELossNew(nn.Module):
"""
Args:
use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
Defaults to True.
"""
def __init__(self, use_sigmoid=True):
super(BCELossNew, self).__init__()
self.use_sigmoid = use_sigmoid
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
zhangzhengde0225/SwinTrack
|
BCELoss
| false
| 16,795
|
[
"MIT"
] | 143
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249
|
DotAttention
|
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class DotAttention(nn.Module):
def __init__(self, hidden_size):
super(DotAttention, self).__init__()
self.hidden_size = hidden_size
self.attn_vector = nn.Parameter(torch.Tensor(1, hidden_size),
requires_grad=True)
init.xavier_uniform(self.attn_vector.data)
def get_mask(self):
pass
def forward(self, inputs, lengths=None):
batch_size, _max_len = inputs.size()[:2]
"""
print("INPUTS", inputs.size())
print("ATTN", self.attn_vector # (1, hidden_size)
.unsqueeze(0) # (1, hidden_size, 1)
.transpose(2, 1)
.repeat(batch_size, 1, 1).size())"""
weights = torch.bmm(inputs, self.attn_vector.unsqueeze(0).transpose
(2, 1).repeat(batch_size, 1, 1))
attn_energies = F.softmax(F.relu(weights.squeeze()))
_sums = attn_energies.sum(-1).unsqueeze(1).expand_as(attn_energies)
attn_weights = attn_energies / _sums
weighted = torch.mul(inputs, attn_weights.unsqueeze(-1).expand_as(
inputs))
representations = weighted.sum(1).squeeze()
return representations, attn_weights
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
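# A minimal, hypothetical usage sketch: inputs are assumed to be
# (batch, seq_len, hidden_size); the module returns a weighted sum over
# positions together with the attention weights.
attn = DotAttention(hidden_size=8)
x = torch.randn(4, 6, 8)
summary, weights = attn(x)
assert summary.shape == (4, 8)    # one pooled vector per sequence
assert weights.shape == (4, 6)    # one weight per position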
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = triton_helpers.maximum(tmp1, tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = triton_helpers.maximum(tmp1, tmp11)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_squeeze_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_repeat_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(primals_1, buf0, out=buf1)
buf2 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_relu_1[grid(16)](buf1, buf2, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused_mul_squeeze_sum_3[grid(16)](primals_1, buf4, buf5,
16, XBLOCK=16, num_warps=1, num_stages=1)
return buf5, buf4, primals_1, buf1
class DotAttentionNew(nn.Module):
def __init__(self, hidden_size):
super(DotAttentionNew, self).__init__()
self.hidden_size = hidden_size
self.attn_vector = nn.Parameter(torch.Tensor(1, hidden_size),
requires_grad=True)
init.xavier_uniform(self.attn_vector.data)
def get_mask(self):
pass
def forward(self, input_0):
primals_2 = self.attn_vector
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
|
zake7749/DeepToxic
|
DotAttention
| false
| 16,796
|
[
"MIT"
] | 206
|
92710446c55fe60526099f808a7e1179402e199f
|
https://github.com/zake7749/DeepToxic/tree/92710446c55fe60526099f808a7e1179402e199f
|
IoULoss
|
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def fp16_clamp(x, min=None, max=None):
if not x.is_cuda and x.dtype == torch.float16:
return x.float().clamp(min, max).half()
return x.clamp(min, max)
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-06):
"""Calculate overlap between two set of bboxes.
FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889
Note:
Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou',
        there are some newly generated variables when calculating IoU
using bbox_overlaps function:
1) is_aligned is False
area1: M x 1
area2: N x 1
lt: M x N x 2
rb: M x N x 2
wh: M x N x 2
overlap: M x N x 1
union: M x N x 1
ious: M x N x 1
Total memory:
S = (9 x N x M + N + M) * 4 Byte,
When using FP16, we can reduce:
R = (9 x N x M + N + M) * 4 / 2 Byte
                R larger than (N + M) * 4 * 2 always holds when N and M >= 1.
Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2,
N + 1 < 3 * N, when N or M is 1.
Given M = 40 (ground truth), N = 400000 (three anchor boxes
in per grid, FPN, R-CNNs),
R = 275 MB (one times)
A special case (dense detection), M = 512 (ground truth),
R = 3516 MB = 3.43 GB
When the batch size is B, reduce:
B x R
Therefore, CUDA memory runs out frequently.
Experiments on GeForce RTX 2080Ti (11019 MiB):
| dtype | M | N | Use | Real | Ideal |
|:----:|:----:|:----:|:----:|:----:|:----:|
| FP32 | 512 | 400000 | 8020 MiB | -- | -- |
| FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |
| FP32 | 40 | 400000 | 1540 MiB | -- | -- |
| FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB |
2) is_aligned is True
area1: N x 1
area2: N x 1
lt: N x 2
rb: N x 2
wh: N x 2
overlap: N x 1
union: N x 1
ious: N x 1
Total memory:
S = 11 x N * 4 Byte
When using FP16, we can reduce:
R = 11 x N * 4 / 2 Byte
        The same holds for 'giou' (larger than 'iou').
        Time-wise, FP16 is generally faster than FP32.
        When gpu_assign_thr is not -1, it takes more time on CPU
        but does not reduce memory.
        Thus, we can reduce half the memory while keeping the speed.
If ``is_aligned `` is ``False``, then calculate the overlaps between each
bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
siam_pair of bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
If ``is_aligned `` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union), "iof" (intersection over
foreground) or "giou" (generalized intersection over union).
Default "iou".
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
eps (float, optional): A value added to the denominator for numerical
stability. Default 1e-6.
Returns:
Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
Example:
>>> bboxes1 = torch.FloatTensor([
>>> [0, 0, 10, 10],
>>> [10, 10, 20, 20],
>>> [32, 32, 38, 42],
>>> ])
>>> bboxes2 = torch.FloatTensor([
>>> [0, 0, 10, 20],
>>> [0, 10, 10, 19],
>>> [10, 10, 20, 20],
>>> ])
>>> overlaps = bbox_overlaps(bboxes1, bboxes2)
>>> assert overlaps.shape == (3, 3)
>>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
>>> assert overlaps.shape == (3, )
Example:
>>> empty = torch.empty(0, 4)
>>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
>>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
>>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
>>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.size(-2)
cols = bboxes2.size(-2)
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return bboxes1.new(batch_shape + (rows,))
else:
return bboxes1.new(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] -
bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] -
bboxes2[..., 1])
if is_aligned:
lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])
rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
else:
lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2])
rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:])
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[...,
None, :, :2])
enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[...,
None, :, 2:])
eps = union.new_tensor([eps])
union = torch.max(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = torch.max(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
def iou_loss(pred, target, linear=False, eps=1e-06):
"""IoU loss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
The loss is calculated as negative log of IoU.
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
linear (bool, optional): If True, use linear scale of loss instead of
log scale. Default: False.
eps (float): Eps to avoid log(0).
Return:
torch.Tensor: Loss tensor.
"""
ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
if linear:
loss = 1 - ious
else:
loss = -ious.log()
return loss
class IoULoss(nn.Module):
"""IoULoss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
Args:
linear (bool): If True, use linear scale of loss instead of log scale.
Default: False.
eps (float): Eps to avoid log(0).
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss.
"""
def __init__(self, linear=False, eps=1e-06):
super(IoULoss, self).__init__()
self.linear = linear
self.eps = eps
def forward(self, pred, target):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
"""
loss = iou_loss(pred, target, self.linear, self.eps)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
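# A hypothetical worked example: two aligned boxes sharing half their area
# have IoU = 0.5, so the default log-scale loss is -log(0.5); linear=True
# would give 1 - 0.5 instead.
import math
criterion = IoULoss()
pred = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
target = torch.tensor([[0.0, 0.0, 10.0, 20.0]])
loss = criterion(pred, target)
assert torch.allclose(loss, torch.tensor([-math.log(0.5)]))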
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_div_lift_fresh_log_maximum_mul_neg_sub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp2 - tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp11 - tmp14
tmp16 = triton_helpers.maximum(tmp15, tmp7)
tmp17 = tmp8 * tmp16
tmp18 = tmp0 - tmp3
tmp19 = tmp9 - tmp12
tmp20 = tmp18 * tmp19
tmp21 = tmp1 - tmp4
tmp22 = tmp10 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp20 + tmp23
tmp25 = tmp24 - tmp17
tmp26 = 9.999999974752427e-07
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp28 = tmp17 / tmp27
tmp29 = 1e-06
tmp30 = triton_helpers.maximum(tmp28, tmp29)
tmp31 = tl_math.log(tmp30)
tmp32 = -tmp31
tl.store(in_out_ptr0 + x0, tmp32, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_clamp_div_lift_fresh_log_maximum_mul_neg_sub_0[
grid(64)](buf2, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del arg0_1
del arg1_1
return buf2,
def fp16_clamp(x, min=None, max=None):
if not x.is_cuda and x.dtype == torch.float16:
return x.float().clamp(min, max).half()
return x.clamp(min, max)
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-06):
"""Calculate overlap between two set of bboxes.
FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889
Note:
Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou',
        there are some newly generated variables when calculating IoU
using bbox_overlaps function:
1) is_aligned is False
area1: M x 1
area2: N x 1
lt: M x N x 2
rb: M x N x 2
wh: M x N x 2
overlap: M x N x 1
union: M x N x 1
ious: M x N x 1
Total memory:
S = (9 x N x M + N + M) * 4 Byte,
When using FP16, we can reduce:
R = (9 x N x M + N + M) * 4 / 2 Byte
                R larger than (N + M) * 4 * 2 always holds when N and M >= 1.
Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2,
N + 1 < 3 * N, when N or M is 1.
Given M = 40 (ground truth), N = 400000 (three anchor boxes
in per grid, FPN, R-CNNs),
R = 275 MB (one times)
A special case (dense detection), M = 512 (ground truth),
R = 3516 MB = 3.43 GB
When the batch size is B, reduce:
B x R
Therefore, CUDA memory runs out frequently.
Experiments on GeForce RTX 2080Ti (11019 MiB):
| dtype | M | N | Use | Real | Ideal |
|:----:|:----:|:----:|:----:|:----:|:----:|
| FP32 | 512 | 400000 | 8020 MiB | -- | -- |
| FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |
| FP32 | 40 | 400000 | 1540 MiB | -- | -- |
| FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB |
2) is_aligned is True
area1: N x 1
area2: N x 1
lt: N x 2
rb: N x 2
wh: N x 2
overlap: N x 1
union: N x 1
ious: N x 1
Total memory:
S = 11 x N * 4 Byte
When using FP16, we can reduce:
R = 11 x N * 4 / 2 Byte
        The same holds for 'giou' (larger than 'iou').
        Time-wise, FP16 is generally faster than FP32.
        When gpu_assign_thr is not -1, it takes more time on CPU
        but does not reduce memory.
        Thus, we can reduce half the memory while keeping the speed.
If ``is_aligned `` is ``False``, then calculate the overlaps between each
bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
siam_pair of bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
If ``is_aligned `` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union), "iof" (intersection over
foreground) or "giou" (generalized intersection over union).
Default "iou".
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
eps (float, optional): A value added to the denominator for numerical
stability. Default 1e-6.
Returns:
Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
Example:
>>> bboxes1 = torch.FloatTensor([
>>> [0, 0, 10, 10],
>>> [10, 10, 20, 20],
>>> [32, 32, 38, 42],
>>> ])
>>> bboxes2 = torch.FloatTensor([
>>> [0, 0, 10, 20],
>>> [0, 10, 10, 19],
>>> [10, 10, 20, 20],
>>> ])
>>> overlaps = bbox_overlaps(bboxes1, bboxes2)
>>> assert overlaps.shape == (3, 3)
>>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
>>> assert overlaps.shape == (3, )
Example:
>>> empty = torch.empty(0, 4)
>>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
>>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
>>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
>>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.size(-2)
cols = bboxes2.size(-2)
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return bboxes1.new(batch_shape + (rows,))
else:
return bboxes1.new(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] -
bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] -
bboxes2[..., 1])
if is_aligned:
lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])
rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
else:
lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2])
rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:])
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[...,
None, :, :2])
enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[...,
None, :, 2:])
eps = union.new_tensor([eps])
union = torch.max(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
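    # GIoU = IoU - (C - U) / C, where C is the area of the smallest box
    # enclosing both inputs and U is the union area (both clamped below by eps).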
enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = torch.max(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
def iou_loss(pred, target, linear=False, eps=1e-06):
"""IoU loss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
The loss is calculated as negative log of IoU.
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
linear (bool, optional): If True, use linear scale of loss instead of
log scale. Default: False.
eps (float): Eps to avoid log(0).
Return:
torch.Tensor: Loss tensor.
"""
ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
if linear:
loss = 1 - ious
else:
loss = -ious.log()
return loss
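# A minimal usage sketch (not part of the original source; the helper name is
# illustrative): identical boxes have IoU = 1, so the log-scale loss is
# -log(1) = 0 and the linear loss is 1 - 1 = 0.
def _iou_loss_example():
    boxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20]])
    assert torch.allclose(iou_loss(boxes, boxes), torch.zeros(2))
    assert torch.allclose(iou_loss(boxes, boxes, linear=True), torch.zeros(2))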
class IoULossNew(nn.Module):
"""IoULoss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
Args:
linear (bool): If True, use linear scale of loss instead of log scale.
Default: False.
eps (float): Eps to avoid log(0).
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss.
"""
def __init__(self, linear=False, eps=1e-06):
super(IoULossNew, self).__init__()
self.linear = linear
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
zhangzhengde0225/SwinTrack
|
IoULoss
| false
| 16,797
|
[
"MIT"
] | 143
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249
|
DIoULoss
|
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def diou(pred, target, eps=1e-07):
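    # DIoU = IoU - rho^2 / c^2, where rho is the distance between the two box
    # centers and c is the diagonal length of the smallest box enclosing both
    # boxes; the code below computes the IoU first, then c^2 from the enclosing
    # box and rho^2 from the box centers.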
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
ious = overlap / union
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw ** 2 + ch ** 2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
left = (b2_x1 + b2_x2 - (b1_x1 + b1_x2)) ** 2 / 4
right = (b2_y1 + b2_y2 - (b1_y1 + b1_y2)) ** 2 / 4
rho2 = left + right
dious = ious - rho2 / c2
return dious
def diou_loss(pred, target, eps=1e-07):
"""`Implementation of Distance-IoU Loss: Faster and Better
Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_.
Code is modified from https://github.com/Zzh-tju/DIoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Eps added for numerical stability.
Return:
Tensor: Loss tensor.
"""
dious = diou(pred, target, eps)
loss = 1 - dious
return loss
class DIoULoss(nn.Module):
def __init__(self, eps=1e-06):
super(DIoULoss, self).__init__()
self.eps = eps
def forward(self, pred, target):
return diou_loss(pred, target, self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_rsub_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
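    # Fused elementwise DIoU loss: for each of the 64 output positions the
    # kernel loads the x1/y1/x2/y2 planes of pred (in_ptr0) and target
    # (in_ptr1), computes the IoU, the squared center distance (rho2) and the
    # squared enclosing-box diagonal (c2), and stores 1 - (iou - rho2 / c2).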
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp2 - tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp11 - tmp14
tmp16 = triton_helpers.maximum(tmp15, tmp7)
tmp17 = tmp8 * tmp16
tmp18 = tmp0 - tmp3
tmp19 = tmp9 - tmp12
tmp20 = tmp18 * tmp19
tmp21 = tmp1 - tmp4
tmp22 = tmp10 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp20 + tmp23
tmp25 = tmp24 - tmp17
tmp26 = tmp4 + tmp1
tmp27 = tmp3 + tmp0
tmp28 = tmp26 - tmp27
tmp29 = tmp28 * tmp28
tmp30 = 0.25
tmp31 = tmp29 * tmp30
tmp32 = tmp13 + tmp10
tmp33 = tmp12 + tmp9
tmp34 = tmp32 - tmp33
tmp35 = tmp34 * tmp34
tmp36 = tmp35 * tmp30
tmp37 = tmp31 + tmp36
tmp38 = triton_helpers.maximum(tmp0, tmp1)
tmp39 = triton_helpers.minimum(tmp3, tmp4)
tmp40 = tmp38 - tmp39
tmp41 = triton_helpers.maximum(tmp40, tmp7)
tmp42 = tmp41 * tmp41
tmp43 = triton_helpers.maximum(tmp9, tmp10)
tmp44 = triton_helpers.minimum(tmp12, tmp13)
tmp45 = tmp43 - tmp44
tmp46 = triton_helpers.maximum(tmp45, tmp7)
tmp47 = tmp46 * tmp46
tmp48 = tmp42 + tmp47
tmp49 = 1e-06
tmp50 = tmp48 + tmp49
tmp51 = tmp37 / tmp50
tmp52 = tmp25 + tmp49
tmp53 = tmp17 / tmp52
tmp54 = tmp53 - tmp51
tmp55 = 1.0
tmp56 = tmp55 - tmp54
tl.store(in_out_ptr0 + x2, tmp56, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_rsub_sub_0[grid(64)](buf3, arg0_1,
arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf3,
def diou(pred, target, eps=1e-07):
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
ious = overlap / union
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw ** 2 + ch ** 2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
left = (b2_x1 + b2_x2 - (b1_x1 + b1_x2)) ** 2 / 4
right = (b2_y1 + b2_y2 - (b1_y1 + b1_y2)) ** 2 / 4
rho2 = left + right
dious = ious - rho2 / c2
return dious
def diou_loss(pred, target, eps=1e-07):
"""`Implementation of Distance-IoU Loss: Faster and Better
Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_.
Code is modified from https://github.com/Zzh-tju/DIoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Eps added for numerical stability.
Return:
Tensor: Loss tensor.
"""
dious = diou(pred, target, eps)
loss = 1 - dious
return loss
class DIoULossNew(nn.Module):
def __init__(self, eps=1e-06):
super(DIoULossNew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
zhangzhengde0225/SwinTrack
|
DIoULoss
| false
| 16,798
|
[
"MIT"
] | 143
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249
|
VarifocalLoss
|
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def varifocal_loss(pred, target, alpha=0.75, gamma=2.0, iou_weighted=True,
use_sigmoid=True):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is the number of classes.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal Loss.
Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive example with the iou target. Defaults to True.
use_sigmoid (bool, optional): Whether the prediction is
used for sigmoid or softmax. Defaults to True.
"""
assert pred.size() == target.size()
if use_sigmoid:
pred_sigmoid = pred.sigmoid()
else:
pred_sigmoid = pred
target = target.type_as(pred)
if iou_weighted:
focal_weight = target * (target > 0.0).float() + alpha * (pred_sigmoid
- target).abs().pow(gamma) * (target <= 0.0).float()
else:
focal_weight = (target > 0.0).float() + alpha * (pred_sigmoid - target
).abs().pow(gamma) * (target <= 0.0).float()
loss = F.binary_cross_entropy(pred_sigmoid, target, reduction='none'
) * focal_weight
return loss
class VarifocalLoss(nn.Module):
def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0,
iou_weighted=True):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
use_sigmoid (bool, optional): Whether the prediction is
used for sigmoid or softmax. Defaults to True.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal
Loss. Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive examples with the iou target. Defaults to True.
"""
super(VarifocalLoss, self).__init__()
assert alpha >= 0.0
self.use_sigmoid = use_sigmoid
self.alpha = alpha
self.gamma = gamma
self.iou_weighted = iou_weighted
def forward(self, pred, target):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: The calculated loss
"""
return varifocal_loss(pred, target, alpha=self.alpha, gamma=self.
gamma, iou_weighted=self.iou_weighted, use_sigmoid=self.use_sigmoid
)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_abs_add_binary_cross_entropy_gt_le_mul_pow_sigmoid_sub_0(
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
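    # Fused elementwise kernel: in_ptr0 holds the target and in_ptr1 the raw
    # prediction. It evaluates binary cross entropy against sigmoid(pred) with
    # the log terms clamped at -100 (as F.binary_cross_entropy does), then
    # multiplies by the varifocal weight: target for positive locations and
    # 0.75 * |sigmoid(pred) - target| ** 2 for the rest.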
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = -tmp4
tmp6 = libdevice.log1p(tmp5)
tmp7 = -100.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp2 * tmp8
tmp10 = tl_math.log(tmp4)
tmp11 = triton_helpers.maximum(tmp10, tmp7)
tmp12 = tmp0 * tmp11
tmp13 = tmp9 - tmp12
tmp14 = 0.0
tmp15 = tmp0 > tmp14
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp0 * tmp16
tmp18 = tmp4 - tmp0
tmp19 = tl_math.abs(tmp18)
tmp20 = tmp19 * tmp19
tmp21 = 0.75
tmp22 = tmp20 * tmp21
tmp23 = tmp0 <= tmp14
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp22 * tmp24
tmp26 = tmp17 + tmp25
tmp27 = tmp13 * tmp26
tl.store(out_ptr0 + x0, tmp27, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_abs_add_binary_cross_entropy_gt_le_mul_pow_sigmoid_sub_0[
grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del arg0_1
del arg1_1
return buf0,
def varifocal_loss(pred, target, alpha=0.75, gamma=2.0, iou_weighted=True,
use_sigmoid=True):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is the number of classes.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal Loss.
Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive example with the iou target. Defaults to True.
use_sigmoid (bool, optional): Whether the prediction is
used for sigmoid or softmax. Defaults to True.
"""
assert pred.size() == target.size()
if use_sigmoid:
pred_sigmoid = pred.sigmoid()
else:
pred_sigmoid = pred
target = target.type_as(pred)
if iou_weighted:
focal_weight = target * (target > 0.0).float() + alpha * (pred_sigmoid
- target).abs().pow(gamma) * (target <= 0.0).float()
else:
focal_weight = (target > 0.0).float() + alpha * (pred_sigmoid - target
).abs().pow(gamma) * (target <= 0.0).float()
loss = F.binary_cross_entropy(pred_sigmoid, target, reduction='none'
) * focal_weight
return loss
class VarifocalLossNew(nn.Module):
def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0,
iou_weighted=True):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
use_sigmoid (bool, optional): Whether the prediction is
used for sigmoid or softmax. Defaults to True.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal
Loss. Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive examples with the iou target. Defaults to True.
"""
super(VarifocalLossNew, self).__init__()
assert alpha >= 0.0
self.use_sigmoid = use_sigmoid
self.alpha = alpha
self.gamma = gamma
self.iou_weighted = iou_weighted
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
zhangzhengde0225/SwinTrack
|
VarifocalLoss
| false
| 16,799
|
[
"MIT"
] | 143
|
526be17f8ef266cb924c6939bd8dda23e9b73249
|
https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249
|
CXLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
class CXLoss(nn.Module):
def __init__(self, sigma=0.1, b=1.0, similarity='consine'):
super(CXLoss, self).__init__()
self.similarity = similarity
self.sigma = sigma
self.b = b
def center_by_T(self, featureI, featureT):
meanT = featureT.mean(0, keepdim=True).mean(2, keepdim=True).mean(3,
keepdim=True)
return featureI - meanT, featureT - meanT
def l2_normalize_channelwise(self, features):
norms = features.norm(p=2, dim=1, keepdim=True)
features = features.div(norms)
return features
def patch_decomposition(self, features):
N, C, H, W = features.shape
assert N == 1
P = H * W
patches = features.view(1, 1, C, P).permute((3, 2, 0, 1))
return patches
def calc_relative_distances(self, raw_dist, axis=1):
epsilon = 1e-05
div = torch.min(raw_dist, dim=axis, keepdim=True)[0]
relative_dist = raw_dist / (div + epsilon)
return relative_dist
def calc_CX(self, dist, axis=1):
W = torch.exp((self.b - dist) / self.sigma)
W_sum = W.sum(dim=axis, keepdim=True)
return W.div(W_sum)
def forward(self, featureT, featureI):
"""
:param featureT: target
:param featureI: inference
:return: the scalar contextual (CX) loss
"""
featureI, featureT = self.center_by_T(featureI, featureT)
featureI = self.l2_normalize_channelwise(featureI)
featureT = self.l2_normalize_channelwise(featureT)
dist = []
N = featureT.size()[0]
for i in range(N):
featureT_i = featureT[i, :, :, :].unsqueeze(0)
featureI_i = featureI[i, :, :, :].unsqueeze(0)
featureT_patch = self.patch_decomposition(featureT_i)
dist_i = F.conv2d(featureI_i, featureT_patch)
dist.append(dist_i)
dist = torch.cat(dist, dim=0)
raw_dist = (1.0 - dist) / 2.0
relative_dist = self.calc_relative_distances(raw_dist)
CX = self.calc_CX(relative_dist)
CX = CX.max(dim=3)[0].max(dim=2)[0]
CX = CX.mean(1)
CX = -torch.log(CX)
CX = torch.mean(CX)
return CX
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (68 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (132 + x0 + 16 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (196 + x0 + 16 * x1), xmask)
tmp18 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (72 + x0 + 16 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (136 + x0 + 16 * x1), xmask)
tmp23 = tl.load(in_ptr0 + (200 + x0 + 16 * x1), xmask)
tmp27 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp28 = tl.load(in_ptr0 + (76 + x0 + 16 * x1), xmask)
tmp30 = tl.load(in_ptr0 + (140 + x0 + 16 * x1), xmask)
tmp32 = tl.load(in_ptr0 + (204 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + x2, tmp36, xmask)
@triton.jit
def triton_poi_fused_mean_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + x3, xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp12 = tmp11 - tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
tl.store(out_ptr1 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_4(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (64 + x1 + 16 * y0), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_6(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (64 + y0 + 16 * x1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_7(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (128 + x1 + 16 * y0), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_8(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (128 + y0 + 16 * x1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_9(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (192 + x1 + 16 * y0), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_10(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (192 + y0 + 16 * x1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_per_fused_add_cat_div_exp_min_rsub_sum_11(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x1 = xindex // 16
r2 = rindex
x0 = xindex % 16
x3 = xindex
tmp0 = x1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (r2 + 16 * x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1, 1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (r2 + 16 * x0), tmp9 & xmask, eviction_policy
='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1, 1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (r2 + 16 * x0), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1, 1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + (r2 + 16 * x0), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tmp23 = 1.0
tmp24 = tmp23 - tmp22
tmp25 = 0.5
tmp26 = tmp24 * tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.where(xmask, tmp27, float('inf'))
tmp30 = triton_helpers.min2(tmp29, 1)[:, None]
tmp31 = 1e-05
tmp32 = tmp30 + tmp31
tmp33 = tmp26 / tmp32
tmp34 = tmp23 - tmp33
tmp35 = 10.0
tmp36 = tmp34 * tmp35
tmp37 = tl_math.exp(tmp36)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp40 = tl.where(xmask, tmp38, 0)
tmp41 = tl.sum(tmp40, 1)[:, None]
tl.store(out_ptr0 + (r2 + 16 * x3), tmp26, xmask)
tl.store(out_ptr1 + x3, tmp30, xmask)
tl.store(out_ptr2 + x3, tmp41, xmask)
@triton.jit
def triton_poi_fused_add_div_exp_max_rsub_12(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp33 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp39 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = 1e-05
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = 10.0
tmp8 = tmp6 * tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 / tmp10
tmp14 = tmp13 + tmp2
tmp15 = tmp12 / tmp14
tmp16 = tmp5 - tmp15
tmp17 = tmp16 * tmp7
tmp18 = tl_math.exp(tmp17)
tmp20 = tmp18 / tmp19
tmp21 = triton_helpers.maximum(tmp11, tmp20)
tmp24 = tmp23 + tmp2
tmp25 = tmp22 / tmp24
tmp26 = tmp5 - tmp25
tmp27 = tmp26 * tmp7
tmp28 = tl_math.exp(tmp27)
tmp30 = tmp28 / tmp29
tmp31 = triton_helpers.maximum(tmp21, tmp30)
tmp34 = tmp33 + tmp2
tmp35 = tmp32 / tmp34
tmp36 = tmp5 - tmp35
tmp37 = tmp36 * tmp7
tmp38 = tl_math.exp(tmp37)
tmp40 = tmp38 / tmp39
tmp41 = triton_helpers.maximum(tmp31, tmp40)
tl.store(out_ptr0 + x2, tmp41, xmask)
@triton.jit
def triton_per_fused_max_mean_13(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp3 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_log_max_mean_neg_14(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 16.0
tmp2 = tmp0 / tmp1
tmp3 = tl_math.log(tmp2)
tmp4 = -tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 1, 4), (16, 4, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mean_sub_1[grid(256)](arg1_1, buf0, arg0_1, buf1,
buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_linalg_vector_norm_2[grid(256)](buf1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = buf1
del buf1
triton_poi_fused_div_linalg_vector_norm_2[grid(256)](buf3, buf4,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((1, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_convolution_3[grid(4, 16)](buf2, buf5, 4, 16,
XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 1, 1), (4, 1, 4, 4), torch.float32)
triton_poi_fused_convolution_4[grid(16, 4)](buf4, buf6, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf7 = extern_kernels.convolution(buf5, buf6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 16, 4, 4), (256, 1, 64, 16))
buf8 = reinterpret_tensor(buf6, (1, 4, 4, 4), (64, 1, 16, 4), 0)
del buf6
triton_poi_fused_convolution_5[grid(4, 16)](buf2, buf8, 4, 16,
XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (16, 4, 1, 1), (4, 1, 4, 4), 0)
del buf5
triton_poi_fused_convolution_6[grid(16, 4)](buf4, buf9, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf10 = extern_kernels.convolution(buf8, buf9, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (1, 16, 4, 4), (256, 1, 64, 16))
buf11 = reinterpret_tensor(buf9, (1, 4, 4, 4), (64, 1, 16, 4), 0)
del buf9
triton_poi_fused_convolution_7[grid(4, 16)](buf2, buf11, 4, 16,
XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf8, (16, 4, 1, 1), (4, 1, 4, 4), 0)
del buf8
triton_poi_fused_convolution_8[grid(16, 4)](buf4, buf12, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (1, 16, 4, 4), (256, 1, 64, 16))
buf14 = reinterpret_tensor(buf12, (1, 4, 4, 4), (64, 1, 16, 4), 0)
del buf12
triton_poi_fused_convolution_9[grid(4, 16)](buf2, buf14, 4, 16,
XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
del buf2
buf15 = reinterpret_tensor(buf11, (16, 4, 1, 1), (4, 1, 4, 4), 0)
del buf11
triton_poi_fused_convolution_10[grid(16, 4)](buf4, buf15, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf4
buf16 = extern_kernels.convolution(buf14, buf15, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (1, 16, 4, 4), (256, 1, 64, 16))
buf17 = empty_strided_cuda((4, 16, 4, 4), (256, 1, 64, 16), torch.
float32)
buf18 = reinterpret_tensor(buf15, (4, 1, 4, 4), (16, 64, 4, 1), 0)
del buf15
buf20 = reinterpret_tensor(buf14, (4, 1, 4, 4), (16, 64, 4, 1), 0)
del buf14
triton_per_fused_add_cat_div_exp_min_rsub_sum_11[grid(64)](buf7,
buf10, buf13, buf16, buf17, buf18, buf20, 64, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf10
del buf13
del buf16
buf21 = reinterpret_tensor(buf7, (4, 16, 4), (64, 1, 16), 0)
del buf7
triton_poi_fused_add_div_exp_max_rsub_12[grid(256)](buf17, buf18,
buf20, buf21, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf17
del buf18
del buf20
buf22 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_max_mean_13[grid(4)](buf21, buf22, 4, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf21
buf23 = empty_strided_cuda((), (), torch.float32)
buf24 = buf23
del buf23
triton_per_fused_log_max_mean_neg_14[grid(1)](buf24, buf22, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
del buf22
return buf24,
class CXLossNew(nn.Module):
def __init__(self, sigma=0.1, b=1.0, similarity='consine'):
super(CXLossNew, self).__init__()
self.similarity = similarity
self.sigma = sigma
self.b = b
def center_by_T(self, featureI, featureT):
meanT = featureT.mean(0, keepdim=True).mean(2, keepdim=True).mean(3,
keepdim=True)
return featureI - meanT, featureT - meanT
def l2_normalize_channelwise(self, features):
norms = features.norm(p=2, dim=1, keepdim=True)
features = features.div(norms)
return features
def patch_decomposition(self, features):
N, C, H, W = features.shape
assert N == 1
P = H * W
patches = features.view(1, 1, C, P).permute((3, 2, 0, 1))
return patches
def calc_relative_distances(self, raw_dist, axis=1):
epsilon = 1e-05
div = torch.min(raw_dist, dim=axis, keepdim=True)[0]
relative_dist = raw_dist / (div + epsilon)
return relative_dist
def calc_CX(self, dist, axis=1):
W = torch.exp((self.b - dist) / self.sigma)
W_sum = W.sum(dim=axis, keepdim=True)
return W.div(W_sum)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
yizhiwang96/deepvecfont
|
CXLoss
| false
| 16,800
|
[
"MIT"
] | 68
|
3ba4adb0406f16a6f387c5e12dd12286c9c341e8
|
https://github.com/yizhiwang96/deepvecfont/tree/3ba4adb0406f16a6f387c5e12dd12286c9c341e8
|
MaskedMHCA
|
import math
import torch
import torch.nn as nn
import torch.utils.data
from torch.nn import functional as F
class LayerNorm(nn.Module):
"""
LayerNorm that supports inputs of size B, C, T
"""
def __init__(self, num_channels, eps=1e-05, affine=True, device=None,
dtype=None):
super().__init__()
factory_kwargs = {'device': device, 'dtype': dtype}
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = nn.Parameter(torch.ones([1, num_channels, 1], **
factory_kwargs))
self.bias = nn.Parameter(torch.zeros([1, num_channels, 1], **
factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
assert x.dim() == 3
assert x.shape[1] == self.num_channels
mu = torch.mean(x, dim=1, keepdim=True)
res_x = x - mu
sigma = torch.mean(res_x ** 2, dim=1, keepdim=True)
out = res_x / torch.sqrt(sigma + self.eps)
if self.affine:
out *= self.weight
out += self.bias
return out
class MaskedConv1D(nn.Module):
"""
Masked 1D convolution. Interface remains the same as Conv1d.
Only supports a subset of 1d convs
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):
super().__init__()
assert kernel_size % 2 == 1 and kernel_size // 2 == padding
self.stride = stride
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias, padding_mode)
if bias:
torch.nn.init.constant_(self.conv.bias, 0.0)
def forward(self, x, mask):
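        # Convolve the full (padded) sequence first, then re-apply the validity
        # mask (downsampled with nearest interpolation when stride > 1) so that
        # padded time steps stay exactly zero for downstream layers.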
_B, _C, T = x.size()
assert T % self.stride == 0
out_conv = self.conv(x)
if self.stride > 1:
out_mask = F.interpolate(mask.float(), size=T // self.stride,
mode='nearest')
else:
out_mask = mask.float()
out_conv = out_conv * out_mask.detach()
out_mask = out_mask.bool()
return out_conv, out_mask
class MaskedMHCA(nn.Module):
"""
Multi Head Conv Attention with mask
Add a depthwise convolution within a standard MHA
The extra conv op can be used to
(1) encode relative position information (replacing position encoding);
(2) downsample the features if needed;
(3) match the feature channels
Note: With current implementation, the downsampled feature will be aligned
to every s+1 time step, where s is the downsampling stride. This allows us
to easily interpolate the corresponding positional embeddings.
Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
def __init__(self, n_embd, n_head, n_qx_stride=1, n_kv_stride=1,
attn_pdrop=0.0, proj_pdrop=0.0):
super().__init__()
assert n_embd % n_head == 0
self.n_embd = n_embd
self.n_head = n_head
self.n_channels = n_embd // n_head
self.scale = 1.0 / math.sqrt(self.n_channels)
assert n_qx_stride == 1 or n_qx_stride % 2 == 0
assert n_kv_stride == 1 or n_kv_stride % 2 == 0
self.n_qx_stride = n_qx_stride
self.n_kv_stride = n_kv_stride
kernel_size = self.n_qx_stride + 1 if self.n_qx_stride > 1 else 3
stride, padding = self.n_kv_stride, kernel_size // 2
self.query_conv = MaskedConv1D(self.n_embd, self.n_embd,
kernel_size, stride=stride, padding=padding, groups=self.n_embd,
bias=False)
self.query_norm = LayerNorm(self.n_embd)
kernel_size = self.n_kv_stride + 1 if self.n_kv_stride > 1 else 3
stride, padding = self.n_kv_stride, kernel_size // 2
self.key_conv = MaskedConv1D(self.n_embd, self.n_embd, kernel_size,
stride=stride, padding=padding, groups=self.n_embd, bias=False)
self.key_norm = LayerNorm(self.n_embd)
self.value_conv = MaskedConv1D(self.n_embd, self.n_embd,
kernel_size, stride=stride, padding=padding, groups=self.n_embd,
bias=False)
self.value_norm = LayerNorm(self.n_embd)
self.key = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.query = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.value = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.attn_drop = nn.Dropout(attn_pdrop)
self.proj_drop = nn.Dropout(proj_pdrop)
self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1)
def forward(self, x, mask):
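        # q/k/v are produced by depthwise convs + channel LayerNorm (which also
        # handle any query / key-value downsampling), projected by 1x1 convs,
        # reshaped to (B, n_head, T, n_channels), and combined with masked
        # scaled dot-product attention; the output is re-masked with the query
        # mask before being returned.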
B, C, _T = x.size()
q, qx_mask = self.query_conv(x, mask)
q = self.query_norm(q)
k, kv_mask = self.key_conv(x, mask)
k = self.key_norm(k)
v, _ = self.value_conv(x, mask)
v = self.value_norm(v)
q = self.query(q)
k = self.key(k)
v = self.value(v)
k = k.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
q = q.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
v = v.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
att = q * self.scale @ k.transpose(-2, -1)
att = att.masked_fill(torch.logical_not(kv_mask[:, :, None, :]),
float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
out = att @ (v * kv_mask[:, :, :, None].float())
out = out.transpose(2, 3).contiguous().view(B, C, -1)
out = self.proj_drop(self.proj(out)) * qx_mask.float()
return out, qx_mask
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_embd': 4, 'n_head': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.utils.data
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp29 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp31 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp34 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp37 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp53 = tl.load(in_ptr3 + (x0 + 16 * x1), xmask)
tmp55 = tl.load(in_ptr3 + (4 + x0 + 16 * x1), xmask)
tmp58 = tl.load(in_ptr3 + (8 + x0 + 16 * x1), xmask)
tmp61 = tl.load(in_ptr3 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tmp30 = tmp29 * tmp1
tmp32 = tmp31 * tmp4
tmp33 = tmp30 + tmp32
tmp35 = tmp34 * tmp8
tmp36 = tmp33 + tmp35
tmp38 = tmp37 * tmp12
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp15
tmp41 = tmp30 - tmp40
tmp42 = tmp41 * tmp41
tmp43 = tmp32 - tmp40
tmp44 = tmp43 * tmp43
tmp45 = tmp42 + tmp44
tmp46 = tmp35 - tmp40
tmp47 = tmp46 * tmp46
tmp48 = tmp45 + tmp47
tmp49 = tmp38 - tmp40
tmp50 = tmp49 * tmp49
tmp51 = tmp48 + tmp50
tmp52 = tmp51 / tmp15
tmp54 = tmp53 * tmp1
tmp56 = tmp55 * tmp4
tmp57 = tmp54 + tmp56
tmp59 = tmp58 * tmp8
tmp60 = tmp57 + tmp59
tmp62 = tmp61 * tmp12
tmp63 = tmp60 + tmp62
tmp64 = tmp63 / tmp15
tmp65 = tmp54 - tmp64
tmp66 = tmp65 * tmp65
tmp67 = tmp56 - tmp64
tmp68 = tmp67 * tmp67
tmp69 = tmp66 + tmp68
tmp70 = tmp59 - tmp64
tmp71 = tmp70 * tmp70
tmp72 = tmp69 + tmp71
tmp73 = tmp62 - tmp64
tmp74 = tmp73 * tmp73
tmp75 = tmp72 + tmp74
tmp76 = tmp75 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
tl.store(out_ptr2 + x2, tmp40, xmask)
tl.store(out_ptr3 + x2, tmp52, xmask)
tl.store(out_ptr4 + x2, tmp64, xmask)
tl.store(out_ptr5 + x2, tmp76, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_div_mean_mul_sqrt_sub_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9,
in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 4
x3 = xindex // 16
x2 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + (x1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp6 = tl.load(in_ptr3 + (x1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + x0, xmask)
tmp17 = tl.load(in_ptr7 + (x1 + 4 * x3), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr8 + (x1 + 4 * x3), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr9 + x2, xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr10 + x2, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr11 + x0, xmask)
tmp29 = tl.load(in_ptr12 + (x1 + 4 * x3), xmask, eviction_policy=
'evict_last')
tmp31 = tl.load(in_ptr13 + (x1 + 4 * x3), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr14 + x2, xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr15 + x2, xmask, eviction_policy='evict_last')
tmp1 = tmp0 != 0
tmp3 = tmp2 * tmp0
tmp5 = tmp3 - tmp4
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp15 * tmp0
tmp18 = tmp16 - tmp17
tmp20 = tmp19 + tmp7
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp18 / tmp21
tmp24 = tmp22 * tmp23
tmp26 = tmp24 + tmp25
tmp28 = tmp27 * tmp0
tmp30 = tmp28 - tmp29
tmp32 = tmp31 + tmp7
tmp33 = libdevice.sqrt(tmp32)
tmp34 = tmp30 / tmp33
tmp36 = tmp34 * tmp35
tmp38 = tmp36 + tmp37
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
tl.store(out_ptr2 + x0, tmp26, xmask)
tl.store(out_ptr3 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_logical_not_masked_fill_4(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tmp0 != 0
tmp2 = tmp1 == 0
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 != 0
tmp8 = tmp7 == 0
tmp10 = tl.where(tmp8, tmp4, tmp9)
tmp11 = triton_helpers.maximum(tmp5, tmp10)
tmp13 = tmp12 != 0
tmp14 = tmp13 == 0
tmp16 = tl.where(tmp14, tmp4, tmp15)
tmp17 = triton_helpers.maximum(tmp11, tmp16)
tmp19 = tmp18 != 0
tmp20 = tmp19 == 0
tmp22 = tl.where(tmp20, tmp4, tmp21)
tmp23 = triton_helpers.maximum(tmp17, tmp22)
tmp24 = tmp5 - tmp23
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp10 - tmp23
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp16 - tmp23
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tmp22 - tmp23
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tl.store(out_ptr0 + x2, tmp23, xmask)
tl.store(out_ptr1 + x2, tmp34, xmask)
@triton.jit
def triton_poi_fused__softmax_logical_not_masked_fill_5(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
x4 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_out_ptr0 + x3, xmask)
tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp1 = tmp0 != 0
tmp2 = tmp1 == 0
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused__to_copy_mul_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 != 0
tmp5 = tmp4.to(tl.float32)
tmp6 = tmp2 * tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused__to_copy_convolution_mul_7(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask).to(tl.int1)
tmp2 = tmp0 + tmp1
tmp4 = tmp3.to(tl.float32)
tmp5 = tmp2 * tmp4
tl.store(in_out_ptr0 + x3, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1, 3), (3, 3, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_6, (4, 1, 3), (3, 3, 1))
assert_size_stride(primals_7, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_8, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_9, (4, 1, 3), (3, 3, 1))
assert_size_stride(primals_10, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_11, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_12, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_19, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf5 = extern_kernels.convolution(primals_1, primals_6, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf5, (4, 4, 4), (16, 4, 1))
buf9 = extern_kernels.convolution(primals_1, primals_9, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf9, (4, 4, 4), (16, 4, 1))
buf2 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf3 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf10 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf11 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_mul_pow_sub_0[grid(16)](buf0, primals_3, buf5,
buf9, buf2, buf3, buf6, buf7, buf10, buf11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__to_copy_add_div_mean_mul_sqrt_sub_1[grid(64)](
primals_3, buf0, buf2, buf3, primals_4, primals_5, buf5, buf6,
buf7, primals_7, primals_8, buf9, buf10, buf11, primals_10,
primals_11, buf1, buf4, buf8, buf12, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf10
del buf11
del buf2
del buf3
del buf6
del buf7
del primals_11
del primals_5
del primals_8
buf13 = extern_kernels.convolution(buf4, primals_12, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4), (16, 4, 1))
buf14 = extern_kernels.convolution(buf8, primals_14, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 4), (16, 4, 1))
buf15 = extern_kernels.convolution(buf12, primals_16, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf15, (4, 4, 4), (16, 4, 1))
buf16 = reinterpret_tensor(buf13, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf13
triton_poi_fused_mul_2[grid(64)](buf16, primals_13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_13
buf17 = buf14
del buf14
triton_poi_fused_convolution_3[grid(64)](buf17, primals_15, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_15
buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf16, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf17, (16, 1, 4), (4, 0, 1), 0), out=buf18)
buf19 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf20 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_logical_not_masked_fill_4[grid(64)](primals_3
, buf18, buf19, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf21 = reinterpret_tensor(buf18, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf18
triton_poi_fused__softmax_logical_not_masked_fill_5[grid(256)](buf21,
primals_3, buf19, buf20, 256, XBLOCK=128, num_warps=4, num_stages=1
)
del buf19
buf22 = reinterpret_tensor(buf15, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf15
triton_poi_fused__to_copy_mul_6[grid(64)](buf22, primals_17,
primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_17
buf23 = reinterpret_tensor(buf20, (16, 4, 1), (4, 1, 1), 0)
del buf20
extern_kernels.bmm(reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 0), 0), out=buf23)
buf24 = extern_kernels.convolution(reinterpret_tensor(buf23, (4, 4,
4), (16, 4, 1), 0), primals_18, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf24, (4, 4, 4), (16, 4, 1))
buf25 = buf24
del buf24
triton_poi_fused__to_copy_convolution_mul_7[grid(64)](buf25,
primals_19, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_19
return (buf25, buf1, primals_1, primals_2, primals_3, primals_4,
primals_6, primals_7, primals_9, primals_10, primals_12, primals_14,
primals_16, primals_18, buf0, buf1, buf4, buf5, buf8, buf9, buf12,
buf21, reinterpret_tensor(buf23, (4, 4, 4), (16, 4, 1), 0),
reinterpret_tensor(buf22, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf16, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf17, (16, 4, 1), (4, 1, 4), 0))
class LayerNorm(nn.Module):
"""
LayerNorm that supports inputs of size B, C, T
"""
def __init__(self, num_channels, eps=1e-05, affine=True, device=None,
dtype=None):
super().__init__()
factory_kwargs = {'device': device, 'dtype': dtype}
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = nn.Parameter(torch.ones([1, num_channels, 1], **
factory_kwargs))
self.bias = nn.Parameter(torch.zeros([1, num_channels, 1], **
factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
assert x.dim() == 3
assert x.shape[1] == self.num_channels
mu = torch.mean(x, dim=1, keepdim=True)
res_x = x - mu
sigma = torch.mean(res_x ** 2, dim=1, keepdim=True)
out = res_x / torch.sqrt(sigma + self.eps)
if self.affine:
out *= self.weight
out += self.bias
return out
class MaskedConv1D(nn.Module):
"""
Masked 1D convolution. Interface remains the same as Conv1d.
    Only supports a subset of 1d convs
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):
super().__init__()
assert kernel_size % 2 == 1 and kernel_size // 2 == padding
self.stride = stride
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias, padding_mode)
if bias:
torch.nn.init.constant_(self.conv.bias, 0.0)
def forward(self, x, mask):
_B, _C, T = x.size()
assert T % self.stride == 0
out_conv = self.conv(x)
if self.stride > 1:
out_mask = F.interpolate(mask.float(), size=T // self.stride,
mode='nearest')
else:
out_mask = mask.float()
out_conv = out_conv * out_mask.detach()
out_mask = out_mask.bool()
return out_conv, out_mask
class MaskedMHCANew(nn.Module):
"""
Multi Head Conv Attention with mask
Add a depthwise convolution within a standard MHA
The extra conv op can be used to
    (1) encode relative position information (replacing position encoding);
(2) downsample the features if needed;
(3) match the feature channels
Note: With current implementation, the downsampled feature will be aligned
to every s+1 time step, where s is the downsampling stride. This allows us
to easily interpolate the corresponding positional embeddings.
Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
def __init__(self, n_embd, n_head, n_qx_stride=1, n_kv_stride=1,
attn_pdrop=0.0, proj_pdrop=0.0):
super().__init__()
assert n_embd % n_head == 0
self.n_embd = n_embd
self.n_head = n_head
self.n_channels = n_embd // n_head
self.scale = 1.0 / math.sqrt(self.n_channels)
assert n_qx_stride == 1 or n_qx_stride % 2 == 0
assert n_kv_stride == 1 or n_kv_stride % 2 == 0
self.n_qx_stride = n_qx_stride
self.n_kv_stride = n_kv_stride
kernel_size = self.n_qx_stride + 1 if self.n_qx_stride > 1 else 3
stride, padding = self.n_kv_stride, kernel_size // 2
self.query_conv = MaskedConv1D(self.n_embd, self.n_embd,
kernel_size, stride=stride, padding=padding, groups=self.n_embd,
bias=False)
self.query_norm = LayerNorm(self.n_embd)
kernel_size = self.n_kv_stride + 1 if self.n_kv_stride > 1 else 3
stride, padding = self.n_kv_stride, kernel_size // 2
self.key_conv = MaskedConv1D(self.n_embd, self.n_embd, kernel_size,
stride=stride, padding=padding, groups=self.n_embd, bias=False)
self.key_norm = LayerNorm(self.n_embd)
self.value_conv = MaskedConv1D(self.n_embd, self.n_embd,
kernel_size, stride=stride, padding=padding, groups=self.n_embd,
bias=False)
self.value_norm = LayerNorm(self.n_embd)
self.key = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.query = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.value = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.attn_drop = nn.Dropout(attn_pdrop)
self.proj_drop = nn.Dropout(proj_pdrop)
self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1)
def forward(self, input_0, input_1):
primals_2 = self.query_conv.conv.weight
primals_4 = self.query_norm.weight
primals_5 = self.query_norm.bias
primals_6 = self.key_conv.conv.weight
primals_7 = self.key_norm.weight
primals_8 = self.key_norm.bias
primals_9 = self.value_conv.conv.weight
primals_10 = self.value_norm.weight
primals_11 = self.value_norm.bias
primals_12 = self.key.weight
primals_13 = self.key.bias
primals_14 = self.query.weight
primals_15 = self.query.bias
primals_16 = self.value.weight
primals_17 = self.value.bias
primals_18 = self.proj.weight
primals_19 = self.proj.bias
primals_1 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0], output[1]
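# A minimal usage sketch, not part of the source entry: the rewritten forward() runs the
# Inductor call() above, so module parameters and inputs must be CUDA tensors. The
# (4, 4, 4) feature shape follows the asserts in call(); the (4, 1, 4) mask shape is an
# assumption based on the usual (B, 1, T) mask convention of MaskedConv1D, and the mask is
# generated with torch.rand like the other benchmark-style inputs in this file.
if torch.cuda.is_available():
    mhca = MaskedMHCANew(n_embd=4, n_head=4).cuda()
    feats = torch.rand(4, 4, 4, device='cuda')   # B, C, T
    mask = torch.rand(4, 1, 4, device='cuda')
    out_feats, out_mask = mhca(feats, mask)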
|
yjh0410/actionformer_release
|
MaskedMHCA
| false
| 16,801
|
[
"MIT"
] | 61
|
7a97422111d3e29c8d2e14088c850c6975855ea7
|
https://github.com/yjh0410/actionformer_release/tree/7a97422111d3e29c8d2e14088c850c6975855ea7
|
Attention
|
import torch
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class Attention(nn.Module):
def __init__(self, v_features, q_features, mid_features, glimpses, drop=0.0
):
super(Attention, self).__init__()
self.lin_v = FCNet(v_features, mid_features, activate='relu')
self.lin_q = FCNet(q_features, mid_features, activate='relu')
self.lin = FCNet(mid_features, glimpses, drop=drop)
def forward(self, v, q):
"""
v = batch, num_obj, dim
q = batch, dim
"""
v = self.lin_v(v)
q = self.lin_q(q)
batch, num_obj, _ = v.shape
_, q_dim = q.shape
q = q.unsqueeze(1).expand(batch, num_obj, q_dim)
x = v * q
x = self.lin(x)
x = F.softmax(x, dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'v_features': 4, 'q_features': 4, 'mid_features': 4,
'glimpses': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = libdevice.sqrt(tmp4)
tmp8 = tmp7 / tmp5
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
@triton.jit
def triton_poi_fused_mul_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 * tmp4
tl.store(out_ptr0 + x3, tmp5, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (), ())
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (), ())
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_mul_norm_0[grid(1)](buf1, primals_2, primals_1,
buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (16,
4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_3
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_div_mul_norm_0[grid(1)](buf5, primals_6, primals_5,
buf6, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, primals_8, reinterpret_tensor(buf6,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7)
del primals_7
buf8 = empty_strided_cuda((), (), torch.float32)
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_div_mul_norm_0[grid(1)](buf9, primals_10,
primals_9, buf10, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_relu_1[grid(64)](buf3, buf7, buf11, 64, XBLOCK
=64, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (16, 4),
(4, 1), 0), reinterpret_tensor(buf10, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf12)
del primals_11
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0)
del buf12
triton_poi_fused__softmax_3[grid(64)](buf13, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf13
return (buf14, buf2, buf6, buf10, primals_1, primals_2, primals_5,
primals_6, primals_8, primals_9, primals_10, buf1,
reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), buf3, buf5, buf7,
buf9, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf14, buf10)
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class AttentionNew(nn.Module):
def __init__(self, v_features, q_features, mid_features, glimpses, drop=0.0
):
super(AttentionNew, self).__init__()
self.lin_v = FCNet(v_features, mid_features, activate='relu')
self.lin_q = FCNet(q_features, mid_features, activate='relu')
self.lin = FCNet(mid_features, glimpses, drop=drop)
def forward(self, input_0, input_1):
primals_3 = self.lin_v.lin.bias
primals_1 = self.lin_v.lin.weight_g
primals_2 = self.lin_v.lin.weight_v
primals_7 = self.lin_q.lin.bias
primals_5 = self.lin_q.lin.weight_g
primals_6 = self.lin_q.lin.weight_v
primals_11 = self.lin.lin.bias
primals_9 = self.lin.lin.weight_g
primals_8 = self.lin.lin.weight_v
primals_4 = input_0
primals_10 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
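# A minimal sketch, not part of the source entry: the generated *New modules in this file
# keep the original constructor but reroute forward() through the Inductor call() above,
# so parameters and inputs must live on CUDA and match the traced shapes.
if torch.cuda.is_available():
    att = AttentionNew(v_features=4, q_features=4, mid_features=4, glimpses=4).cuda()
    w = att(torch.rand(4, 4, 4, device='cuda'), torch.rand(4, 4, device='cuda'))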
|
zaynmi/semantic-equivalent-da-for-vqa
|
Attention
| false
| 16,802
|
[
"MIT"
] | 298
|
f121fb3e8fee8af5f1935a7526f19e0d884bd95b
|
https://github.com/zaynmi/semantic-equivalent-da-for-vqa/tree/f121fb3e8fee8af5f1935a7526f19e0d884bd95b
|
EltwiseProdScoring
|
import torch
import torch.nn as nn
class EltwiseProdScoring(nn.Module):
"""
    Linearly map h and v to the same dimension, then do an elementwise
    multiplication and a linear scoring
"""
def __init__(self, h_dim, a_dim, dot_dim=256):
"""Initialize layer."""
super(EltwiseProdScoring, self).__init__()
self.linear_in_h = nn.Linear(h_dim, dot_dim, bias=True)
self.linear_in_a = nn.Linear(a_dim, dot_dim, bias=True)
self.linear_out = nn.Linear(dot_dim, 1, bias=True)
def forward(self, h, all_u_t, mask=None):
"""Propagate h through the network.
h: batch x h_dim
all_u_t: batch x a_num x a_dim
"""
target = self.linear_in_h(h).unsqueeze(1)
context = self.linear_in_a(all_u_t)
eltprod = torch.mul(target, context)
logits = self.linear_out(eltprod).squeeze(2)
return logits
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'h_dim': 4, 'a_dim': 4}]
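# A minimal usage sketch, not part of the source entry, using the shapes from the class
# docstring rather than get_inputs(): h is (batch, h_dim), all_u_t is (batch, a_num, a_dim).
scorer = EltwiseProdScoring(h_dim=4, a_dim=4, dot_dim=256)
h = torch.rand(4, 4)
all_u_t = torch.rand(4, 3, 4)   # 3 candidate actions per batch element
logits = scorer(h, all_u_t)
assert logits.shape == (4, 3)   # one score per candidate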
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4096
x2 = xindex // 16384
x3 = xindex % 16384
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 4), (4, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 256), (1, 4),
0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4, 256), (16384, 4096, 1024,
256, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(65536)](buf0, buf1, buf2, 65536, XBLOCK
=256, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((256, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 256),
(256, 1), 0), reinterpret_tensor(primals_7, (256, 1), (1, 256),
0), alpha=1, beta=1, out=buf4)
del primals_8
return reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (256, 256), (256, 1), 0), primals_7
class EltwiseProdScoringNew(nn.Module):
"""
    Linearly map h and v to the same dimension, then do an elementwise
    multiplication and a linear scoring
"""
def __init__(self, h_dim, a_dim, dot_dim=256):
"""Initialize layer."""
super(EltwiseProdScoringNew, self).__init__()
self.linear_in_h = nn.Linear(h_dim, dot_dim, bias=True)
self.linear_in_a = nn.Linear(a_dim, dot_dim, bias=True)
self.linear_out = nn.Linear(dot_dim, 1, bias=True)
def forward(self, input_0, input_1):
primals_1 = self.linear_in_h.weight
primals_2 = self.linear_in_h.bias
primals_4 = self.linear_in_a.weight
primals_5 = self.linear_in_a.bias
primals_7 = self.linear_out.weight
primals_8 = self.linear_out.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
zhangybzbo/speaker_follower
|
EltwiseProdScoring
| false
| 16,803
|
[
"BSD-2-Clause",
"MIT"
] | 117
|
e4d109ee26b2f57066adc9720443abf842ee9a9d
|
https://github.com/zhangybzbo/speaker_follower/tree/e4d109ee26b2f57066adc9720443abf842ee9a9d
|
RelativePositionalEmbedding
|
import torch
import torch.nn as nn
class RelativePositionalEmbedding(nn.Module):
def __init__(self, n_model, max_len=1024):
super().__init__()
self.embed = nn.Embedding(max_len, n_model)
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
w = self.embed.weight
max_len, n_model = w.shape
pos = torch.cat((w.new_tensor(range(-max_len // 2, 0)), w.
new_tensor(range(max_len // 2))))
w = pos.unsqueeze(-1) / 10000 ** (w.new_tensor(range(n_model)) // 2 *
2 / n_model)
w[:, 0::2], w[:, 1::2] = w[:, 0::2].sin(), w[:, 1::2].cos()
self.embed.weight.copy_(w)
def forward(self, x):
pos = x.new_tensor(range(x.shape[1])).long()
offset = sum(divmod(self.embed.weight.shape[0], 2))
return self.embed(pos - pos.unsqueeze(-1) + offset)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_model': 4}]
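# A minimal usage sketch, not part of the source entry. Only x.shape[1] is used, so for the
# (4, 4, 4, 4) benchmark input the module returns a (T, T, n_model) = (4, 4, 4) table of
# relative-position embeddings. The offset is sum(divmod(1024, 2)) = 512 + 0 = 512, which
# matches the hard-coded 512 in the compiled kernels of this entry.
emb = RelativePositionalEmbedding(n_model=4)
out = emb(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4)   # indices pos_j - pos_i + 512 span 509..515 here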
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_add_lift_fresh_sub_0(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = tmp13.to(tl.int32)
tmp15 = x1
tmp16 = tmp15 < tmp1
tmp17 = tmp15 < tmp3
tmp18 = tl.where(tmp17, tmp5, tmp6)
tmp19 = tmp15 < tmp8
tmp20 = tl.where(tmp19, tmp10, tmp11)
tmp21 = tl.where(tmp16, tmp18, tmp20)
tmp22 = tmp21.to(tl.int32)
tmp23 = tmp14 - tmp22
tmp24 = tl.full([1], 512, tl.int64)
tmp25 = tmp23 + tmp24
tl.store(out_ptr0 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_embedding_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x0 = xindex % 4
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = tmp13.to(tl.int32)
tmp15 = x2
tmp16 = tmp15 < tmp1
tmp17 = tmp15 < tmp3
tmp18 = tl.where(tmp17, tmp5, tmp6)
tmp19 = tmp15 < tmp8
tmp20 = tl.where(tmp19, tmp10, tmp11)
tmp21 = tl.where(tmp16, tmp18, tmp20)
tmp22 = tmp21.to(tl.int32)
tmp23 = tmp14 - tmp22
tmp24 = tl.full([1], 512, tl.int64)
tmp25 = tmp23 + tmp24
tmp26 = tl.load(in_ptr0 + (x0 + 4 * tmp25), xmask)
tl.store(out_ptr0 + x4, tmp26, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_add_lift_fresh_sub_0[grid(16)](buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_embedding_1[grid(64)](primals_2, buf1, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_2
return buf1, buf0
class RelativePositionalEmbeddingNew(nn.Module):
def __init__(self, n_model, max_len=1024):
super().__init__()
self.embed = nn.Embedding(max_len, n_model)
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
w = self.embed.weight
max_len, n_model = w.shape
pos = torch.cat((w.new_tensor(range(-max_len // 2, 0)), w.
new_tensor(range(max_len // 2))))
w = pos.unsqueeze(-1) / 10000 ** (w.new_tensor(range(n_model)) // 2 *
2 / n_model)
w[:, 0::2], w[:, 1::2] = w[:, 0::2].sin(), w[:, 1::2].cos()
self.embed.weight.copy_(w)
def forward(self, input_0):
primals_2 = self.embed.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
yzhangcs/parser
|
RelativePositionalEmbedding
| false
| 16,804
|
[
"MIT"
] | 439
|
3abebde1c9fe0bf2e99adce845aaf2a04b194f8a
|
https://github.com/yzhangcs/parser/tree/3abebde1c9fe0bf2e99adce845aaf2a04b194f8a
|
Classifier
|
import torch
import torch.utils.data
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class Classifier(nn.Module):
def __init__(self, in_features, mid_features, out_features, drop=0.0):
super(Classifier, self).__init__()
self.lin11 = FCNet(in_features[0], mid_features, activate='relu')
self.lin12 = FCNet(in_features[1], mid_features, activate='relu')
self.lin2 = FCNet(mid_features, mid_features, activate='relu')
self.lin3 = FCNet(mid_features, out_features, drop=drop)
def forward(self, v, q):
x = self.lin11(v) * self.lin12(q)
x = self.lin2(x)
x = self.lin3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': [4, 4], 'mid_features': 4, 'out_features': 4}]
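# A minimal usage sketch, not part of the source entry, mirroring get_inputs() and
# get_init_inputs() above. FCNet's Linear acts on the last dimension, so the (4, 4, 4, 4)
# input shape is preserved end to end.
clf = Classifier(in_features=[4, 4], mid_features=4, out_features=4)
out = clf(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4, 4)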
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = libdevice.sqrt(tmp4)
tmp8 = tmp7 / tmp5
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
@triton.jit
def triton_poi_fused_mul_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 * tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (), ())
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_9, (), ())
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (), ())
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_mul_norm_0[grid(1)](buf1, primals_2, primals_1,
buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64,
4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_3
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_div_mul_norm_0[grid(1)](buf5, primals_6, primals_5,
buf6, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_8, (64,
4), (4, 1), 0), reinterpret_tensor(buf6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf7)
del primals_7
buf8 = empty_strided_cuda((), (), torch.float32)
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_div_mul_norm_0[grid(1)](buf9, primals_10,
primals_9, buf10, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_relu_1[grid(256)](buf3, buf7, buf11, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0),
reinterpret_tensor(buf10, (4, 4), (1, 4), 0), out=buf12)
buf13 = empty_strided_cuda((), (), torch.float32)
buf14 = buf13
del buf13
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_div_mul_norm_0[grid(1)](buf14, primals_13,
primals_12, buf15, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf16 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf12
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(256)](buf16,
primals_11, buf18, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf17 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_14, reinterpret_tensor(buf16, (64, 4),
(4, 1), 0), reinterpret_tensor(buf15, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf17)
del primals_14
return (reinterpret_tensor(buf17, (4, 4, 4, 4), (64, 16, 4, 1), 0),
buf2, buf6, buf10, buf15, primals_1, primals_2, primals_5,
primals_6, primals_9, primals_10, primals_12, primals_13, buf1,
reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf3, buf5,
reinterpret_tensor(primals_8, (64, 4), (4, 1), 0), buf7, buf9,
reinterpret_tensor(buf11, (64, 4), (4, 1), 0), buf14,
reinterpret_tensor(buf16, (64, 4), (4, 1), 0), buf15, buf18, buf10)
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class ClassifierNew(nn.Module):
def __init__(self, in_features, mid_features, out_features, drop=0.0):
super(ClassifierNew, self).__init__()
self.lin11 = FCNet(in_features[0], mid_features, activate='relu')
self.lin12 = FCNet(in_features[1], mid_features, activate='relu')
self.lin2 = FCNet(mid_features, mid_features, activate='relu')
self.lin3 = FCNet(mid_features, out_features, drop=drop)
def forward(self, input_0, input_1):
primals_3 = self.lin11.lin.bias
primals_1 = self.lin11.lin.weight_g
primals_2 = self.lin11.lin.weight_v
primals_7 = self.lin12.lin.bias
primals_5 = self.lin12.lin.weight_g
primals_6 = self.lin12.lin.weight_v
primals_11 = self.lin2.lin.bias
primals_9 = self.lin2.lin.weight_g
primals_10 = self.lin2.lin.weight_v
primals_14 = self.lin3.lin.bias
primals_12 = self.lin3.lin.weight_g
primals_13 = self.lin3.lin.weight_v
primals_4 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
|
zaynmi/semantic-equivalent-da-for-vqa
|
Classifier
| false
| 16,805
|
[
"MIT"
] | 298
|
f121fb3e8fee8af5f1935a7526f19e0d884bd95b
|
https://github.com/zaynmi/semantic-equivalent-da-for-vqa/tree/f121fb3e8fee8af5f1935a7526f19e0d884bd95b
|
GumbelSigmoid
|
import torch
import torch.nn as nn
class GumbelSigmoid(nn.Module):
def __init__(self, max_T, decay_alpha):
super(GumbelSigmoid, self).__init__()
self.max_T = max_T
self.decay_alpha = decay_alpha
self.softmax = nn.Softmax(dim=1)
self.p_value = 1e-08
self.register_buffer('cur_T', torch.tensor(max_T))
def forward(self, x):
if self.training:
_cur_T = self.cur_T
else:
_cur_T = 0.03
r = 1 - x
x = (x + self.p_value).log()
r = (r + self.p_value).log()
x_N = torch.rand_like(x)
r_N = torch.rand_like(r)
x_N = -1 * (x_N + self.p_value).log()
r_N = -1 * (r_N + self.p_value).log()
x_N = -1 * (x_N + self.p_value).log()
r_N = -1 * (r_N + self.p_value).log()
x = x + x_N
x = x / (_cur_T + self.p_value)
r = r + r_N
r = r / (_cur_T + self.p_value)
x = torch.cat((x, r), dim=1)
x = self.softmax(x)
x = x[:, [0], :, :]
if self.training:
self.cur_T = self.cur_T * self.decay_alpha
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'max_T': 4, 'decay_alpha': 4}]
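# A minimal usage sketch, not part of the source entry. In eval() mode the temperature is
# the fixed 0.03; in train() mode cur_T is used and then decayed by decay_alpha. The
# concatenation stacks Gumbel-perturbed logits of x and 1 - x along dim=1 and only channel
# 0 of the softmax is kept, so a (4, 4, 4, 4) input yields a (4, 1, 4, 4) output.
gs = GumbelSigmoid(max_T=4, decay_alpha=4).eval()
y = gs(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 1, 4, 4)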
|
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = 1e-08
tmp7 = tmp5 + tmp6
tmp8 = tl_math.log(tmp7)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp10 = tmp9 + tmp6
tmp11 = tl_math.log(tmp10)
tmp12 = -1.0
tmp13 = tmp11 * tmp12
tmp14 = tmp13 + tmp6
tmp15 = tl_math.log(tmp14)
tmp16 = tmp15 * tmp12
tmp17 = tmp8 + tmp16
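    # constant baked in at trace time: 1 / (_cur_T + p_value) with the eval-mode
    # temperature, i.e. 1 / (0.03 + 1e-08) ~= 33.333322222225924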
tmp18 = 33.333322222225924
tmp19 = tmp17 * tmp18
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp4, tmp19, tmp20)
tmp22 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp25 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp22 &
xmask, other=0.0)
tmp26 = 1.0
tmp27 = tmp26 - tmp25
tmp28 = tmp27 + tmp6
tmp29 = tl_math.log(tmp28)
tmp30 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp22 &
xmask, other=0.0)
tmp31 = tmp30 + tmp6
tmp32 = tl_math.log(tmp31)
tmp33 = tmp32 * tmp12
tmp34 = tmp33 + tmp6
tmp35 = tl_math.log(tmp34)
tmp36 = tmp35 * tmp12
tmp37 = tmp29 + tmp36
tmp38 = tmp37 * tmp18
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp22, tmp38, tmp39)
tmp41 = tl.where(tmp4, tmp21, tmp40)
tl.store(out_ptr0 + x3, tmp41, xmask)
@triton.jit
def triton_per_fused__softmax_index_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 128 * x1), xmask, other=0.0)
tmp11 = tl.load(in_ptr0 + (x0 + 128 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp11 - tmp4
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp13 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp14, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, buf1, buf3, buf4, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf1
del buf3
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf6
triton_per_fused__softmax_index_1[grid(64)](buf7, buf4, 64, 8,
XBLOCK=1, num_warps=2, num_stages=1)
del buf4
return buf7,
class GumbelSigmoidNew(nn.Module):
def __init__(self, max_T, decay_alpha):
super(GumbelSigmoidNew, self).__init__()
self.max_T = max_T
self.decay_alpha = decay_alpha
self.softmax = nn.Softmax(dim=1)
self.p_value = 1e-08
self.register_buffer('cur_T', torch.tensor(max_T))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
zdaxie/SpatiallyAdaptiveInference-Detection
|
GumbelSigmoid
| false
| 16,806
|
[
"Apache-2.0"
] | 55
|
323801deac6f0641d00ecb23f6885df8483cc447
|
https://github.com/zdaxie/SpatiallyAdaptiveInference-Detection/tree/323801deac6f0641d00ecb23f6885df8483cc447
|
AdaptiveInstanceNorm
|
import torch
import torch.utils.data
import torch
import torch.nn as nn
import torch.sparse
class AdaptiveInstanceNorm(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.linear = nn.Linear(style_dim, in_channel * 2)
self.linear.weight.data.normal_()
self.linear.bias.data.zero_()
self.linear.bias.data[:in_channel] = 1
self.linear.bias.data[in_channel:] = 0
def forward(self, input, style):
style = self.linear(style).unsqueeze(2).unsqueeze(3)
gamma, beta = style.chunk(2, 1)
out = self.norm(input)
out = gamma * out + beta
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'style_dim': 4}]
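# A minimal usage sketch, not part of the source entry, mirroring get_inputs() and
# get_init_inputs() above. The linear layer maps the style vector to 2 * in_channel values
# split into per-channel (gamma, beta); the bias initialisation starts gamma's bias at 1
# and beta's bias at 0.
adain = AdaptiveInstanceNorm(in_channel=4, style_dim=4)
content = torch.rand(4, 4, 4, 4)   # N, C, H, W
style = torch.rand(4, 4)           # N, style_dim
out = adain(content, style)
assert out.shape == (4, 4, 4, 4)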
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch
import torch.nn as nn
import torch.sparse
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
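    # biased variance over the 16 spatial positions (4 x 4); the eps below is the
    # nn.InstanceNorm2d default of 1e-05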
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = tmp0 - tmp10
tmp26 = tmp25 * tmp21
tmp27 = tmp24 * tmp26
tmp30 = tmp28 + tmp29
tmp31 = tmp27 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp31, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0[grid(16)](buf4,
primals_4, buf0, primals_2, buf1, buf5, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, primals_4, buf1, buf4
class AdaptiveInstanceNormNew(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.linear = nn.Linear(style_dim, in_channel * 2)
self.linear.weight.data.normal_()
self.linear.bias.data.zero_()
self.linear.bias.data[:in_channel] = 1
self.linear.bias.data[in_channel:] = 0
def forward(self, input_0, input_1):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
zhengqili/Crowdsampling-the-Plenoptic-Function
|
AdaptiveInstanceNorm
| false
| 16,807
|
[
"MIT"
] | 70
|
3164e9f9574d597690f83dfdfb34cc470d2dcb88
|
https://github.com/zhengqili/Crowdsampling-the-Plenoptic-Function/tree/3164e9f9574d597690f83dfdfb34cc470d2dcb88
|