| entry_point | original_triton_python_code | optimised_triton_code | repo_name | module_name | synthetic | uuid | licenses | stars | sha | repo_link |
|---|---|---|---|---|---|---|---|---|---|---|
Res
|
import torch
from torch import nn
import torch.distributions
class Res(nn.Module):
def __init__(self, H):
super().__init__()
self.u1 = nn.Linear(H, H)
self.u2 = nn.Linear(H, H)
self.v1 = nn.Linear(H, H)
self.v2 = nn.Linear(H, H)
self.w = nn.Linear(H, H)
def forward(self, y):
y = self.w(y)
y = y + torch.relu(self.v1(torch.relu(self.u1(y))))
return y + torch.relu(self.v2(torch.relu(self.u2(y))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'H': 4}]
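# Hedged usage sketch: the get_inputs/get_init_inputs pair follows the
# paritybench-style harness convention (constructor args and kwargs, then
# sample forward inputs). The real harness is not part of this dump.
init_args, init_kwargs = get_init_inputs()
model = Res(*init_args, **init_kwargs)
out = model(*get_inputs())
assert out.shape == (4, 4, 4, 4)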
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp0 + tmp5
tmp7 = 0.0
tmp8 = tmp5 <= tmp7
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2,
primals_5, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf0,
buf3, primals_7, buf4, buf11, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_7
buf5 = buf3
del buf3
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf6,
primals_9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf4,
buf7, primals_11, buf8, buf9, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf7
del primals_11
return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(
buf6, (64, 4), (4, 1), 0
), buf9, primals_10, buf10, primals_8, buf11, primals_6, buf12, primals_4
class ResNew(nn.Module):
def __init__(self, H):
super().__init__()
self.u1 = nn.Linear(H, H)
self.u2 = nn.Linear(H, H)
self.v1 = nn.Linear(H, H)
self.v2 = nn.Linear(H, H)
self.w = nn.Linear(H, H)
def forward(self, input_0):
primals_1 = self.u1.weight
primals_2 = self.u1.bias
primals_4 = self.u2.weight
primals_5 = self.u2.bias
primals_6 = self.v1.weight
primals_7 = self.v1.bias
primals_8 = self.v2.weight
primals_9 = self.v2.bias
primals_10 = self.w.weight
primals_11 = self.w.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
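# Hedged smoke test for the compiled wrapper (assumes a CUDA device, since
# call() allocates CUDA buffers). Note the primals mapping above follows
# parameter registration order (u1, u2, v1, v2, w) while the eager forward
# applies w first, so a numeric parity check against Res would need careful
# weight matching; here we only check shape and dtype.
torch.manual_seed(0)
compiled = ResNew(H=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = compiled(x)
assert out.shape == (4, 4, 4, 4) and out.dtype == torch.float32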
|
w-cheng/pytorch-struct
|
Res
| false
| 13,071
|
[
"MIT"
] | 0
|
e51fecc1473925e4c44de135c4a3240fcb20fa40
|
https://github.com/w-cheng/pytorch-struct/tree/e51fecc1473925e4c44de135c4a3240fcb20fa40
|
DAInsHead
|
import torch
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class DAInsHead(nn.Module):
"""
Adds a simple Instance-level Domain Classifier head
"""
def __init__(self, in_channels):
"""
Arguments:
in_channels (int): number of channels of the input feature
"""
super(DAInsHead, self).__init__()
self.fc1_da = nn.Linear(in_channels, 1024)
self.fc2_da = nn.Linear(1024, 1024)
self.fc3_da = nn.Linear(1024, 1)
for l in [self.fc1_da, self.fc2_da]:
nn.init.normal_(l.weight, std=0.01)
nn.init.constant_(l.bias, 0)
nn.init.normal_(self.fc3_da.weight, std=0.05)
nn.init.constant_(self.fc3_da.bias, 0)
def forward(self, x):
x = F.relu(self.fc1_da(x))
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu(self.fc2_da(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.fc3_da(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (1024, 4), (4, 1))
assert_size_stride(primals_2, (1024,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1024, 1024), (1024, 1))
assert_size_stride(primals_5, (1024,), (1,))
assert_size_stride(primals_6, (1, 1024), (1024, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1024), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1024), (16384, 4096, 1024,
1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(65536)](buf1,
primals_2, buf7, 65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0
), reinterpret_tensor(primals_4, (1024, 1024), (1, 1024), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1024), (16384, 4096, 1024,
1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(65536)](buf3,
primals_5, buf6, 65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 1024),
(1024, 1), 0), reinterpret_tensor(primals_6, (1024, 1), (1,
1024), 0), alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0
), reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0
), primals_6, buf6, primals_4, buf7
class DAInsHeadNew(nn.Module):
"""
Adds a simple Instance-level Domain Classifier head
"""
def __init__(self, in_channels):
"""
Arguments:
in_channels (int): number of channels of the input feature
"""
super(DAInsHeadNew, self).__init__()
self.fc1_da = nn.Linear(in_channels, 1024)
self.fc2_da = nn.Linear(1024, 1024)
self.fc3_da = nn.Linear(1024, 1)
for l in [self.fc1_da, self.fc2_da]:
nn.init.normal_(l.weight, std=0.01)
nn.init.constant_(l.bias, 0)
nn.init.normal_(self.fc3_da.weight, std=0.05)
nn.init.constant_(self.fc3_da.bias, 0)
def forward(self, input_0):
primals_1 = self.fc1_da.weight
primals_2 = self.fc1_da.bias
primals_4 = self.fc2_da.weight
primals_5 = self.fc2_da.bias
primals_6 = self.fc3_da.weight
primals_7 = self.fc3_da.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
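# Hedged parity sketch (assumes CUDA and the eager DAInsHead class from the
# cell above). The traced graph contains no dropout kernels, so outputs
# match the eager module only with dropout inactive, i.e. in eval mode.
torch.manual_seed(0)
eager = DAInsHead(in_channels=4).cuda().eval()
compiled = DAInsHeadNew(in_channels=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), compiled(x), atol=1e-5)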
|
thesuperorange/Domain-Adaptive-Faster-RCNN-PyTorch
|
DAInsHead
| false
| 13,072
|
[
"MIT"
] | 0
|
bcde744a486b25ec1d6e4b023da3ce0c8e5d72a7
|
https://github.com/thesuperorange/Domain-Adaptive-Faster-RCNN-PyTorch/tree/bcde744a486b25ec1d6e4b023da3ce0c8e5d72a7
|
Net
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = x.view((-1, 1, 28, 28))
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def get_inputs():
return [torch.rand([4, 1, 28, 28])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 86528
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 676 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 576 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x1), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x1), None, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_4(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 9216), (9216, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (10, 128), (128, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 26, 26), (21632, 676, 26, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(86528)](buf1, primals_3,
86528, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 24, 24), (36864, 576, 24, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(147456)](buf3, primals_5,
147456, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1),
torch.int8)
buf5 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_2[grid(36864)](buf3, buf4,
buf5, 36864, XBLOCK=256, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (4, 9216), (9216, 1), 0),
reinterpret_tensor(primals_6, (9216, 128), (1, 9216), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_relu_3[grid(512)](buf7, primals_7, 512, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf7, reinterpret_tensor(primals_8,
(128, 10), (1, 128), 0), alpha=1, beta=1, out=buf8)
del primals_9
buf11 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_4[grid(4)](buf8, buf11, 4, 10, XBLOCK
=1, num_warps=2, num_stages=1)
del buf8
return (buf11, primals_2, primals_4, primals_1, buf1, buf3, buf4,
reinterpret_tensor(buf5, (4, 9216), (9216, 1), 0), buf7, buf11,
primals_8, primals_6)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
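# Hedged parity sketch (assumes CUDA and the eager Net class from the cell
# above). As with DAInsHead, the generated kernels skip dropout, so compare
# in eval mode.
torch.manual_seed(0)
eager = Net().cuda().eval()
compiled = NetNew().cuda().eval()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 1, 28, 28, device='cuda')
assert torch.allclose(eager(x), compiled(x), atol=1e-4)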
|
v01dXYZ/petastorm
|
Net
| false
| 13,073
|
[
"Apache-2.0"
] | 0
|
d6f4e82eb2c3a6c2b4c16c060c7350331b60a51a
|
https://github.com/v01dXYZ/petastorm/tree/d6f4e82eb2c3a6c2b4c16c060c7350331b60a51a
|
GlobalAttention
|
import torch
import torch.nn as nn
import torch.cuda
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments
), 'Not all arguments have the same value: ' + str(args)
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
lengths.unsqueeze(1))
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
return out.contiguous().view(size[0], size[1], -1)
class BottleLinear(Bottle, nn.Linear):
pass
class GlobalAttention(nn.Module):
"""
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
Constructs a unit mapping a query `q` of size `dim`
and a source matrix `H` of size `n x dim`, to an output
of size `dim`.
.. mermaid::
graph BT
A[Query]
subgraph RNN
C[H 1]
D[H 2]
E[H N]
end
F[Attn]
G[Output]
A --> F
C --> F
D --> F
E --> F
C -.-> G
D -.-> G
E -.-> G
F --> G
All models compute the output as
:math:`c = \\sum_{j=1}^{SeqLength} a_j H_j` where
:math:`a_j` is the softmax of a score function.
They then apply a projection layer to [q, c].
However, they differ in how they compute the attention score.
* Luong Attention (dot, general):
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
* Bahdanau Attention (mlp):
* :math:`score(H_j, q) = v_a^T tanh(W_a q + U_a h_j)`
Args:
dim (int): dimensionality of query and key
coverage (bool): use coverage term
attn_type (str): type of attention to use, options [dot,general,mlp]
"""
def __init__(self, dim, coverage=False, attn_type='dot'):
super(GlobalAttention, self).__init__()
self.dim = dim
self.attn_type = attn_type
assert self.attn_type in ['dot', 'general', 'mlp'
], 'Please select a valid attention type.'
if self.attn_type == 'general':
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == 'mlp':
self.linear_context = BottleLinear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = BottleLinear(dim, 1, bias=False)
out_bias = self.attn_type == 'mlp'
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
self.sm = nn.Softmax(dim=1)
self.tanh = nn.Tanh()
if coverage:
self.linear_cover = nn.Linear(1, dim, bias=False)
def score(self, h_t, h_s):
"""
Args:
h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`
h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`
Returns:
:obj:`FloatTensor`:
raw attention scores (unnormalized) for each src index
`[batch x tgt_len x src_len]`
"""
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ['general', 'dot']:
if self.attn_type == 'general':
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = self.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, input, context, context_lengths=None, coverage=None):
"""
Args:
input (`FloatTensor`): query vectors `[batch x tgt_len x dim]`
context (`FloatTensor`): source vectors `[batch x src_len x dim]`
context_lengths (`LongTensor`): the source context lengths `[batch]`
coverage (`FloatTensor`): None (not supported yet)
Returns:
(`FloatTensor`, `FloatTensor`):
* Computed vector `[tgt_len x batch x dim]`
* Attention distributions for each query
`[tgt_len x batch x src_len]`
"""
if input.dim() == 2:
one_step = True
input = input.unsqueeze(1)
else:
one_step = False
batch, sourceL, dim = context.size()
batch_, targetL, dim_ = input.size()
aeq(batch, batch_)
aeq(dim, dim_)
aeq(self.dim, dim)
if coverage is not None:
batch_, sourceL_ = coverage.size()
aeq(batch, batch_)
aeq(sourceL, sourceL_)
if coverage is not None:
cover = coverage.view(-1).unsqueeze(1)
context += self.linear_cover(cover).view_as(context)
context = self.tanh(context)
align = self.score(input, context)
if context_lengths is not None:
mask = sequence_mask(context_lengths)
mask = mask.unsqueeze(1)
align.data.masked_fill_(~mask, -float('inf'))
align_vectors = self.sm(align.view(batch * targetL, sourceL))
align_vectors = align_vectors.view(batch, targetL, sourceL)
c = torch.bmm(align_vectors, context)
concat_c = torch.cat([c, input], 2).view(batch * targetL, dim * 2)
attn_h = self.linear_out(concat_c).view(batch, targetL, dim)
if self.attn_type in ['general', 'dot']:
attn_h = self.tanh(attn_h)
if one_step:
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
batch_, dim_ = attn_h.size()
aeq(batch, batch_)
aeq(dim, dim_)
batch_, sourceL_ = align_vectors.size()
aeq(batch, batch_)
aeq(sourceL, sourceL_)
else:
attn_h = attn_h.transpose(0, 1).contiguous()
align_vectors = align_vectors.transpose(0, 1).contiguous()
targetL_, batch_, dim_ = attn_h.size()
aeq(targetL, targetL_)
aeq(batch, batch_)
aeq(dim, dim_)
targetL_, batch_, sourceL_ = align_vectors.size()
aeq(targetL, targetL_)
aeq(batch, batch_)
aeq(sourceL, sourceL_)
return attn_h, align_vectors
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.cuda
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + x3, tmp1, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4,
4), (16, 1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1),
0), primals_2, out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0)
del buf3
extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5)
del primals_3
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(64)](buf2, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
return buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments
), 'Not all arguments have the same value: ' + str(args)
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
lengths.unsqueeze(1))
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
return out.contiguous().view(size[0], size[1], -1)
class BottleLinear(Bottle, nn.Linear):
pass
class GlobalAttentionNew(nn.Module):
"""
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
Constructs a unit mapping a query `q` of size `dim`
and a source matrix `H` of size `n x dim`, to an output
of size `dim`.
.. mermaid::
graph BT
A[Query]
subgraph RNN
C[H 1]
D[H 2]
E[H N]
end
F[Attn]
G[Output]
A --> F
C --> F
D --> F
E --> F
C -.-> G
D -.-> G
E -.-> G
F --> G
All models compute the output as
:math:`c = \\sum_{j=1}^{SeqLength} a_j H_j` where
:math:`a_j` is the softmax of a score function.
They then apply a projection layer to [q, c].
However, they differ in how they compute the attention score.
* Luong Attention (dot, general):
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
* Bahdanau Attention (mlp):
* :math:`score(H_j, q) = v_a^T tanh(W_a q + U_a h_j)`
Args:
dim (int): dimensionality of query and key
coverage (bool): use coverage term
attn_type (str): type of attention to use, options [dot,general,mlp]
"""
def __init__(self, dim, coverage=False, attn_type='dot'):
super(GlobalAttentionNew, self).__init__()
self.dim = dim
self.attn_type = attn_type
assert self.attn_type in ['dot', 'general', 'mlp'
], 'Please select a valid attention type.'
if self.attn_type == 'general':
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == 'mlp':
self.linear_context = BottleLinear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = BottleLinear(dim, 1, bias=False)
out_bias = self.attn_type == 'mlp'
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
self.sm = nn.Softmax(dim=1)
self.tanh = nn.Tanh()
if coverage:
self.linear_cover = nn.Linear(1, dim, bias=False)
def score(self, h_t, h_s):
"""
Args:
h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`
h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`
Returns:
:obj:`FloatTensor`:
raw attention scores (unnormalized) for each src index
`[batch x tgt_len x src_len]`
"""
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ['general', 'dot']:
if self.attn_type == 'general':
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = self.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, input_0, input_1):
primals_3 = self.linear_out.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
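# Hedged parity sketch (assumes CUDA and the eager GlobalAttention class
# from the cell above). call() consumes only linear_out.weight, so the trace
# covers just the default attn_type='dot' path with context_lengths=None
# and coverage=None.
torch.manual_seed(0)
eager = GlobalAttention(dim=4).cuda()
compiled = GlobalAttentionNew(dim=4).cuda()
compiled.load_state_dict(eager.state_dict())
q = torch.rand(4, 4, 4, device='cuda')
ctx = torch.rand(4, 4, 4, device='cuda')
out_e, attn_e = eager(q, ctx)
out_c, attn_c = compiled(q, ctx)
assert torch.allclose(out_e, out_c, atol=1e-5)
assert torch.allclose(attn_e, attn_c, atol=1e-5)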
|
vvjn/MultimodalNMT
|
GlobalAttention
| false
| 13,074
|
[
"MIT"
] | 0
|
2d69588a5b640290602b4f6d7e4120ae9742c1c2
|
https://github.com/vvjn/MultimodalNMT/tree/2d69588a5b640290602b4f6d7e4120ae9742c1c2
|
BertAttention
|
from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.output_score = config.output_score
self.output_sum = config.output_sum
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if self.output_score is True:
attention_probs_sum = attention_scores
else:
attention_probs_sum = attention_probs
if self.output_sum is True:
attention_probs_sum = attention_probs_sum.sum(dim=1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_probs_sum
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output, att_layer = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output, att_layer
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, output_score=4, output_sum=4,
hidden_dropout_prob=0.5)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_div_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + x2, tmp19, xmask)
tl.store(out_ptr1 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused__softmax_add_div_2(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_mean_pow_sub_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-12
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_div_1[grid(64)](buf5, primals_8, buf6,
buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_add_div_2[grid(256)](buf8, primals_8,
buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_8
buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf9, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_3[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_10, reinterpret_tensor(buf11, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_10
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_mean_pow_sub_4[grid(16)](buf12, primals_3,
buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_sub_5[grid(64)](primals_11,
buf12, primals_3, buf13, buf14, primals_12, buf15, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf13
del buf14
del primals_12
return buf15, buf8, primals_3, primals_11, buf8, reinterpret_tensor(buf11,
(16, 4), (4, 1), 0), buf12, primals_9, reinterpret_tensor(buf9, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.output_score = config.output_score
self.output_sum = config.output_sum
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if self.output_score is True:
attention_probs_sum = attention_scores
else:
attention_probs_sum = attention_probs
if self.output_sum is True:
attention_probs_sum = attention_probs_sum.sum(dim=1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_probs_sum
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttentionNew(nn.Module):
def __init__(self, config):
super(BertAttentionNew, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_0, input_1):
primals_1 = self.self.query.weight
primals_2 = self.self.query.bias
primals_4 = self.self.key.weight
primals_5 = self.self.key.bias
primals_6 = self.self.value.weight
primals_7 = self.self.value.bias
primals_9 = self.output.dense.weight
primals_10 = self.output.dense.bias
primals_11 = self.output.LayerNorm.weight
primals_12 = self.output.LayerNorm.bias
primals_3 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0], output[1]
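# Hedged parity sketch (assumes CUDA and the eager BertAttention class from
# the cell above). types.SimpleNamespace stands in for the paritybench
# _mock_config helper, which may not be importable outside that harness;
# eval mode because the trace contains no dropout.
import types
cfg = types.SimpleNamespace(hidden_size=4, num_attention_heads=4,
    attention_probs_dropout_prob=0.5, output_score=4, output_sum=4,
    hidden_dropout_prob=0.5)
torch.manual_seed(0)
eager = BertAttention(cfg).cuda().eval()
compiled = BertAttentionNew(cfg).cuda().eval()
compiled.load_state_dict(eager.state_dict())
h = torch.rand(4, 4, 4, device='cuda')
mask = torch.rand(4, 4, 4, device='cuda')
out_e, att_e = eager(h, mask)
out_c, att_c = compiled(h, mask)
assert torch.allclose(out_e, out_c, atol=1e-4)
assert torch.allclose(att_e, att_c, atol=1e-4)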
|
vincentlux/TextBrewer
|
BertAttention
| false
| 13,075
|
[
"Apache-2.0"
] | 0
|
51ffbf390a0b69ee51b6ad6f5045be63e21c98e3
|
https://github.com/vincentlux/TextBrewer/tree/51ffbf390a0b69ee51b6ad6f5045be63e21c98e3
|
ConvNet
|
import torch
import torch.nn as nn
class ConvNet(nn.Module):
"""
A network with a single convolution layer. This is used for testing flop
count for convolution layers.
"""
def __init__(self, conv_dim: 'int', input_dim: 'int', output_dim: 'int',
kernel_size: 'int', spatial_dim: 'int', stride: 'int', padding:
'int', groups_num: 'int') ->None:
super(ConvNet, self).__init__()
if conv_dim == 1:
convLayer = nn.Conv1d
elif conv_dim == 2:
convLayer = nn.Conv2d
else:
convLayer = nn.Conv3d
self.conv = convLayer(input_dim, output_dim, kernel_size, stride,
padding, groups=groups_num)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x = self.conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'conv_dim': 4, 'input_dim': 4, 'output_dim': 4,
'kernel_size': 4, 'spatial_dim': 4, 'stride': 1, 'padding': 4,
'groups_num': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 2916
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 729
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 9, 9, 9), (2916, 729, 81, 9, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(2916)](buf1, primals_2, 2916,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 9, 9, 9), (729, 81, 9, 1), 0
), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256,
64, 16, 4, 1), 0)
class ConvNetNew(nn.Module):
"""
A network with a single convolution layer. This is used for testing flop
count for convolution layers.
"""
def __init__(self, conv_dim: 'int', input_dim: 'int', output_dim: 'int',
kernel_size: 'int', spatial_dim: 'int', stride: 'int', padding:
'int', groups_num: 'int') ->None:
super(ConvNetNew, self).__init__()
if conv_dim == 1:
convLayer = nn.Conv1d
elif conv_dim == 2:
convLayer = nn.Conv2d
else:
convLayer = nn.Conv3d
self.conv = convLayer(input_dim, output_dim, kernel_size, stride,
padding, groups=groups_num)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
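# Hedged parity sketch (assumes CUDA and the eager ConvNet class from the
# cell above). conv_dim=4 falls through to the Conv3d branch, and the eager
# module treats the 4-D input as a single unbatched (C, D, H, W) sample,
# matching the (1, 4, 4, 4, 4) reinterpretation in call().
torch.manual_seed(0)
kwargs = dict(conv_dim=4, input_dim=4, output_dim=4, kernel_size=4,
    spatial_dim=4, stride=1, padding=4, groups_num=1)
eager = ConvNet(**kwargs).cuda()
compiled = ConvNetNew(**kwargs).cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), compiled(x), atol=1e-5)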
|
wangg12/fvcore
|
ConvNet
| false
| 13,076
|
[
"Apache-2.0"
] | 0
|
aca6e95b3319144ec3c66385ff348c1557a2147f
|
https://github.com/wangg12/fvcore/tree/aca6e95b3319144ec3c66385ff348c1557a2147f
|
AEC
|
import torch
import numpy as np
import torch.nn.functional as F
from torch import nn
class AEC(nn.Module):
def __init__(self, hidden_nodes, conv_width, pixel_patchsize,
lambda_activation):
super(AEC, self).__init__()
self.hidden_nodes = hidden_nodes
self.conv_width = conv_width
self.pixel_patchsize = pixel_patchsize
self.temporal_conv_kernel_size = (conv_width, pixel_patchsize,
pixel_patchsize)
self.lambda_activation = lambda_activation
self.tconv = nn.utils.weight_norm(nn.Conv3d(1, hidden_nodes,
kernel_size=self.temporal_conv_kernel_size, stride=1), dim=0,
name='weight')
self.tdeconv = nn.ConvTranspose3d(hidden_nodes, 1, kernel_size=np.
transpose(self.temporal_conv_kernel_size), stride=1)
torch.nn.init.normal_(self.tconv.weight, mean=0, std=1)
def format_data_numpy(self, x):
        x = x.detach().cpu().numpy()
return x
def encode(self, x):
noise = 0
return F.relu(self.tconv(x + noise))
def decode(self, z):
return self.tdeconv(z)
def forward(self, x):
activations = self.encode(x)
decoded = self.decode(activations)
return activations, decoded
def loss_func(self, x, xhat, activations):
recon_loss = nn.MSELoss()(xhat, x)
mean_activation = activations.mean((0, 2))
goal_activation = torch.ones_like(mean_activation)
activation_loss = torch.abs(mean_activation - goal_activation).mean()
loss = recon_loss + 1000 * activation_loss
return loss
def calc_snr(self, x, xhat):
"""
        Calculate the signal to noise ratio for a reconstructed signal
        Params:
        x (Array): The signal before transformation
        xhat (Array): The signal after transformation. Must be the same type as x.
        Returns:
        snr (float): The signal to noise ratio of the reconstructed signal xhat
"""
x = x.flatten()
        xhat = xhat.flatten()
signal = x.mean()
noise = (xhat - x).std()
snr = 10 * (signal / noise).log10()
return snr
def get_inputs():
return [torch.rand([4, 1, 64, 64, 64])]
def get_init_inputs():
return [[], {'hidden_nodes': 4, 'conv_width': 4, 'pixel_patchsize': 4,
'lambda_activation': 4}]
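# --- Hedged shape walk-through (illustrative; not part of the original module) ---
# encode() applies Conv3d(1 -> hidden_nodes, kernel (4, 4, 4), stride 1), so
# each spatial size shrinks to 64 - 4 + 1 = 61; decode() inverts that with the
# transposed conv: 61 + 4 - 1 = 64. forward() returns both tensors.
def _aec_shape_check():
    aec = AEC(hidden_nodes=4, conv_width=4, pixel_patchsize=4,
        lambda_activation=4)
    activations, decoded = aec(torch.rand([4, 1, 64, 64, 64]))
    assert activations.shape == (4, 4, 61, 61, 61)
    assert decoded.shape == (4, 1, 64, 64, 64)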
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, None)
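# weight_norm: per output filter, computes ||v|| over the 1 * 4 * 4 * 4 kernel
# elements and scales v by g / ||v|| to materialize the effective conv weight.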
@triton.jit
def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3631696
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 226981 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64, 64), (262144, 262144, 4096,
64, 1))
assert_size_stride(primals_2, (4, 1, 1, 1, 1), (1, 1, 1, 1, 1))
assert_size_stride(primals_3, (4, 1, 4, 4, 4), (64, 64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 1, 4, 4, 4), (64, 64, 16, 4, 1))
assert_size_stride(primals_6, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 64, 64, 64), (262144, 262144, 4096,
64, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(1048576)](primals_1, buf0, 1048576,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 1, 1, 1, 1), (1, 4, 4, 4, 4), torch.
float32)
buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1, 1), (1, 1, 1, 1, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1),
torch.float32)
triton_per_fused__weight_norm_interface_1[grid(4)](buf2, primals_3,
primals_2, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 61, 61, 61), (907924, 226981, 3721,
61, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(3631696)](buf5, primals_4,
3631696, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_4
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=True,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 1, 64, 64, 64), (262144, 262144, 4096,
64, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_3[grid(1048576)](buf7, primals_6,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_6
return (buf5, buf7, buf3, primals_2, primals_3, primals_5, buf0, buf2,
buf3, buf5)
class AECNew(nn.Module):
def __init__(self, hidden_nodes, conv_width, pixel_patchsize,
lambda_activation):
super(AECNew, self).__init__()
self.hidden_nodes = hidden_nodes
self.conv_width = conv_width
self.pixel_patchsize = pixel_patchsize
self.temporal_conv_kernel_size = (conv_width, pixel_patchsize,
pixel_patchsize)
self.lambda_activation = lambda_activation
self.tconv = nn.utils.weight_norm(nn.Conv3d(1, hidden_nodes,
kernel_size=self.temporal_conv_kernel_size, stride=1), dim=0,
name='weight')
self.tdeconv = nn.ConvTranspose3d(hidden_nodes, 1, kernel_size=np.
transpose(self.temporal_conv_kernel_size), stride=1)
torch.nn.init.normal_(self.tconv.weight, mean=0, std=1)
def format_data_numpy(self, x):
        x = x.detach().cpu().numpy()
return x
def encode(self, x):
noise = 0
return F.relu(self.tconv(x + noise))
def decode(self, z):
return self.tdeconv(z)
def loss_func(self, x, xhat, activations):
recon_loss = nn.MSELoss()(xhat, x)
mean_activation = activations.mean((0, 2))
goal_activation = torch.ones_like(mean_activation)
activation_loss = torch.abs(mean_activation - goal_activation).mean()
loss = recon_loss + 1000 * activation_loss
return loss
def calc_snr(self, x, xhat):
"""
        Calculate the signal to noise ratio for a reconstructed signal
        Params:
        x (Array): The signal before transformation
        xhat (Array): The signal after transformation. Must be the same type as x.
        Returns:
        snr (float): The signal to noise ratio of the reconstructed signal xhat
"""
x = x.flatten()
        xhat = xhat.flatten()
signal = x.mean()
noise = (xhat - x).std()
snr = 10 * (signal / noise).log10()
return snr
def forward(self, input_0):
primals_4 = self.tconv.bias
primals_2 = self.tconv.weight_g
primals_3 = self.tconv.weight_v
primals_5 = self.tdeconv.weight
primals_6 = self.tdeconv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
vdutell/biophys_encoder
|
AEC
| false
| 13,077
|
[
"Apache-2.0"
] | 0
|
2ca8011338c4f1eb6b50e7cb74e07d105d1e9669
|
https://github.com/vdutell/biophys_encoder/tree/2ca8011338c4f1eb6b50e7cb74e07d105d1e9669
|
UpConv
|
import torch
import torch.nn as nn
from collections import OrderedDict
class UpConv(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.up_conv = nn.Sequential(OrderedDict([('up', nn.Upsample(
scale_factor=2)), ('conv', nn.Conv2d(in_channels, in_channels //
2, kernel_size=3, padding=1))]))
def forward(self, x):
return self.up_conv(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
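# --- Hedged usage sketch (illustrative; not part of the original module) ---
# Upsample doubles H and W (4 -> 8); the 3x3 conv with padding=1 keeps the
# spatial size and halves the channels (in_channels -> in_channels // 2).
def _upconv_shape_check():
    up = UpConv(in_channels=4)
    out = up(torch.rand([4, 4, 4, 4]))
    assert out.shape == (4, 2, 8, 8)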
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
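# Nearest-neighbour 2x upsample: output pixel (x0, x1) gathers the input pixel
# at (int(x0 * 0.5), int(x1 * 0.5)).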
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 2
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 2, 8, 8), (128, 64, 8, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(512)](buf2, primals_3, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class UpConvNew(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.up_conv = nn.Sequential(OrderedDict([('up', nn.Upsample(
scale_factor=2)), ('conv', nn.Conv2d(in_channels, in_channels //
2, kernel_size=3, padding=1))]))
def forward(self, input_0):
primals_2 = self.up_conv.conv.weight
primals_3 = self.up_conv.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
wan2000/ssdf-perception
|
UpConv
| false
| 13,078
|
[
"MIT"
] | 0
|
df91bfb60f0d1b324fecada3d99d3498ca5794b0
|
https://github.com/wan2000/ssdf-perception/tree/df91bfb60f0d1b324fecada3d99d3498ca5794b0
|
ThreeNet
|
import torch
import torch.nn as nn
class ThreeNet(nn.Module):
"""
A network with three layers. This is used for testing a network with more
than one operation. The network has a convolution layer followed by two
fully connected layers.
"""
def __init__(self, input_dim: 'int', conv_dim: 'int', linear_dim: 'int'
) ->None:
super(ThreeNet, self).__init__()
self.conv = nn.Conv2d(input_dim, conv_dim, 1, 1)
out_dim = 1
self.pool = nn.AdaptiveAvgPool2d((out_dim, out_dim))
self.linear1 = nn.Linear(conv_dim, linear_dim)
self.linear2 = nn.Linear(linear_dim, 1)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x = self.conv(x)
x = self.pool(x)
x = torch.flatten(x, 1)
x = self.linear1(x)
x = self.linear2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'conv_dim': 4, 'linear_dim': 4}]
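# --- Hedged shape trace (illustrative; not part of the original module) ---
# The 1x1 conv keeps (4, 4, 4, 4); AdaptiveAvgPool2d((1, 1)) reduces it to
# (4, 4, 1, 1); flatten gives (4, 4); the two linears map 4 -> 4 -> 1.
def _threenet_shape_check():
    net = ThreeNet(input_dim=4, conv_dim=4, linear_dim=4)
    out = net(torch.rand([4, 4, 4, 4]))
    assert out.shape == (4, 1)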
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
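# Fuses the conv bias add with the adaptive average pool: adds the per-channel
# bias, then averages over the 16 spatial positions in a single reduction.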
@triton.jit
def triton_per_fused_convolution_mean_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = 16.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_convolution_mean_0[grid(16)](buf2, buf0, primals_2,
16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del buf0
del primals_2
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (4, 4), (4,
1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf3)
del primals_5
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
return buf5, primals_1, primals_3, reinterpret_tensor(buf2, (4, 4), (4,
1), 0), buf3, primals_6, primals_4
class ThreeNetNew(nn.Module):
"""
A network with three layers. This is used for testing a network with more
than one operation. The network has a convolution layer followed by two
fully connected layers.
"""
def __init__(self, input_dim: 'int', conv_dim: 'int', linear_dim: 'int'
) ->None:
super(ThreeNetNew, self).__init__()
self.conv = nn.Conv2d(input_dim, conv_dim, 1, 1)
out_dim = 1
self.pool = nn.AdaptiveAvgPool2d((out_dim, out_dim))
self.linear1 = nn.Linear(conv_dim, linear_dim)
self.linear2 = nn.Linear(linear_dim, 1)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.linear1.weight
primals_5 = self.linear1.bias
primals_6 = self.linear2.weight
primals_7 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
wangg12/fvcore
|
ThreeNet
| false
| 13,079
|
[
"Apache-2.0"
] | 0
|
aca6e95b3319144ec3c66385ff348c1557a2147f
|
https://github.com/wangg12/fvcore/tree/aca6e95b3319144ec3c66385ff348c1557a2147f
|
Attention_layer
|
import torch
from torch import nn
class Attention_layer(nn.Module):
def __init__(self, sequence_length):
super(Attention_layer, self).__init__()
self.input_size = sequence_length
self.output_size = sequence_length
self.dense = nn.Linear(sequence_length, sequence_length)
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_tensor):
y = self.softmax(self.dense(input_tensor.permute(0, 2, 1)))
y = y.permute(0, 2, 1)
y = input_tensor * y
return y
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'sequence_length': 4}]
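# --- Hedged sanity check (illustrative; not part of the original module) ---
# The dense + softmax run on the permuted (batch, dim2, dim1) view, so the
# gating weights are normalized along dim 1 of the original layout; the
# elementwise multiply then preserves the input shape.
def _attention_shape_check():
    attn = Attention_layer(sequence_length=4)
    x = torch.rand([4, 4, 4])
    assert attn(x).shape == x.shape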
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
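# Materializes input_tensor.permute(0, 2, 1) as a contiguous copy so the
# following matmul can consume a plain (16, 4) view.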
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp12 = tmp9 + tmp11
tmp13 = triton_helpers.maximum(tmp8, tmp12)
tmp17 = tmp14 + tmp16
tmp18 = triton_helpers.maximum(tmp13, tmp17)
tmp19 = tmp3 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp7 - tmp18
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp12 - tmp18
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp18
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tl.store(out_ptr0 + x0, tmp18, xmask)
tl.store(out_ptr1 + x0, tmp29, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x2 + 4 * y1), xmask & ymask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr4 + (x2 + 4 * y1), xmask & ymask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp0 * tmp8
tl.store(out_ptr0 + (x2 + 4 * y3), tmp9, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_add_1[grid(16)](buf1, primals_3, buf2,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_2[grid(16, 4)](primals_1, buf1, primals_3,
buf2, buf3, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1,
num_stages=1)
del buf2
del buf3
return buf4, primals_1, primals_3, reinterpret_tensor(buf0, (16, 4), (4,
1), 0), buf1
class Attention_layerNew(nn.Module):
def __init__(self, sequence_length):
super(Attention_layerNew, self).__init__()
self.input_size = sequence_length
self.output_size = sequence_length
self.dense = nn.Linear(sequence_length, sequence_length)
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
w6688j/ChatBot-PyTorch
|
Attention_layer
| false
| 13,080
|
[
"Apache-2.0"
] | 0
|
84f5a3267d16c650b90727ce80e4952901faa902
|
https://github.com/w6688j/ChatBot-PyTorch/tree/84f5a3267d16c650b90727ce80e4952901faa902
|
SPPNet
|
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
def spatial_pyramid_pool(previous_conv, num_sample, previous_conv_size,
out_pool_size):
"""
    previous_conv: a tensor from the previous convolution layer
    num_sample: the number of images in the batch
    previous_conv_size: an int pair [height, width] giving the feature map size of the previous convolution layer
    out_pool_size: an int list of expected output sizes of the max pooling layers
    returns: a tensor of shape [num_sample x n], the concatenation of the multi-level pooling results
"""
for i in range(len(out_pool_size)):
h, w = previous_conv_size
h_wid = math.ceil(h / out_pool_size[i])
w_wid = math.ceil(w / out_pool_size[i])
h_str = math.floor(h / out_pool_size[i])
w_str = math.floor(w / out_pool_size[i])
max_pool = nn.MaxPool2d(kernel_size=(h_wid, w_wid), stride=(h_str,
w_str))
x = max_pool(previous_conv)
if i == 0:
spp = x.view(num_sample, -1)
else:
spp = torch.cat((spp, x.view(num_sample, -1)), 1)
return spp
class SPPNet(nn.Module):
"""
    A CNN model which adds an SPP layer so that inputs of any spatial size map to a fixed-length feature vector
"""
def __init__(self, n_classes=102, init_weights=True):
super(SPPNet, self).__init__()
"""
'wc1',[3,96,11,11]
'wc2',[96,256,5,5]
'wc3',[256,384,3,3]
'wc4':[384,384,3,3]
'wc5':[384,256,3,3]
'fc6':[spatial_pool_dim*256,4096]
'fc7':[4096,4096]
'out',[4096,n_classes])
"""
self.output_num = [4, 2, 1]
self.conv1 = nn.Conv2d(3, 96, kernel_size=7, stride=2)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv2 = nn.Conv2d(96, 256, kernel_size=5, stride=2)
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(256, 384, kernel_size=3)
self.conv4 = nn.Conv2d(384, 384, kernel_size=3)
self.conv5 = nn.Conv2d(384, 256, kernel_size=3)
self.fc1 = nn.Linear(sum([(i * i) for i in self.output_num]) * 256,
4096)
self.fc2 = nn.Linear(4096, 4096)
self.out = nn.Linear(4096, n_classes)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.local_response_norm(x, size=4)
x = self.pool1(x)
x = F.relu(self.conv2(x))
x = F.local_response_norm(x, size=4)
x = self.pool2(x)
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = F.relu(self.conv5(x))
spp = spatial_pyramid_pool(x, x.size(0), [int(x.size(2)), int(x.
size(3))], self.output_num)
fc1 = F.relu(self.fc1(spp))
fc2 = F.relu(self.fc2(fc1))
output = self.out(fc2)
return output
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def get_inputs():
return [torch.rand([4, 3, 256, 256])]
def get_init_inputs():
return [[], {}]
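# --- Hedged worked example (the numbers follow from the code above) ---
# With a 256x256 input, conv5 emits a 256-channel 8x8 map; the SPP layer pools
# it at 4x4, 2x2 and 1x1 grids, so the flattened feature has
# sum(i * i for i in [4, 2, 1]) * 256 = (16 + 4 + 1) * 256 = 5376 entries,
# which is exactly fc1's in_features.
def _sppnet_shape_check():
    assert sum(i * i for i in [4, 2, 1]) * 256 == 5376
    net = SPPNet()
    logits = net(torch.rand([4, 3, 256, 256]))
    assert logits.shape == (4, 102)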
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
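# Kernels 0-5 below only repack the conv weights and the input image into
# channels-last layouts for the extern convolution calls.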
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 288
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 65536 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 196608 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 96
y1 = yindex // 96
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 96 * x2 + 2400 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = yindex // 384
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 384 * x2 + 3456 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = yindex // 384
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 384 * x2 + 3456 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 6000000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 96
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_7(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 62500
xnumel = 99
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 15625
y1 = yindex // 15625
tmp0 = -2 + x2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 96, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-2 + x2 + 96 * y3), tmp5 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = tmp8 * tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp5, tmp9, tmp10)
tl.store(out_ptr0 + (y0 + 15648 * x2 + 1549152 * y1), tmp11, xmask & ymask)
@triton.jit
def triton_poi_fused_avg_pool3d_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 6000000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 15625
x1 = xindex // 15625 % 96
x2 = xindex // 1500000
x3 = xindex // 15625
tmp0 = tl.load(in_ptr0 + (x0 + 15648 * x1 + 1549152 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (15648 + x0 + 15648 * x1 + 1549152 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (31296 + x0 + 15648 * x1 + 1549152 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (46944 + x0 + 15648 * x1 + 1549152 * x2), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0 + 15648 * x3), tmp8, xmask)
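# With the pad and avg_pool3d kernels above, this realizes the fused
# ReLU + local response norm: relu(x) / (1 + 1e-4 * avg(x^2)) ** 0.75.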
@triton.jit
def triton_poi_fused_add_div_mul_pow_relu_9(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 62500
xnumel = 96
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 15625
y1 = yindex // 15625
tmp0 = tl.load(in_ptr0 + (x2 + 96 * y3), xmask & ymask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr1 + (y0 + 15648 * x2 + 1502208 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = 0.0001
tmp5 = tmp3 * tmp4
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = 0.75
tmp9 = libdevice.pow(tmp7, tmp8)
tmp10 = tmp2 / tmp9
tl.store(out_ptr0 + (x2 + 96 * y3), tmp10, xmask & ymask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1476096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 96
x1 = xindex // 96 % 62
x2 = xindex // 5952 % 62
x3 = xindex // 369024
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 192 * x1 + 24000 * x2 + 1500000 * x3), xmask
)
tmp1 = tl.load(in_ptr0 + (96 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp3 = tl.load(in_ptr0 + (192 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp5 = tl.load(in_ptr0 + (12000 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp7 = tl.load(in_ptr0 + (12096 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp9 = tl.load(in_ptr0 + (12192 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp11 = tl.load(in_ptr0 + (24000 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp13 = tl.load(in_ptr0 + (24096 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp15 = tl.load(in_ptr0 + (24192 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, xmask)
tl.store(out_ptr1 + x4, tmp41, xmask)
@triton.jit
def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 861184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_12(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 3364
xnumel = 259
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 841
y1 = yindex // 841
tmp0 = -2 + x2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-2 + x2 + 256 * y3), tmp5 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = tmp8 * tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp5, tmp9, tmp10)
tl.store(out_ptr0 + (y0 + 841 * x2 + 217824 * y1), tmp11, xmask & ymask)
@triton.jit
def triton_poi_fused_avg_pool3d_13(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 861184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 215296
x1 = xindex // 215296
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 217824 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (841 + x0 + 217824 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (1682 + x0 + 217824 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (2523 + x0 + 217824 * x1), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_pow_relu_14(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 3364
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 841
y1 = yindex // 841
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (y0 + 841 * x2 + 215296 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = 0.0001
tmp5 = tmp3 * tmp4
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = 0.75
tmp9 = libdevice.pow(tmp7, tmp8)
tmp10 = tmp2 / tmp9
tl.store(out_ptr0 + (x2 + 256 * y3), tmp10, xmask & ymask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256 % 14
x2 = xindex // 3584 % 14
x3 = xindex // 50176
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 14848 * x2 + 215296 * x3), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp3 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp5 = tl.load(in_ptr0 + (7424 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp7 = tl.load(in_ptr0 + (7680 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp9 = tl.load(in_ptr0 + (7936 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp11 = tl.load(in_ptr0 + (14848 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp13 = tl.load(in_ptr0 + (15104 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp15 = tl.load(in_ptr0 + (15360 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, None)
tl.store(out_ptr1 + x4, tmp41, None)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256 % 4
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 4096 * x2), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 4096 * x2), None)
tmp7 = tl.load(in_ptr0 + (2048 + x0 + 512 * x1 + 4096 * x2), None)
tmp12 = tl.load(in_ptr0 + (2304 + x0 + 512 * x1 + 4096 * x2), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_20(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 2
y4 = yindex // 2
y2 = yindex // 4
y5 = yindex % 4
y6 = yindex
tmp0 = tl.load(in_ptr0 + (x3 + 1024 * y0 + 8192 * y4), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (256 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (512 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (768 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2048 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2304 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2560 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2816 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (4096 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (4352 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (4608 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (4864 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (6144 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (6400 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (6656 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (6912 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tmp31 = tmp1 > tmp0
tmp32 = tl.full([1, 1], 1, tl.int8)
tmp33 = tl.full([1, 1], 0, tl.int8)
tmp34 = tl.where(tmp31, tmp32, tmp33)
tmp35 = tmp3 > tmp2
tmp36 = tl.full([1, 1], 2, tl.int8)
tmp37 = tl.where(tmp35, tmp36, tmp34)
tmp38 = tmp5 > tmp4
tmp39 = tl.full([1, 1], 3, tl.int8)
tmp40 = tl.where(tmp38, tmp39, tmp37)
tmp41 = tmp7 > tmp6
tmp42 = tl.full([1, 1], 4, tl.int8)
tmp43 = tl.where(tmp41, tmp42, tmp40)
tmp44 = tmp9 > tmp8
tmp45 = tl.full([1, 1], 5, tl.int8)
tmp46 = tl.where(tmp44, tmp45, tmp43)
tmp47 = tmp11 > tmp10
tmp48 = tl.full([1, 1], 6, tl.int8)
tmp49 = tl.where(tmp47, tmp48, tmp46)
tmp50 = tmp13 > tmp12
tmp51 = tl.full([1, 1], 7, tl.int8)
tmp52 = tl.where(tmp50, tmp51, tmp49)
tmp53 = tmp15 > tmp14
tmp54 = tl.full([1, 1], 8, tl.int8)
tmp55 = tl.where(tmp53, tmp54, tmp52)
tmp56 = tmp17 > tmp16
tmp57 = tl.full([1, 1], 9, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp19 > tmp18
tmp60 = tl.full([1, 1], 10, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp21 > tmp20
tmp63 = tl.full([1, 1], 11, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp23 > tmp22
tmp66 = tl.full([1, 1], 12, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp25 > tmp24
tmp69 = tl.full([1, 1], 13, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp27 > tmp26
tmp72 = tl.full([1, 1], 14, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp29 > tmp28
tmp75 = tl.full([1, 1], 15, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (y5 + 4 * x3 + 1024 * y2), tmp30, xmask & ymask)
tl.store(out_ptr1 + (x3 + 256 * y6), tmp76, xmask & ymask)
@triton.jit
def triton_poi_fused_cat_21(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 21504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5376
x1 = xindex // 5376
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 5120, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 4096, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.load(in_ptr0 + (512 * (x0 % 4) + 4096 * (x0 // 4 % 4) + 16384 *
x1 + x0 // 16 % 256), tmp7 & xmask, eviction_policy='evict_last',
other=0.0)
tmp9 = tl.load(in_ptr0 + (256 + 512 * (x0 % 4) + 4096 * (x0 // 4 % 4) +
16384 * x1 + x0 // 16 % 256), tmp7 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = tl.load(in_ptr0 + (2048 + 512 * (x0 % 4) + 4096 * (x0 // 4 % 4) +
16384 * x1 + x0 // 16 % 256), tmp7 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp13 = tl.load(in_ptr0 + (2304 + 512 * (x0 % 4) + 4096 * (x0 // 4 % 4) +
16384 * x1 + x0 // 16 % 256), tmp7 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp7, tmp14, tmp15)
tmp17 = tmp0 >= tmp5
tmp18 = tmp17 & tmp4
tmp19 = tl.load(in_ptr1 + (1024 * x1 + (-4096 + x0)), tmp18 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp6, tmp16, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp4, tmp20, tmp21)
tmp23 = tmp0 >= tmp3
tl.full([1], 5376, tl.int64)
tmp26 = tl.load(in_ptr2 + (256 * x1 + (-5120 + x0)), tmp23 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tl.where(tmp4, tmp22, tmp26)
tl.store(out_ptr0 + x2, tmp27, xmask)
@triton.jit
def triton_poi_fused_relu_22(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (96, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (96,), (1,))
assert_size_stride(primals_3, (4, 3, 256, 256), (196608, 65536, 256, 1))
assert_size_stride(primals_4, (256, 96, 5, 5), (2400, 25, 5, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (384, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (384,), (1,))
assert_size_stride(primals_8, (384, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_9, (384,), (1,))
assert_size_stride(primals_10, (256, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (4096, 5376), (5376, 1))
assert_size_stride(primals_13, (4096,), (1,))
assert_size_stride(primals_14, (4096, 4096), (4096, 1))
assert_size_stride(primals_15, (4096,), (1,))
assert_size_stride(primals_16, (102, 4096), (4096, 1))
assert_size_stride(primals_17, (102,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((96, 3, 7, 7), (147, 1, 21, 3), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(288, 49)](primals_1, buf0, 288, 49, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 256, 256), (196608, 1, 768, 3),
torch.float32)
triton_poi_fused_1[grid(12, 65536)](primals_3, buf1, 12, 65536,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((256, 96, 5, 5), (2400, 1, 480, 96),
torch.float32)
triton_poi_fused_2[grid(24576, 25)](primals_4, buf2, 24576, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((384, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_3[grid(98304, 9)](primals_6, buf3, 98304, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((384, 384, 3, 3), (3456, 1, 1152, 384),
torch.float32)
triton_poi_fused_4[grid(147456, 9)](primals_8, buf4, 147456, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((256, 384, 3, 3), (3456, 1, 1152, 384),
torch.float32)
triton_poi_fused_5[grid(98304, 9)](primals_10, buf5, 98304, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 96, 125, 125), (1500000, 1, 12000, 96))
buf7 = buf6
del buf6
triton_poi_fused_convolution_6[grid(6000000)](buf7, primals_2,
6000000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf8 = empty_strided_cuda((4, 1, 99, 125, 125), (1549152, 1549152,
15648, 125, 1), torch.float32)
triton_poi_fused_constant_pad_nd_7[grid(62500, 99)](buf7, buf8,
62500, 99, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 1, 96, 125, 125), (1502208, 1502208,
15648, 125, 1), torch.float32)
triton_poi_fused_avg_pool3d_8[grid(6000000)](buf8, buf9, 6000000,
XBLOCK=512, num_warps=8, num_stages=1)
buf10 = empty_strided_cuda((4, 96, 125, 125), (1500000, 1, 12000,
96), torch.float32)
triton_poi_fused_add_div_mul_pow_relu_9[grid(62500, 96)](buf7, buf9,
buf10, 62500, 96, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((4, 96, 62, 62), (369024, 1, 5952, 96),
torch.float32)
buf12 = empty_strided_cuda((4, 96, 62, 62), (369024, 1, 5952, 96),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(1476096)](buf10,
buf11, buf12, 1476096, XBLOCK=512, num_warps=8, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 256, 29, 29), (215296, 1, 7424, 256))
buf14 = buf13
del buf13
triton_poi_fused_convolution_11[grid(861184)](buf14, primals_5,
861184, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf15 = empty_strided_cuda((4, 1, 259, 29, 29), (217824, 217824,
841, 29, 1), torch.float32)
triton_poi_fused_constant_pad_nd_12[grid(3364, 259)](buf14, buf15,
3364, 259, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf16 = empty_strided_cuda((4, 1, 256, 29, 29), (215296, 215296,
841, 29, 1), torch.float32)
triton_poi_fused_avg_pool3d_13[grid(861184)](buf15, buf16, 861184,
XBLOCK=512, num_warps=8, num_stages=1)
buf17 = empty_strided_cuda((4, 256, 29, 29), (215296, 1, 7424, 256),
torch.float32)
triton_poi_fused_add_div_mul_pow_relu_14[grid(3364, 256)](buf14,
buf16, buf17, 3364, 256, XBLOCK=32, YBLOCK=32, num_warps=4,
num_stages=1)
buf18 = empty_strided_cuda((4, 256, 14, 14), (50176, 1, 3584, 256),
torch.float32)
buf19 = empty_strided_cuda((4, 256, 14, 14), (50176, 1, 3584, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_15[grid(200704)](buf17,
buf18, buf19, 200704, XBLOCK=512, num_warps=8, num_stages=1)
buf20 = extern_kernels.convolution(buf18, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 384, 12, 12), (55296, 1, 4608, 384))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_16[grid(221184)](buf21, primals_7,
221184, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 384, 10, 10), (38400, 1, 3840, 384))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_17[grid(153600)](buf23, primals_9,
153600, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf24 = extern_kernels.convolution(buf23, buf5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_18[grid(65536)](buf25, primals_11,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_11
buf26 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_19[grid(16384)](buf25,
buf26, 16384, XBLOCK=128, num_warps=4, num_stages=1)
buf27 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.
float32)
buf28 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_20[grid(16, 256)](buf25,
buf27, buf28, 16, 256, XBLOCK=256, YBLOCK=1, num_warps=4,
num_stages=1)
buf29 = torch.ops.aten.max_pool2d_with_indices.default(buf25, [8, 8
], [8, 8])
buf30 = buf29[0]
buf31 = buf29[1]
del buf29
buf32 = empty_strided_cuda((4, 5376), (5376, 1), torch.float32)
triton_poi_fused_cat_21[grid(21504)](buf25, buf27, buf30, buf32,
21504, XBLOCK=256, num_warps=4, num_stages=1)
del buf27
del buf30
buf33 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
extern_kernels.mm(buf32, reinterpret_tensor(primals_12, (5376, 4096
), (1, 5376), 0), out=buf33)
buf34 = buf33
del buf33
triton_poi_fused_relu_22[grid(16384)](buf34, primals_13, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf35 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
extern_kernels.mm(buf34, reinterpret_tensor(primals_14, (4096, 4096
), (1, 4096), 0), out=buf35)
buf36 = buf35
del buf35
triton_poi_fused_relu_22[grid(16384)](buf36, primals_15, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf37 = empty_strided_cuda((4, 102), (102, 1), torch.float32)
extern_kernels.addmm(primals_17, buf36, reinterpret_tensor(
primals_16, (4096, 102), (1, 4096), 0), alpha=1, beta=1, out=buf37)
del primals_17
return (buf37, buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf8, buf9,
buf10, buf11, buf12, buf14, buf15, buf16, buf17, buf18, buf19,
buf21, buf23, buf25, buf26, buf28, buf31, buf32, buf34, buf36,
primals_16, primals_14, primals_12)
def spatial_pyramid_pool(previous_conv, num_sample, previous_conv_size,
out_pool_size):
"""
previous_conv: a tensor vector of previous convolution layer
num_sample: an int number of image in the batch
previous_conv_size: an int vector [height, width] of the matrix features size of previous convolution layer
out_pool_size: a int vector of expected output size of max pooling layer
returns: a tensor vector with shape [1 x n] is the concentration of multi-level pooling
"""
for i in range(len(out_pool_size)):
h, w = previous_conv_size
h_wid = math.ceil(h / out_pool_size[i])
w_wid = math.ceil(w / out_pool_size[i])
h_str = math.floor(h / out_pool_size[i])
w_str = math.floor(w / out_pool_size[i])
max_pool = nn.MaxPool2d(kernel_size=(h_wid, w_wid), stride=(h_str,
w_str))
x = max_pool(previous_conv)
if i == 0:
spp = x.view(num_sample, -1)
else:
spp = torch.cat((spp, x.view(num_sample, -1)), 1)
return spp
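# Worked size check (an illustrative sketch, not part of the original repo):
# for a 13x13 feature map and out_pool_size=[4, 2, 1], the ceil/floor
# arithmetic above gives kernel 4 / stride 3, kernel 7 / stride 6 and
# kernel 13 / stride 13, i.e. 4x4, 2x2 and 1x1 bins per level, so the
# concatenated vector has (16 + 4 + 1) * C entries per sample -- 5376 for
# C=256, matching the 5376-wide fc1 input in this model.
def _spp_output_dim_example(h=13, out_pool_size=(4, 2, 1), channels=256):
    import math
    bins = 0
    for s in out_pool_size:
        k = math.ceil(h / s)          # pooling window, as in the loop above
        stride = math.floor(h / s)    # pooling stride
        out = (h - k) // stride + 1   # MaxPool2d output size, no padding
        bins += out * out
    return bins * channels            # 5376 here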
class SPPNetNew(nn.Module):
"""
    A CNN model with an SPP layer, so the fully connected layers accept feature maps of any spatial size
"""
def __init__(self, n_classes=102, init_weights=True):
super(SPPNetNew, self).__init__()
"""
'wc1',[3,96,11,11]
'wc2',[96,256,5,5]
'wc3',[256,384,3,3]
'wc4':[384,384,3,3]
'wc5':[384,256,3,3]
'fc6':[spatial_pool_dim*256,4096]
'fc7':[4096,4096]
'out',[4096,n_classes])
"""
self.output_num = [4, 2, 1]
self.conv1 = nn.Conv2d(3, 96, kernel_size=7, stride=2)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv2 = nn.Conv2d(96, 256, kernel_size=5, stride=2)
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(256, 384, kernel_size=3)
self.conv4 = nn.Conv2d(384, 384, kernel_size=3)
self.conv5 = nn.Conv2d(384, 256, kernel_size=3)
self.fc1 = nn.Linear(sum([(i * i) for i in self.output_num]) * 256,
4096)
self.fc2 = nn.Linear(4096, 4096)
self.out = nn.Linear(4096, n_classes)
if init_weights:
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_12 = self.fc1.weight
primals_13 = self.fc1.bias
primals_14 = self.fc2.weight
primals_15 = self.fc2.bias
primals_16 = self.out.weight
primals_17 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
|
maj34/Deep-Learning-Papers
|
SPPNet
| false
| 13,081
|
[
"MIT"
] | 0
|
2672d3426b3f4342f7d81cd5ae029f2485594b4c
|
https://github.com/maj34/Deep-Learning-Papers/tree/2672d3426b3f4342f7d81cd5ae029f2485594b4c
|
CmapPafHeadAttention
|
import torch
import torch.utils.data
import torch.nn
import torch.optim
class UpsampleCBR(torch.nn.Sequential):
def __init__(self, input_channels, output_channels, count=1, num_flat=0):
layers = []
for i in range(count):
if i == 0:
inch = input_channels
else:
inch = output_channels
layers += [torch.nn.ConvTranspose2d(inch, output_channels,
kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d(
output_channels), torch.nn.ReLU()]
for i in range(num_flat):
layers += [torch.nn.Conv2d(output_channels, output_channels,
kernel_size=3, stride=1, padding=1), torch.nn.
BatchNorm2d(output_channels), torch.nn.ReLU()]
super(UpsampleCBR, self).__init__(*layers)
class CmapPafHeadAttention(torch.nn.Module):
def __init__(self, input_channels, cmap_channels, paf_channels,
upsample_channels=256, num_upsample=0, num_flat=0):
super(CmapPafHeadAttention, self).__init__()
self.cmap_up = UpsampleCBR(input_channels, upsample_channels,
num_upsample, num_flat)
self.cmap_att = torch.nn.Conv2d(upsample_channels,
upsample_channels, kernel_size=3, stride=1, padding=1)
self.cmap_conv = torch.nn.Conv2d(upsample_channels, cmap_channels,
kernel_size=1, stride=1, padding=0)
def forward(self, x):
xc = self.cmap_up(x)
ac = torch.sigmoid(self.cmap_att(xc))
return self.cmap_conv(xc * ac)
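# Usage sketch (sizes are assumptions, not repo defaults): with
# num_upsample=0 the UpsampleCBR head is an empty Sequential, i.e. an
# identity, so the attention conv must match the input channel count; the
# output keeps the input resolution with cmap_channels channels.
def _cmap_head_example():
    import torch
    head = CmapPafHeadAttention(input_channels=256, cmap_channels=18,
                                paf_channels=42, upsample_channels=256)
    x = torch.rand(1, 256, 64, 64)
    return head(x).shape  # torch.Size([1, 18, 64, 64])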
def get_inputs():
return [torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return [[], {'input_channels': 4, 'cmap_channels': 4, 'paf_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 256 * x2 + 1048576 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_2(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp5, None)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (4, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(1024, 4096)](primals_1, buf0, 1024, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_1[grid(65536, 9)](primals_2, buf1, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256
), torch.float32)
triton_poi_fused_convolution_mul_sigmoid_2[grid(4194304)](buf3,
primals_3, buf0, buf4, 4194304, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_3
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 64, 64), (16384, 1, 256, 4))
buf6 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_3[grid(16, 4096)](buf5, primals_5,
buf6, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del buf5
del primals_5
return buf6, buf0, buf1, primals_4, buf3, buf4
class UpsampleCBR(torch.nn.Sequential):
def __init__(self, input_channels, output_channels, count=1, num_flat=0):
layers = []
for i in range(count):
if i == 0:
inch = input_channels
else:
inch = output_channels
layers += [torch.nn.ConvTranspose2d(inch, output_channels,
kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d(
output_channels), torch.nn.ReLU()]
for i in range(num_flat):
layers += [torch.nn.Conv2d(output_channels, output_channels,
kernel_size=3, stride=1, padding=1), torch.nn.
BatchNorm2d(output_channels), torch.nn.ReLU()]
super(UpsampleCBR, self).__init__(*layers)
class CmapPafHeadAttentionNew(torch.nn.Module):
def __init__(self, input_channels, cmap_channels, paf_channels,
upsample_channels=256, num_upsample=0, num_flat=0):
super(CmapPafHeadAttentionNew, self).__init__()
self.cmap_up = UpsampleCBR(input_channels, upsample_channels,
num_upsample, num_flat)
self.cmap_att = torch.nn.Conv2d(upsample_channels,
upsample_channels, kernel_size=3, stride=1, padding=1)
self.cmap_conv = torch.nn.Conv2d(upsample_channels, cmap_channels,
kernel_size=1, stride=1, padding=0)
def forward(self, input_0):
primals_2 = self.cmap_att.weight
primals_3 = self.cmap_att.bias
primals_4 = self.cmap_conv.weight
primals_5 = self.cmap_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
tucachmo2202/trt_pose
|
CmapPafHeadAttention
| false
| 13,082
|
[
"MIT"
] | 0
|
b847fc197c32219dc2d719c2b42906603da0988a
|
https://github.com/tucachmo2202/trt_pose/tree/b847fc197c32219dc2d719c2b42906603da0988a
|
ParsingRelationLoss
|
import torch
import torch.nn.modules
import torch.nn as nn
class ParsingRelationLoss(nn.Module):
def __init__(self):
super(ParsingRelationLoss, self).__init__()
def forward(self, logits):
_n, _c, h, _w = logits.shape
loss_all = []
for i in range(0, h - 1):
loss_all.append(logits[:, :, i, :] - logits[:, :, i + 1, :])
loss = torch.cat(loss_all)
return torch.nn.functional.smooth_l1_loss(loss, torch.zeros_like(loss))
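# A minimal sanity sketch (assumed inputs, not from the repo): the loss
# penalizes differences between adjacent rows along dim=2, so logits that
# are constant along that dimension give exactly zero loss.
def _parsing_relation_loss_example():
    import torch
    logits = torch.ones(2, 3, 5, 7)  # identical rows -> zero differences
    return ParsingRelationLoss()(logits).item()  # 0.0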
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.modules
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_smooth_l1_loss_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex // 16
r0 = rindex % 4
r1 = rindex // 4 % 4
tmp0 = r2
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 16 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), rmask & tmp4, other=0.0)
tmp6 = tl.load(in_ptr0 + tl.broadcast_to(4 + r0 + 16 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), rmask & tmp4, other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1, 1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + tl.broadcast_to(4 + r0 + 16 * r1 + 64 * (-4 +
r2), [XBLOCK, RBLOCK]), rmask & tmp13, other=0.0)
tmp15 = tl.load(in_ptr0 + tl.broadcast_to(8 + r0 + 16 * r1 + 64 * (-4 +
r2), [XBLOCK, RBLOCK]), rmask & tmp13, other=0.0)
tmp16 = tmp14 - tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tl.full([1, 1], 12, tl.int64)
tmp22 = tl.load(in_ptr0 + tl.broadcast_to(8 + r0 + 16 * r1 + 64 * (-8 +
r2), [XBLOCK, RBLOCK]), rmask & tmp19, other=0.0)
tmp23 = tl.load(in_ptr0 + tl.broadcast_to(12 + r0 + 16 * r1 + 64 * (-8 +
r2), [XBLOCK, RBLOCK]), rmask & tmp19, other=0.0)
tmp24 = tmp22 - tmp23
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp19, tmp24, tmp25)
tmp27 = tl.where(tmp13, tmp18, tmp26)
tmp28 = tl.where(tmp4, tmp9, tmp27)
tmp29 = tl_math.abs(tmp28)
tmp30 = 1.0
tmp31 = tmp29 < tmp30
tmp32 = tmp29 * tmp29
tmp33 = 0.5
tmp34 = tmp32 * tmp33
tmp35 = tmp34 * tmp30
tmp36 = tmp29 - tmp33
tmp37 = tl.where(tmp31, tmp35, tmp36)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp40 = tl.where(rmask, tmp38, 0)
tmp41 = tl.sum(tmp40, 1)[:, None]
tmp42 = 192.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_smooth_l1_loss_0[grid(1)](buf2, arg0_1, 1, 192,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class ParsingRelationLossNew(nn.Module):
def __init__(self):
super(ParsingRelationLossNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
wangping984/Ultra-Fast-Lane-Detection
|
ParsingRelationLoss
| false
| 13,083
|
[
"MIT"
] | 0
|
b7559c1469d832bf5afe5d158dd3ad63b4df9d9c
|
https://github.com/wangping984/Ultra-Fast-Lane-Detection/tree/b7559c1469d832bf5afe5d158dd3ad63b4df9d9c
|
FSP
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
class FSP(nn.Module):
"""
A Gift from Knowledge Distillation: Fast Optimization, Network Minimization and Transfer Learning
http://openaccess.thecvf.com/content_cvpr_2017/papers/Yim_A_Gift_From_CVPR_2017_paper.pdf
"""
def __init__(self):
super(FSP, self).__init__()
def forward(self, fm_s1, fm_s2, fm_t1, fm_t2):
loss = F.mse_loss(self.fsp_matrix(fm_s1, fm_s2), self.fsp_matrix(
fm_t1, fm_t2))
return loss
def fsp_matrix(self, fm1, fm2):
if fm1.size(2) > fm2.size(2):
fm1 = F.adaptive_avg_pool2d(fm1, (fm2.size(2), fm2.size(3)))
fm1 = fm1.view(fm1.size(0), fm1.size(1), -1)
fm2 = fm2.view(fm2.size(0), fm2.size(1), -1).transpose(1, 2)
fsp = torch.bmm(fm1, fm2) / fm1.size(2)
return fsp
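# Shape illustration (names and sizes here are assumptions): the FSP matrix
# of two feature maps with C1 and C2 channels is a batch of C1 x C2
# Gram-like matrices averaged over the spatial positions.
def _fsp_shape_example():
    import torch
    fm1 = torch.rand(2, 8, 4, 4)   # (N, C1, H, W)
    fm2 = torch.rand(2, 16, 4, 4)  # (N, C2, H, W), same spatial size
    return FSP().fsp_matrix(fm1, fm2).shape  # torch.Size([2, 8, 16])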
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.0625
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16,
1), 0), reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg2_1, (4, 4, 16), (64, 16,
1), 0), reinterpret_tensor(arg3_1, (4, 16, 4), (64, 1, 16), 0),
out=buf1)
del arg2_1
del arg3_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
get_raw_stream(0)
triton_per_fused_div_mse_loss_0[grid(1)](buf3, buf0, buf1, 1, 64,
XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class FSPNew(nn.Module):
"""
A Gift from Knowledge Distillation: Fast Optimization, Network Minimization and Transfer Learning
http://openaccess.thecvf.com/content_cvpr_2017/papers/Yim_A_Gift_From_CVPR_2017_paper.pdf
"""
def __init__(self):
super(FSPNew, self).__init__()
def fsp_matrix(self, fm1, fm2):
if fm1.size(2) > fm2.size(2):
fm1 = F.adaptive_avg_pool2d(fm1, (fm2.size(2), fm2.size(3)))
fm1 = fm1.view(fm1.size(0), fm1.size(1), -1)
fm2 = fm2.view(fm2.size(0), fm2.size(1), -1).transpose(1, 2)
fsp = torch.bmm(fm1, fm2) / fm1.size(2)
return fsp
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
wangxianliang/FaceX-Zoo
|
FSP
| false
| 13,084
|
[
"Apache-2.0"
] | 0
|
b0555c88a0350fa7b59c317f3a171f551fef4e6e
|
https://github.com/wangxianliang/FaceX-Zoo/tree/b0555c88a0350fa7b59c317f3a171f551fef4e6e
|
FT
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
class FT(nn.Module):
"""
    Paraphrasing Complex Network: Network Compression via Factor Transfer
http://papers.nips.cc/paper/7541-paraphrasing-complex-network-network-compression-via-factor-transfer.pdf
"""
def __init__(self):
super(FT, self).__init__()
def forward(self, factor_s, factor_t):
loss = F.l1_loss(self.normalize(factor_s), self.normalize(factor_t))
return loss
def normalize(self, factor):
norm_factor = F.normalize(factor.view(factor.size(0), -1))
return norm_factor
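# Quick check (an illustrative sketch): normalize() flattens each factor to
# one row and L2-normalizes it, so every row of the result has unit norm.
def _ft_normalize_example():
    import torch
    norm = FT().normalize(torch.rand(4, 8, 2, 2))  # shape (4, 32)
    return norm.norm(dim=1)                        # values all close to 1.0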
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
import torch.nn as nn
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_per_fused_abs_div_mean_sub_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + r2, None)
tmp7 = tl.load(in_ptr3 + r1, None, eviction_policy='evict_last')
tmp2 = libdevice.sqrt(tmp1)
tmp3 = 1e-12
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 / tmp4
tmp8 = libdevice.sqrt(tmp7)
tmp9 = triton_helpers.maximum(tmp8, tmp3)
tmp10 = tmp6 / tmp9
tmp11 = tmp5 - tmp10
tmp12 = tl_math.abs(tmp11)
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_0[grid(4)](arg0_1, buf0, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_per_fused_linalg_vector_norm_0[grid(4)](arg1_1, buf1, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_abs_div_mean_sub_1[grid(1)](buf3, arg0_1, buf0,
arg1_1, buf1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del buf0
del buf1
return buf3,
class FTNew(nn.Module):
"""
    Paraphrasing Complex Network: Network Compression via Factor Transfer
http://papers.nips.cc/paper/7541-paraphrasing-complex-network-network-compression-via-factor-transfer.pdf
"""
def __init__(self):
super(FTNew, self).__init__()
def normalize(self, factor):
norm_factor = F.normalize(factor.view(factor.size(0), -1))
return norm_factor
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
wangxianliang/FaceX-Zoo
|
FT
| false
| 13,085
|
[
"Apache-2.0"
] | 0
|
b0555c88a0350fa7b59c317f3a171f551fef4e6e
|
https://github.com/wangxianliang/FaceX-Zoo/tree/b0555c88a0350fa7b59c317f3a171f551fef4e6e
|
Attention
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn.functional as F
from torch import nn
from torch.autograd import *
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h)
att_h = att_h.unsqueeze(1).expand_as(att)
dot = att + att_h
dot = torch.tanh(dot)
dot = dot.view(-1, self.att_hid_size)
dot = self.alpha_net(dot)
dot = dot.view(-1, att_size)
weight = F.softmax(dot, dim=1)
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size)
weight = weight / weight.sum(1, keepdim=True)
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1))
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1)
return att_res
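# Shape walk-through (a sketch with assumed sizes): for att_feats of shape
# (N, R, D) and p_att_feats already projected to att_hid_size, the module
# scores the R regions against h and returns an (N, D) attended feature.
def _attention_shape_example():
    import torch
    att = Attention(_mock_config(rnn_size=4, att_hid_size=4))
    h = torch.rand(4, 4)                # (N, rnn_size)
    att_feats = torch.rand(4, 16, 4)    # (N, R, D)
    p_att_feats = torch.rand(4, 16, 4)  # (N, R, att_hid_size)
    return att(h, att_feats, p_att_feats).shape  # torch.Size([4, 4])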
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4,
4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(rnn_size=4, att_hid_size=4)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = libdevice.tanh(tmp4)
tl.store(out_ptr0 + x3, tmp5, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_5, reinterpret_tensor(primals_3, (4, 4),
(1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_tanh_0[grid(256)](primals_2, buf0, primals_4,
buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
del primals_4
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_per_fused__softmax_1[grid(4)](buf3, buf4, buf5, buf6, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf7 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0)
del buf0
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 16), (16, 0, 1),
0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 4, 1), 0),
out=buf7)
del buf6
return reinterpret_tensor(buf7, (4, 4), (4, 1), 0
), primals_5, buf1, buf3, buf4, buf5, reinterpret_tensor(primals_1,
(4, 4, 16), (64, 1, 4), 0), primals_6
class AttentionNew(nn.Module):
def __init__(self, opt):
super(AttentionNew, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, input_0, input_1, input_2):
primals_3 = self.h2att.weight
primals_4 = self.h2att.bias
primals_6 = self.alpha_net.weight
primals_7 = self.alpha_net.bias
primals_5 = input_0
primals_1 = input_1
primals_2 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
GeorgeKostenkov/ImageCaptioning.pytorch
|
Attention
| false
| 13,086
|
[
"MIT"
] | 0
|
8f17433fdaba2f89774e45ad5a3a88b880932ee6
|
https://github.com/GeorgeKostenkov/ImageCaptioning.pytorch/tree/8f17433fdaba2f89774e45ad5a3a88b880932ee6
|
Visual_Q_Network
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Visual_Q_Network(nn.Module):
"""
The input of this network should have shape (num_frame, 80, 80)
"""
def __init__(self, num_frame, num_action):
super(Visual_Q_Network, self).__init__()
self.conv1 = nn.Conv2d(in_channels=num_frame, out_channels=16,
kernel_size=8, stride=4, padding=2)
self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size
=4, stride=2)
self.fc1 = nn.Linear(32 * 81, 256)
self.fc2 = nn.Linear(256, num_action)
def forward(self, image):
x = F.relu(self.conv1(image))
x = F.relu(self.conv2(x))
x = x.view(-1, 32 * 81)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
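# Worked size check (illustrative, assuming the 80x80 input the docstring
# describes): conv1 maps 80 -> (80 + 2*2 - 8) // 4 + 1 = 20, conv2 maps
# 20 -> (20 - 4) // 2 + 1 = 9, so the flattened feature is
# 32 * 9 * 9 = 32 * 81, matching fc1.
def _visual_q_shape_example():
    import torch
    net = Visual_Q_Network(num_frame=4, num_action=6)
    return net(torch.rand(1, 4, 80, 80)).shape  # torch.Size([1, 6])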
def get_inputs():
return [torch.rand([4, 4, 81, 81])]
def get_init_inputs():
return [[], {'num_frame': 4, 'num_action': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 400 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 10368
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 81 % 32
x2 = xindex // 2592
x3 = xindex % 2592
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x3 + 2688 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (16, 4, 8, 8), (256, 64, 8, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 81, 81), (26244, 6561, 81, 1))
assert_size_stride(primals_4, (32, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (256, 2592), (2592, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (4, 256), (256, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
4), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 20, 20), (6400, 400, 20, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(25600)](buf1, primals_2,
25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 9, 9), (2592, 81, 9, 1))
buf3 = buf2
del buf2
buf7 = empty_strided_cuda((4, 32, 9, 9), (2688, 81, 9, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(10368)](
buf3, primals_5, buf7, 10368, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_5
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (4, 2592), (2592, 1), 0),
reinterpret_tensor(primals_6, (2592, 256), (1, 2592), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_relu_2[grid(1024)](buf5, primals_7, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8,
(256, 4), (1, 256), 0), alpha=1, beta=1, out=buf6)
del primals_9
return buf6, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf3
, (4, 2592), (2592, 1), 0), buf5, primals_8, primals_6, buf7
class Visual_Q_NetworkNew(nn.Module):
"""
The input of this network should have shape (num_frame, 80, 80)
"""
def __init__(self, num_frame, num_action):
super(Visual_Q_NetworkNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=num_frame, out_channels=16,
kernel_size=8, stride=4, padding=2)
self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size
=4, stride=2)
self.fc1 = nn.Linear(32 * 81, 256)
self.fc2 = nn.Linear(256, num_action)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
wanghaodi/DQN_with_DDQN
|
Visual_Q_Network
| false
| 13,087
|
[
"MIT"
] | 0
|
970ebf429c863debfd009b48e3bc4169fcbb05d4
|
https://github.com/wanghaodi/DQN_with_DDQN/tree/970ebf429c863debfd009b48e3bc4169fcbb05d4
|
SoftTarget
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
class SoftTarget(nn.Module):
"""
Distilling the Knowledge in a Neural Network
https://arxiv.org/pdf/1503.02531.pdf
"""
def __init__(self, T):
super(SoftTarget, self).__init__()
self.T = T
def forward(self, out_s, out_t):
loss = F.kl_div(F.log_softmax(out_s / self.T, dim=1), F.softmax(
out_t / self.T, dim=1), reduction='batchmean') * self.T * self.T
return loss
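# Temperature sketch (an assumed example, not repo code): both logits are
# softened by T and the KL term is rescaled by T*T so gradient magnitudes
# stay comparable across temperatures; identical student and teacher
# logits give (numerically) zero loss.
def _soft_target_example():
    import torch
    kd = SoftTarget(T=4)
    logits = torch.rand(8, 10)
    return kd(logits, logits).item()  # ~0.0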
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'T': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr1 + r3, None)
tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp36 = 0.25
tmp37 = tmp35 * tmp36
tmp38 = 4.0
tmp39 = tmp37 * tmp38
tmp40 = tmp39 * tmp38
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp40, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1)
](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf2
return buf4,
class SoftTargetNew(nn.Module):
"""
Distilling the Knowledge in a Neural Network
https://arxiv.org/pdf/1503.02531.pdf
"""
def __init__(self, T):
super(SoftTargetNew, self).__init__()
self.T = T
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
wangxianliang/FaceX-Zoo
|
SoftTarget
| false
| 13,088
|
[
"Apache-2.0"
] | 0
|
b0555c88a0350fa7b59c317f3a171f551fef4e6e
|
https://github.com/wangxianliang/FaceX-Zoo/tree/b0555c88a0350fa7b59c317f3a171f551fef4e6e
|
GCN
|
from torch.nn import Module
import torch
import torch.nn as nn
from torch.nn.modules.module import Module
import torch.nn.functional as F
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.W = nn.Linear(in_features=in_features, out_features=
out_features, bias=bias)
self._init_parameters()
def _init_parameters(self):
nn.init.kaiming_uniform_(self.W.weight, nonlinearity='relu')
def forward(self, input, adj):
support = self.W(input)
output = torch.sparse.mm(adj, support)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCN(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.gc2 = GraphConvolution(nhid, nclass)
self.dropout = dropout
def forward(self, x, adj):
x = F.relu(self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc2(x, adj)
return x
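# A minimal usage sketch (assumed data): node features are (N, nfeat) and
# adj is an N x N (sparse) adjacency; the output is one score per class
# for each node.
def _gcn_example():
    import torch
    gcn = GCN(nfeat=4, nhid=8, nclass=3, dropout=0.5)
    x = torch.rand(6, 4)
    adj = torch.eye(6).to_sparse()  # identity graph, sparse COO
    return gcn(x, adj).shape        # torch.Size([6, 3])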
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import torch.nn as nn
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf1,
(4, 4), (1, 4), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), beta=0)
buf3 = buf2
del buf2
buf4 = reinterpret_tensor(buf3, (4, 4), (1, 4), 0)
del buf3
triton_poi_fused_relu_1[grid(16)](buf4, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf5 = buf0
del buf0
extern_kernels.addmm(primals_6, buf4, reinterpret_tensor(primals_5,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_6
buf6 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf1,
(4, 4), (1, 4), 0), reinterpret_tensor(buf5, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), beta=0)
del buf1
del buf5
buf7 = buf6
del buf6
return reinterpret_tensor(buf7, (4, 4), (1, 4), 0
), primals_3, buf4, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
), primals_4
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.W = nn.Linear(in_features=in_features, out_features=
out_features, bias=bias)
self._init_parameters()
def _init_parameters(self):
nn.init.kaiming_uniform_(self.W.weight, nonlinearity='relu')
def forward(self, input, adj):
support = self.W(input)
output = torch.sparse.mm(adj, support)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCNNew(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCNNew, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.gc2 = GraphConvolution(nhid, nclass)
self.dropout = dropout
def forward(self, input_0, input_1):
primals_1 = self.gc1.W.weight
primals_2 = self.gc1.W.bias
primals_3 = self.gc2.W.weight
primals_6 = self.gc2.W.bias
primals_4 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
wangzefan666/pygcn
|
GCN
| false
| 13,089
|
[
"MIT"
] | 0
|
2a5e4f299e3c9d3eafe3014622e8ec3742ba365c
|
https://github.com/wangzefan666/pygcn/tree/2a5e4f299e3c9d3eafe3014622e8ec3742ba365c
|
GraphConvolution
|
from torch.nn import Module
import torch
import torch.nn as nn
from torch.nn.modules.module import Module
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.W = nn.Linear(in_features=in_features, out_features=
out_features, bias=bias)
self._init_parameters()
def _init_parameters(self):
nn.init.kaiming_uniform_(self.W.weight, nonlinearity='relu')
def forward(self, input, adj):
support = self.W(input)
output = torch.sparse.mm(adj, support)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
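# Single-layer usage sketch (assumed data, not from the repo): adj is an
# N x N adjacency; torch.sparse.mm also accepts a dense second operand, and
# the layer computes adj @ (input @ W.T + b).
def _graph_convolution_example():
    import torch
    gc = GraphConvolution(in_features=4, out_features=2)
    x = torch.rand(5, 4)            # 5 nodes, 4 features each
    adj = torch.eye(5).to_sparse()  # identity graph, sparse COO
    return gc(x, adj).shape         # torch.Size([5, 2])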
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.nn as nn
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf1,
(4, 4), (1, 4), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), beta=0)
del buf0
del buf1
buf3 = buf2
del buf2
return reinterpret_tensor(buf3, (4, 4), (1, 4), 0), primals_3, primals_4
class GraphConvolutionNew(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolutionNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.W = nn.Linear(in_features=in_features, out_features=
out_features, bias=bias)
self._init_parameters()
def _init_parameters(self):
nn.init.kaiming_uniform_(self.W.weight, nonlinearity='relu')
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def forward(self, input_0, input_1):
primals_1 = self.W.weight
primals_2 = self.W.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
wangzefan666/pygcn
|
GraphConvolution
| false
| 13,090
|
[
"MIT"
] | 0
|
2a5e4f299e3c9d3eafe3014622e8ec3742ba365c
|
https://github.com/wangzefan666/pygcn/tree/2a5e4f299e3c9d3eafe3014622e8ec3742ba365c
|
SqueezeExcite
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
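# A few concrete values (illustrative only): _make_divisible rounds to the
# nearest multiple of divisor, then bumps up one step if rounding would
# lose more than 10% of the original value.
def _make_divisible_example():
    assert _make_divisible(6, 4) == 8     # int(6 + 2) rounds to 8
    assert _make_divisible(10, 4) == 12   # int(10 + 2) rounds to 12
    assert _make_divisible(21, 16) == 32  # 16 < 0.9 * 21, so bump to 32
    return True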
def hard_sigmoid(x, inplace: 'bool'=False):
if inplace:
return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
else:
return F.relu6(x + 3.0) / 6.0
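# Spot values (a sketch, not repo tests): hard_sigmoid is the piecewise
# linear relu6(x + 3) / 6, clamping to 0 below x = -3, to 1 above x = 3,
# and passing through 0.5 at x = 0.
def _hard_sigmoid_example():
    import torch
    return hard_sigmoid(torch.tensor([-4.0, 0.0, 4.0]))
    # tensor([0.0000, 0.5000, 1.0000])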
class SqueezeExcite(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
super(SqueezeExcite, self).__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) *
se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, x):
x_se = self.avg_pool(x)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
x = x * self.gate_fn(x_se)
return x
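# Brief shape check (illustrative, in_chs=8 is an assumption): the SE block
# squeezes spatially, gates each channel, and preserves the input shape;
# here reduced_chs = _make_divisible(8 * 0.25, 4) = 4.
def _squeeze_excite_example():
    import torch
    se = SqueezeExcite(in_chs=8)
    return se(torch.rand(2, 8, 5, 5)).shape  # torch.Size([2, 8, 5, 5])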
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_chs': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.functional as F
import torch.nn as nn
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_div_hardtanh_mul_2[grid(256)](
primals_1, buf4, primals_5, buf5, 256, XBLOCK=256, num_warps=4,
num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_add_convolution_hardtanh_backward_3[grid(16)](buf4,
primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del primals_5
return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
    It ensures that all layers have a channel number that is divisible
    by the given divisor (8 in the original TF repo). It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
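# A quick worked check of the rounding rule above (illustrative values):
#   _make_divisible(1.0, 4) == 4    # floored value 0 falls below min_value
#   _make_divisible(7.0, 4) == 8    # rounds to the nearest multiple of 4
#   _make_divisible(9.0, 4) == 12   # 8 < 0.9 * 9, so the result is bumped up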
def hard_sigmoid(x, inplace: 'bool'=False):
if inplace:
return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
else:
return F.relu6(x + 3.0) / 6.0
class SqueezeExciteNew(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
super(SqueezeExciteNew, self).__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) *
se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, input_0):
primals_2 = self.conv_reduce.weight
primals_3 = self.conv_reduce.bias
primals_4 = self.conv_expand.weight
primals_5 = self.conv_expand.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
wangxianliang/FaceX-Zoo
|
SqueezeExcite
| false
| 13,091
|
[
"Apache-2.0"
] | 0
|
b0555c88a0350fa7b59c317f3a171f551fef4e6e
|
https://github.com/wangxianliang/FaceX-Zoo/tree/b0555c88a0350fa7b59c317f3a171f551fef4e6e
|
ArcFace
|
from torch.nn import Module
import math
import torch
from torch.nn import Parameter
import torch.nn.functional as F
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
class ArcFace(Module):
"""Implementation for "ArcFace: Additive Angular Margin Loss for Deep Face Recognition"
"""
def __init__(self, feat_dim, num_class, margin_arc=0.35, margin_am=0.0,
scale=32):
super(ArcFace, self).__init__()
self.weight = Parameter(torch.Tensor(feat_dim, num_class))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
self.margin_arc = margin_arc
self.margin_am = margin_am
self.scale = scale
self.cos_margin = math.cos(margin_arc)
self.sin_margin = math.sin(margin_arc)
self.min_cos_theta = math.cos(math.pi - margin_arc)
def forward(self, feats, labels):
kernel_norm = F.normalize(self.weight, dim=0)
feats = F.normalize(feats)
cos_theta = torch.mm(feats, kernel_norm)
cos_theta = cos_theta.clamp(-1, 1)
sin_theta = torch.sqrt(1.0 - torch.pow(cos_theta, 2))
cos_theta_m = cos_theta * self.cos_margin - sin_theta * self.sin_margin
cos_theta_m = torch.where(cos_theta > self.min_cos_theta,
cos_theta_m, cos_theta - self.margin_am)
index = torch.zeros_like(cos_theta)
index.scatter_(1, labels.data.view(-1, 1), 1)
index = index.byte().bool()
output = cos_theta * 1.0
output[index] = cos_theta_m[index]
output *= self.scale
return output
def get_inputs():
return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'feat_dim': 4, 'num_class': 4}]
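# The forward pass above applies the additive angular margin through the
# angle-addition identity cos(theta + m) = cos(theta)cos(m) - sin(theta)sin(m);
# the compiled kernel below hard-codes cos(0.35) ~= 0.939373 and
# sin(0.35) ~= 0.342898. A quick numeric check of that identity (a sketch,
# not from the source repo):
import math
import torch

margin_arc = 0.35
theta = torch.tensor([0.2, 1.0, 2.5])         # angles in [0, pi]
cos_theta = torch.cos(theta)
sin_theta = torch.sqrt(1.0 - cos_theta ** 2)  # valid since sin(theta) >= 0 here
cos_theta_m = cos_theta * math.cos(margin_arc) - sin_theta * math.sin(margin_arc)
assert torch.allclose(cos_theta_m, torch.cos(theta + margin_arc), atol=1e-6)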
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import math
from torch.nn import Parameter
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_clamp_gt_mul_pow_rsub_sqrt_sub_where_2(in_ptr0,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -1.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 1.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = -0.9393727128473789
tmp6 = tmp4 > tmp5
tmp7 = 0.9393727128473789
tmp8 = tmp4 * tmp7
tmp9 = tmp4 * tmp4
tmp10 = tmp3 - tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 0.34289780745545134
tmp13 = tmp11 * tmp12
tmp14 = tmp8 - tmp13
tmp15 = 0.0
tmp16 = tmp4 - tmp15
tmp17 = tl.where(tmp6, tmp14, tmp16)
tmp18 = tmp4 * tmp3
tl.store(out_ptr0 + x0, tmp17, xmask)
tl.store(out_ptr1 + x0, tmp18, xmask)
@triton.jit
def triton_poi_fused__to_copy_scatter_3(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tmp5.to(tl.int8).to(tl.uint8)
tmp7 = tmp6 != 0
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_1, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, buf1, out=buf2)
buf3 = buf1
del buf1
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_clamp_gt_mul_pow_rsub_sqrt_sub_where_2[grid(16)](buf2,
buf3, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused__to_copy_scatter_3[grid(16)](primals_3, buf4, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf3, buf4, buf5, primals_1, buf2, reinterpret_tensor(buf0, (4,
4), (1, 4), 0)
class ArcFaceNew(Module):
"""Implementation for "ArcFace: Additive Angular Margin Loss for Deep Face Recognition"
"""
def __init__(self, feat_dim, num_class, margin_arc=0.35, margin_am=0.0,
scale=32):
super(ArcFaceNew, self).__init__()
self.weight = Parameter(torch.Tensor(feat_dim, num_class))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
self.margin_arc = margin_arc
self.margin_am = margin_am
self.scale = scale
self.cos_margin = math.cos(margin_arc)
self.sin_margin = math.sin(margin_arc)
self.min_cos_theta = math.cos(math.pi - margin_arc)
def forward(self, input_0, input_1):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
wangxianliang/FaceX-Zoo
|
ArcFace
| false
| 13,092
|
[
"Apache-2.0"
] | 0
|
b0555c88a0350fa7b59c317f3a171f551fef4e6e
|
https://github.com/wangxianliang/FaceX-Zoo/tree/b0555c88a0350fa7b59c317f3a171f551fef4e6e
|
GELU
|
import torch
from torch import nn
class GELU(nn.Module):
def forward(self, x):
return torch.sigmoid(1.702 * x) * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
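# The module above uses the sigmoid approximation x * sigmoid(1.702 * x) to
# the exact GELU; a quick comparison of the two on a small grid (a sketch,
# not from the source repo):
import torch
import torch.nn.functional as F

x = torch.linspace(-4.0, 4.0, steps=101)
approx = torch.sigmoid(1.702 * x) * x
exact = F.gelu(x)
print((approx - exact).abs().max())  # worst-case gap on the order of 1e-2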
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.702
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp3 * tmp0
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GELUNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
simonlevine/enformer-pytorch
|
GELU
| false
| 13,093
|
[
"MIT"
] | 0
|
342915c3f9385f5f24ee4d1d9965d126d49ca279
|
https://github.com/simonlevine/enformer-pytorch/tree/342915c3f9385f5f24ee4d1d9965d126d49ca279
|
MLP
|
import torch
import torch.nn as nn
class MLP(nn.Module):
"""
Create a multilayer perceptron model with variable hidden layers.
The network will have the specified number of layers and neurons,
with each layer using the leaky ReLU activation function.
Parameters
----------
input_dim : int
The number of input features.
hidden_dims : list of int
The number of neurons in each hidden layer. Each element
specifies a new hidden layer.
"""
def __init__(self, input_dim, hidden_dims, output_dim, leaky_relu_slope
=0.2, batchnorm=False):
"""Instantiate an MLP object"""
super(MLP, self).__init__()
layers = hidden_dims
if not input_dim or not isinstance(input_dim, int):
raise ValueError("'input_dim' must be a non-zero integer.")
if isinstance(layers, int):
layers = [layers]
if not all(layers) or not all(isinstance(l, int) for l in layers):
raise ValueError(
"'hidden_dims' must be a list of non-zero integers.")
layers = list(layers)
in_layers = [input_dim] + layers
out_layers = layers + [output_dim]
for idx, in_layer in enumerate(in_layers):
self.add_module('linear_{}'.format(idx), nn.Linear(in_layer,
out_layers[idx]))
if idx < len(in_layers) - 1:
self.add_module('leakyReLU_{}'.format(idx), nn.LeakyReLU(
negative_slope=leaky_relu_slope))
if batchnorm:
self.add_module('BatchNorm_{}'.format(idx), nn.
BatchNorm1d(out_layers[idx]))
def forward(self, x):
"""
Run an example through the model
Parameters
----------
x : torch.Tensor
A tensor to run through the model.
Returns
-------
torch.Tensor
The model predictions.
"""
for idx, layer in enumerate(self._modules.values()):
if idx < len(self._modules) - 1:
x = layer(x)
else:
return layer(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'hidden_dims': 4, 'output_dim': 4}]
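# A minimal usage sketch of the module above (dimensions are illustrative,
# not from the source repo):
import torch

model = MLP(input_dim=4, hidden_dims=[8, 8], output_dim=2)
x = torch.rand(16, 4)
y = model(x)
print(y.shape)  # torch.Size([16, 2])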
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = buf0
del buf0
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4
class MLPNew(nn.Module):
"""
Create a multilayer perceptron model with variable hidden layers.
The network will have the specified number of layers and neurons,
with each layer using the leaky ReLU activation function.
Parameters
----------
input_dim : int
The number of input features.
hidden_dims : list of int
The number of neurons in each hidden layer. Each element
specifies a new hidden layer.
"""
def __init__(self, input_dim, hidden_dims, output_dim, leaky_relu_slope
=0.2, batchnorm=False):
"""Instantiate an MLP object"""
super(MLPNew, self).__init__()
layers = hidden_dims
if not input_dim or not isinstance(input_dim, int):
raise ValueError("'input_dim' must be a non-zero integer.")
if isinstance(layers, int):
layers = [layers]
if not all(layers) or not all(isinstance(l, int) for l in layers):
raise ValueError(
"'hidden_dims' must be a list of non-zero integers.")
layers = list(layers)
in_layers = [input_dim] + layers
out_layers = layers + [output_dim]
for idx, in_layer in enumerate(in_layers):
self.add_module('linear_{}'.format(idx), nn.Linear(in_layer,
out_layers[idx]))
if idx < len(in_layers) - 1:
self.add_module('leakyReLU_{}'.format(idx), nn.LeakyReLU(
negative_slope=leaky_relu_slope))
if batchnorm:
self.add_module('BatchNorm_{}'.format(idx), nn.
BatchNorm1d(out_layers[idx]))
def forward(self, input_0):
primals_1 = self.linear_0.weight
primals_2 = self.linear_0.bias
primals_4 = self.linear_1.weight
primals_5 = self.linear_1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
wfondrie/wefpy
|
MLP
| false
| 13,094
|
[
"Apache-2.0"
] | 0
|
00691d453048203e1e3b1daea53879067ee4a395
|
https://github.com/wfondrie/wefpy/tree/00691d453048203e1e3b1daea53879067ee4a395
|
PKTCosSim
|
import torch
import torch.nn as nn
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
class PKTCosSim(nn.Module):
"""
Learning Deep Representations with Probabilistic Knowledge Transfer
http://openaccess.thecvf.com/content_ECCV_2018/papers/Nikolaos_Passalis_Learning_Deep_Representations_ECCV_2018_paper.pdf
"""
def __init__(self):
super(PKTCosSim, self).__init__()
def forward(self, feat_s, feat_t, eps=1e-06):
feat_s_norm = torch.sqrt(torch.sum(feat_s ** 2, dim=1, keepdim=True))
feat_s = feat_s / (feat_s_norm + eps)
feat_s[feat_s != feat_s] = 0
feat_t_norm = torch.sqrt(torch.sum(feat_t ** 2, dim=1, keepdim=True))
feat_t = feat_t / (feat_t_norm + eps)
feat_t[feat_t != feat_t] = 0
feat_s_cos_sim = torch.mm(feat_s, feat_s.transpose(0, 1))
feat_t_cos_sim = torch.mm(feat_t, feat_t.transpose(0, 1))
feat_s_cos_sim = (feat_s_cos_sim + 1.0) / 2.0
feat_t_cos_sim = (feat_t_cos_sim + 1.0) / 2.0
feat_s_cond_prob = feat_s_cos_sim / torch.sum(feat_s_cos_sim, dim=1,
keepdim=True)
feat_t_cond_prob = feat_t_cos_sim / torch.sum(feat_t_cos_sim, dim=1,
keepdim=True)
loss = torch.mean(feat_t_cond_prob * torch.log((feat_t_cond_prob +
eps) / (feat_s_cond_prob + eps)))
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
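# A toy sketch of the probability transfer above: cosine similarities are
# shifted into [0, 1] and row-normalised into conditional distributions
# before the KL-style comparison (values are illustrative):
import torch
import torch.nn.functional as F

feats = F.normalize(torch.rand(4, 8), dim=1)
cos_sim = (feats @ feats.t() + 1.0) / 2.0
cond_prob = cos_sim / cos_sim.sum(dim=1, keepdim=True)
print(cond_prob.sum(dim=1))  # each row sums to 1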
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0(in_out_ptr0,
in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp16 = tmp15 != tmp15
tmp17 = 0.0
tmp18 = tl.where(tmp16, tmp17, tmp15)
tl.store(in_out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_per_fused_add_div_log_mean_mul_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, None)
tmp5 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr1 + r2, None)
tmp26 = tl.load(in_ptr1 + 4 * r1, None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (1 + 4 * r1), None, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr1 + (2 + 4 * r1), None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr1 + (3 + 4 * r1), None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp6 = tmp5 + tmp1
tmp7 = tmp6 * tmp3
tmp9 = tmp8 + tmp1
tmp10 = tmp9 * tmp3
tmp11 = tmp7 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp13 * tmp3
tmp15 = tmp11 + tmp14
tmp17 = tmp16 + tmp1
tmp18 = tmp17 * tmp3
tmp19 = tmp15 + tmp18
tmp20 = tmp4 / tmp19
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp24 = tmp23 + tmp1
tmp25 = tmp24 * tmp3
tmp27 = tmp26 + tmp1
tmp28 = tmp27 * tmp3
tmp30 = tmp29 + tmp1
tmp31 = tmp30 * tmp3
tmp32 = tmp28 + tmp31
tmp34 = tmp33 + tmp1
tmp35 = tmp34 * tmp3
tmp36 = tmp32 + tmp35
tmp38 = tmp37 + tmp1
tmp39 = tmp38 * tmp3
tmp40 = tmp36 + tmp39
tmp41 = tmp25 / tmp40
tmp42 = tmp41 + tmp21
tmp43 = tmp22 / tmp42
tmp44 = tl_math.log(tmp43)
tmp45 = tmp20 * tmp44
tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
tmp48 = tl.sum(tmp46, 1)[:, None]
tmp49 = 16.0
tmp50 = tmp48 / tmp49
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp50, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0[grid(16)](
buf1, arg1_1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf2)
buf4 = buf1
del buf1
buf5 = buf4
del buf4
triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0[grid(16)](
buf5, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(buf5, (4, 4), (1, 4), 0),
out=buf6)
del buf5
buf7 = empty_strided_cuda((), (), torch.float32)
buf8 = buf7
del buf7
triton_per_fused_add_div_log_mean_mul_sum_1[grid(1)](buf8, buf2,
buf6, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf2
del buf6
return buf8,
class PKTCosSimNew(nn.Module):
"""
Learning Deep Representations with Probabilistic Knowledge Transfer
http://openaccess.thecvf.com/content_ECCV_2018/papers/Nikolaos_Passalis_Learning_Deep_Representations_ECCV_2018_paper.pdf
"""
def __init__(self):
super(PKTCosSimNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
wangxianliang/FaceX-Zoo
|
PKTCosSim
| false
| 13,095
|
[
"Apache-2.0"
] | 0
|
b0555c88a0350fa7b59c317f3a171f551fef4e6e
|
https://github.com/wangxianliang/FaceX-Zoo/tree/b0555c88a0350fa7b59c317f3a171f551fef4e6e
|
PDF
|
import math
import torch
import torch.nn as nn
def compute_negative_ln_prob(Y, mu, ln_var, pdf):
var = ln_var.exp()
if pdf == 'gauss':
negative_ln_prob = 0.5 * ((Y - mu) ** 2 / var).sum(1).mean(
) + 0.5 * Y.size(1) * math.log(2 * math.pi) + 0.5 * ln_var.sum(1
).mean()
elif pdf == 'logistic':
whitened = (Y - mu) / var
adjust = torch.logsumexp(torch.stack([torch.zeros(Y.size()), -
whitened]), 0)
negative_ln_prob = whitened.sum(1).mean() + 2 * adjust.sum(1).mean(
) + ln_var.sum(1).mean()
else:
raise ValueError('Unknown PDF: %s' % pdf)
return negative_ln_prob
class PDF(nn.Module):
def __init__(self, dim, pdf):
super(PDF, self).__init__()
assert pdf in {'gauss', 'logistic'}
self.dim = dim
self.pdf = pdf
self.mu = nn.Embedding(1, self.dim)
self.ln_var = nn.Embedding(1, self.dim)
def forward(self, Y):
cross_entropy = compute_negative_ln_prob(Y, self.mu.weight, self.
ln_var.weight, self.pdf)
return cross_entropy
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'pdf': 'gauss'}]
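# Sanity-check sketch: the 'gauss' branch of compute_negative_ln_prob above
# matches the analytic Gaussian negative log-likelihood from
# torch.distributions (shapes are illustrative):
import torch

N, D = 8, 4
Y = torch.randn(N, D)
mu = torch.zeros(1, D)
ln_var = torch.zeros(1, D)
nll = compute_negative_ln_prob(Y, mu, ln_var, 'gauss')
ref = -torch.distributions.Normal(mu, ln_var.mul(0.5).exp()).log_prob(Y).sum(1).mean()
assert torch.allclose(nll, ref, atol=1e-5)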
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_exp_mean_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex // 16
r3 = rindex % 16
r0 = rindex % 4
tmp0 = tl.load(in_ptr0 + (r3 + 64 * r2), None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + r0, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (16 + r3 + 64 * r2), None)
tmp12 = tl.load(in_ptr0 + (32 + r3 + 64 * r2), None)
tmp17 = tl.load(in_ptr0 + (48 + r3 + 64 * r2), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 - tmp1
tmp9 = tmp8 * tmp8
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 - tmp1
tmp14 = tmp13 * tmp13
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 - tmp1
tmp19 = tmp18 * tmp18
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None)
@triton.jit
def triton_per_fused_add_div_exp_mean_mul_pow_sub_sum_1(in_out_ptr0,
in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_out_ptr0 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, 1])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp6 = 64.0
tmp7 = tmp5 / tmp6
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = 3.6757541328186907
tmp11 = tmp9 + tmp10
tmp12 = 1.0
tmp13 = tmp3 / tmp12
tmp14 = tmp13 * tmp8
tmp15 = tmp11 + tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_div_exp_mean_pow_sub_sum_0[grid(1)](primals_3,
primals_1, primals_2, buf0, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
buf2 = buf0
del buf0
triton_per_fused_add_div_exp_mean_mul_pow_sub_sum_1[grid(1)](buf2,
primals_2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
return buf2, primals_1, primals_2, primals_3
def compute_negative_ln_prob(Y, mu, ln_var, pdf):
var = ln_var.exp()
if pdf == 'gauss':
negative_ln_prob = 0.5 * ((Y - mu) ** 2 / var).sum(1).mean(
) + 0.5 * Y.size(1) * math.log(2 * math.pi) + 0.5 * ln_var.sum(1
).mean()
elif pdf == 'logistic':
whitened = (Y - mu) / var
adjust = torch.logsumexp(torch.stack([torch.zeros(Y.size()), -
whitened]), 0)
negative_ln_prob = whitened.sum(1).mean() + 2 * adjust.sum(1).mean(
) + ln_var.sum(1).mean()
else:
raise ValueError('Unknown PDF: %s' % pdf)
return negative_ln_prob
class PDFNew(nn.Module):
def __init__(self, dim, pdf):
super(PDFNew, self).__init__()
assert pdf in {'gauss', 'logistic'}
self.dim = dim
self.pdf = pdf
self.mu = nn.Embedding(1, self.dim)
self.ln_var = nn.Embedding(1, self.dim)
def forward(self, input_0):
primals_1 = self.mu.weight
primals_2 = self.ln_var.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
wangxuuu/Demo
|
PDF
| false
| 13,096
|
[
"MIT"
] | 0
|
f1d85a55525a4199d63ee7dfe0ae2f21d3066c7c
|
https://github.com/wangxuuu/Demo/tree/f1d85a55525a4199d63ee7dfe0ae2f21d3066c7c
|
JointsMSELoss
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class JointsMSELoss(nn.Module):
def __init__(self, use_target_weight):
super(JointsMSELoss, self).__init__()
self.criterion = nn.MSELoss(reduction='sum')
self.use_target_weight = use_target_weight
def forward(self, output, target, target_weight):
batch_size = output.size(0)
num_joints = output.size(1)
heatmaps_pred = output.reshape((batch_size, num_joints, -1)).split(1, 1
)
heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1)
loss = 0
for idx in range(num_joints):
heatmap_pred = heatmaps_pred[idx].squeeze()
heatmap_gt = heatmaps_gt[idx].squeeze()
if self.use_target_weight:
loss += 0.5 * self.criterion(heatmap_pred.mul(target_weight
[:, idx]), heatmap_gt.mul(target_weight[:, idx]))
else:
loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
return loss / num_joints
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
    return [[], {'use_target_weight': True}]
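# A small sketch with the toy (4, 4) inputs above: each joint "heatmap"
# degenerates to one value per sample, so the loss reduces to a weighted sum
# of squared errors divided by the number of joints (illustrative check):
import torch

output, target, weight = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4)
loss = JointsMSELoss(use_target_weight=True)(output, target, weight)
ref = 0.5 * ((output * weight - target * weight) ** 2).sum() / 4
assert torch.allclose(loss, ref, atol=1e-6)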
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp12 = tmp10 * tmp11
tmp14 = tmp13 * tmp11
tmp15 = tmp12 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.sum(tmp17, 1)[:, None]
tmp22 = tmp20 * tmp21
tmp24 = tmp23 * tmp21
tmp25 = tmp22 - tmp24
tmp26 = tmp25 * tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
tmp32 = tmp30 * tmp31
tmp34 = tmp33 * tmp31
tmp35 = tmp32 - tmp34
tmp36 = tmp35 * tmp35
tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp39 = tl.sum(tmp37, 1)[:, None]
tmp40 = 0.5
tmp41 = tmp9 * tmp40
tmp42 = 0.0
tmp43 = tmp41 + tmp42
tmp44 = tmp19 * tmp40
tmp45 = tmp43 + tmp44
tmp46 = tmp29 * tmp40
tmp47 = tmp45 + tmp46
tmp48 = tmp39 * tmp40
tmp49 = tmp47 + tmp48
tmp50 = 0.25
tmp51 = tmp49 * tmp50
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp51, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf4, arg0_1,
arg2_1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf4,
class JointsMSELossNew(nn.Module):
def __init__(self, use_target_weight):
super(JointsMSELossNew, self).__init__()
self.criterion = nn.MSELoss(reduction='sum')
self.use_target_weight = use_target_weight
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
wszsycn/DarkPose-for-VIP2021
|
JointsMSELoss
| false
| 13,097
|
[
"Apache-2.0"
] | 0
|
3658c74ed8bc76c497cb0269dbe10ed6898e07fb
|
https://github.com/wszsycn/DarkPose-for-VIP2021/tree/3658c74ed8bc76c497cb0269dbe10ed6898e07fb
|
Offset
|
import torch
from torch import nn
class Offset(nn.Module):
def __init__(self, init_value=0.0):
super(Offset, self).__init__()
self.bias = nn.Parameter(torch.FloatTensor([init_value]))
def forward(self, input):
return input + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
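# Usage sketch: a single learnable scalar bias broadcast over the whole
# input (values are illustrative):
import torch

offset = Offset(init_value=1.5)
x = torch.zeros(2, 3)
print(offset(x))                   # every element equals 1.5
print(list(offset.parameters()))   # one 1-element trainable parameter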
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class OffsetNew(nn.Module):
def __init__(self, init_value=0.0):
super(OffsetNew, self).__init__()
self.bias = nn.Parameter(torch.FloatTensor([init_value]))
def forward(self, input_0):
primals_1 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
xaviolo99/dd3d
|
Offset
| false
| 13,098
|
[
"MIT"
] | 0
|
e83cbbc14986fe5c9e0d65c58085b4d0bc9330ff
|
https://github.com/xaviolo99/dd3d/tree/e83cbbc14986fe5c9e0d65c58085b4d0bc9330ff
|
CompositePrior
|
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
def swish(x):
return x.mul(torch.sigmoid(x))
def log_norm_pdf(x, mu, logvar):
return -0.5 * (logvar + np.log(2 * np.pi) + (x - mu).pow(2) / logvar.exp())
class Encoder(nn.Module):
def __init__(self, hidden_dim, latent_dim, input_dim, eps=0.1):
super(Encoder, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.ln1 = nn.LayerNorm(hidden_dim, eps=eps)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.ln2 = nn.LayerNorm(hidden_dim, eps=eps)
self.fc3 = nn.Linear(hidden_dim, hidden_dim)
self.ln3 = nn.LayerNorm(hidden_dim, eps=eps)
self.fc4 = nn.Linear(hidden_dim, hidden_dim)
self.ln4 = nn.LayerNorm(hidden_dim, eps=eps)
self.fc5 = nn.Linear(hidden_dim, hidden_dim)
self.ln5 = nn.LayerNorm(hidden_dim, eps=eps)
self.fc_mu = nn.Linear(hidden_dim, latent_dim)
self.fc_logvar = nn.Linear(hidden_dim, latent_dim)
def forward(self, x, dropout_rate):
norm = x.pow(2).sum(dim=-1).sqrt()
x = x / norm[:, None]
x = F.dropout(x, p=dropout_rate, training=self.training)
h1 = self.ln1(swish(self.fc1(x)))
h2 = self.ln2(swish(self.fc2(h1) + h1))
h3 = self.ln3(swish(self.fc3(h2) + h1 + h2))
h4 = self.ln4(swish(self.fc4(h3) + h1 + h2 + h3))
h5 = self.ln5(swish(self.fc5(h4) + h1 + h2 + h3 + h4))
return self.fc_mu(h5), self.fc_logvar(h5)
class CompositePrior(nn.Module):
def __init__(self, hidden_dim, latent_dim, input_dim, mixture_weights=[
3 / 20, 3 / 4, 1 / 10]):
super(CompositePrior, self).__init__()
self.mixture_weights = mixture_weights
self.mu_prior = nn.Parameter(torch.Tensor(1, latent_dim),
requires_grad=False)
self.mu_prior.data.fill_(0)
self.logvar_prior = nn.Parameter(torch.Tensor(1, latent_dim),
requires_grad=False)
self.logvar_prior.data.fill_(0)
self.logvar_uniform_prior = nn.Parameter(torch.Tensor(1, latent_dim
), requires_grad=False)
self.logvar_uniform_prior.data.fill_(10)
self.encoder_old = Encoder(hidden_dim, latent_dim, input_dim)
self.encoder_old.requires_grad_(False)
def forward(self, x, z):
post_mu, post_logvar = self.encoder_old(x, 0)
stnd_prior = log_norm_pdf(z, self.mu_prior, self.logvar_prior)
post_prior = log_norm_pdf(z, post_mu, post_logvar)
unif_prior = log_norm_pdf(z, self.mu_prior, self.logvar_uniform_prior)
gaussians = [stnd_prior, post_prior, unif_prior]
gaussians = [g.add(np.log(w)) for g, w in zip(gaussians, self.
mixture_weights)]
density_per_gaussian = torch.stack(gaussians, dim=-1)
return torch.logsumexp(density_per_gaussian, dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_dim': 4, 'latent_dim': 4, 'input_dim': 4}]
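# A sketch of the mixture density computed at the end of forward above:
# logsumexp over log(w_k) + log N(z | mu_k, var_k) evaluates
# log(sum_k w_k * N(z | mu_k, var_k)) per latent dimension. The posterior
# component is replaced by a random stand-in here (illustrative only):
import numpy as np
import torch

z = torch.randn(5, 4)
weights = [3 / 20, 3 / 4, 1 / 10]
gaussians = [
    log_norm_pdf(z, torch.zeros(1, 4), torch.zeros(1, 4)),         # standard prior
    log_norm_pdf(z, torch.randn(1, 4), torch.zeros(1, 4)),         # posterior stand-in
    log_norm_pdf(z, torch.zeros(1, 4), torch.full((1, 4), 10.0)),  # "uniform" prior
]
stacked = torch.stack([g + np.log(w) for g, w in zip(gaussians, weights)], dim=-1)
print(torch.logsumexp(stacked, dim=-1).shape)  # torch.Size([5, 4])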
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (4 * x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0 + 64 * x2), xmask, eviction_policy
='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0 + 64 * x2), xmask, eviction_policy
='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0 + 64 * x2), xmask, eviction_policy
='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
@triton.jit
def triton_poi_fused_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + 2)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + 3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp3 * tmp4
tmp9 = tmp6 + tmp8
tmp10 = tl.sigmoid(tmp9)
tmp11 = tmp9 * tmp10
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp17 = tl.sigmoid(tmp16)
tmp18 = tmp16 * tmp17
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp24 = tl.sigmoid(tmp23)
tmp25 = tmp23 * tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_mul_native_layer_norm_sigmoid_2(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp2 * tmp3
tmp6 = tmp4 - tmp5
tmp8 = 0.1
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(in_out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_3(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + 1)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp12 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + 2)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK])
tmp21 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + 3)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK])
tmp30 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp5 * tmp6
tmp11 = tmp8 + tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.sigmoid(tmp13)
tmp15 = tmp13 * tmp14
tmp16 = tmp7 + tmp15
tmp20 = tmp17 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = tl.sigmoid(tmp22)
tmp24 = tmp22 * tmp23
tmp25 = tmp16 + tmp24
tmp29 = tmp26 + tmp28
tmp31 = tmp29 + tmp30
tmp32 = tl.sigmoid(tmp31)
tmp33 = tmp31 * tmp32
tmp34 = tmp25 + tmp33
tmp35 = 4.0
tmp36 = tmp34 / tmp35
tmp37 = tmp7 - tmp36
tmp38 = tmp37 * tmp37
tmp39 = tmp15 - tmp36
tmp40 = tmp39 * tmp39
tmp41 = tmp38 + tmp40
tmp42 = tmp24 - tmp36
tmp43 = tmp42 * tmp42
tmp44 = tmp41 + tmp43
tmp45 = tmp33 - tmp36
tmp46 = tmp45 * tmp45
tmp47 = tmp44 + tmp46
tmp48 = tmp47 / tmp35
tl.store(out_ptr0 + x0, tmp36, xmask)
tl.store(out_ptr1 + x0, tmp48, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_4(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp7 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp6 = tmp4 * tmp5
tmp8 = tmp6 - tmp7
tmp10 = 0.1
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp8 * tmp12
tmp15 = tmp13 * tmp14
tmp17 = tmp15 + tmp16
tl.store(in_out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_5(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr1 + 1)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp14 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr1 + 2)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp25 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp33 = tl.load(in_ptr1 + 3)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK])
tmp36 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp38 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tl.sigmoid(tmp7)
tmp9 = tmp7 * tmp8
tmp13 = tmp10 + tmp12
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tmp18 = tl.sigmoid(tmp17)
tmp19 = tmp17 * tmp18
tmp20 = tmp9 + tmp19
tmp24 = tmp21 + tmp23
tmp26 = tmp24 + tmp25
tmp28 = tmp26 + tmp27
tmp29 = tl.sigmoid(tmp28)
tmp30 = tmp28 * tmp29
tmp31 = tmp20 + tmp30
tmp35 = tmp32 + tmp34
tmp37 = tmp35 + tmp36
tmp39 = tmp37 + tmp38
tmp40 = tl.sigmoid(tmp39)
tmp41 = tmp39 * tmp40
tmp42 = tmp31 + tmp41
tmp43 = 4.0
tmp44 = tmp42 / tmp43
tmp45 = tmp9 - tmp44
tmp46 = tmp45 * tmp45
tmp47 = tmp19 - tmp44
tmp48 = tmp47 * tmp47
tmp49 = tmp46 + tmp48
tmp50 = tmp30 - tmp44
tmp51 = tmp50 * tmp50
tmp52 = tmp49 + tmp51
tmp53 = tmp41 - tmp44
tmp54 = tmp53 * tmp53
tmp55 = tmp52 + tmp54
tmp56 = tmp55 / tmp43
tl.store(out_ptr0 + x0, tmp44, xmask)
tl.store(out_ptr1 + x0, tmp56, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_6(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp8 = tmp6 * tmp7
tmp10 = tmp8 - tmp9
tmp12 = 0.1
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp10 * tmp14
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tl.store(in_out_ptr0 + x2, tmp19, xmask)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x2, xmask)
tmp7 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_native_layer_norm_sigmoid_8(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp8 = tl.sigmoid(tmp7)
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp12 = tl.sigmoid(tmp11)
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tmp29 = 0.1
tmp30 = tmp28 + tmp29
tmp31 = libdevice.rsqrt(tmp30)
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp31, xmask)
@triton.jit
def triton_poi_fused_mul_native_layer_norm_sigmoid_9(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(in_out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask)
tmp5 = tl.load(in_ptr3 + x2, xmask)
tmp7 = tl.load(in_out_ptr0 + x2, xmask)
tmp9 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tl.store(in_out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_stack_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3 % 4
x3 = xindex // 3
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = 1.8378770664093453
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr1 + x3, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp9 = tl.load(in_ptr2 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl_math.exp(tmp5)
tmp13 = tmp11 / tmp12
tmp14 = tmp7 + tmp13
tmp15 = -0.5
tmp16 = tmp14 * tmp15
tmp17 = -1.8971199848858813
tmp18 = tmp16 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tmp22 = tl.full([1], 2, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr3 + x3, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tl.load(in_ptr4 + x1, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tmp27 + tmp6
tmp29 = tl.load(in_ptr1 + x3, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp30 = tl.load(in_ptr5 + x3, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp31 = tl.load(in_ptr6 + x1, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tmp30 + tmp31
tmp33 = tmp29 - tmp32
tmp34 = tmp33 * tmp33
tmp35 = tl_math.exp(tmp27)
tmp36 = tmp34 / tmp35
tmp37 = tmp28 + tmp36
tmp38 = tmp37 * tmp15
tmp39 = -0.2876820724517809
tmp40 = tmp38 + tmp39
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp24, tmp40, tmp41)
tmp43 = tmp0 >= tmp22
tl.full([1], 3, tl.int64)
tmp46 = tl.load(in_ptr7 + x1, tmp43 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp47 = tmp46 + tmp6
tmp48 = tl.load(in_ptr1 + x3, tmp43 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp49 = tl.load(in_ptr2 + x1, tmp43 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp50 = tmp48 - tmp49
tmp51 = tmp50 * tmp50
tmp52 = tl_math.exp(tmp46)
tmp53 = tmp51 / tmp52
tmp54 = tmp47 + tmp53
tmp55 = tmp54 * tmp15
tmp56 = -2.3025850929940455
tmp57 = tmp55 + tmp56
tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype)
tmp59 = tl.where(tmp43, tmp57, tmp58)
tmp60 = tl.where(tmp24, tmp42, tmp59)
tmp61 = tl.where(tmp4, tmp20, tmp60)
tl.store(out_ptr0 + x4, tmp61, xmask)
@triton.jit
def triton_poi_fused_logsumexp_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 3 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 3 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 3 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tl_math.abs(tmp4)
tmp6 = float('inf')
tmp7 = tmp5 == tmp6
tmp8 = 0.0
tmp9 = tl.where(tmp7, tmp8, tmp4)
tmp10 = tmp0 - tmp9
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp1 - tmp9
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp11 + tmp13
tmp15 = tmp3 - tmp9
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp18 = tl_math.log(tmp17)
tmp19 = tmp18 + tmp9
tl.store(out_ptr0 + x0, tmp19, xmask)
def call(args):
(arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, arg7_1, arg8_1,
arg9_1, arg10_1, arg11_1, arg12_1, arg13_1, arg14_1, arg15_1,
arg16_1, arg17_1, arg18_1, arg19_1, arg20_1, arg21_1, arg22_1,
arg23_1, arg24_1, arg25_1, arg26_1, arg27_1, arg28_1) = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4,), (1,))
assert_size_stride(arg3_1, (4,), (1,))
assert_size_stride(arg4_1, (4,), (1,))
assert_size_stride(arg5_1, (4, 4), (4, 1))
assert_size_stride(arg6_1, (4,), (1,))
assert_size_stride(arg7_1, (4,), (1,))
assert_size_stride(arg8_1, (4,), (1,))
assert_size_stride(arg9_1, (4, 4), (4, 1))
assert_size_stride(arg10_1, (4,), (1,))
assert_size_stride(arg11_1, (4,), (1,))
assert_size_stride(arg12_1, (4,), (1,))
assert_size_stride(arg13_1, (4, 4), (4, 1))
assert_size_stride(arg14_1, (4,), (1,))
assert_size_stride(arg15_1, (4,), (1,))
assert_size_stride(arg16_1, (4,), (1,))
assert_size_stride(arg17_1, (4, 4), (4, 1))
assert_size_stride(arg18_1, (4,), (1,))
assert_size_stride(arg19_1, (4,), (1,))
assert_size_stride(arg20_1, (4,), (1,))
assert_size_stride(arg21_1, (4, 4), (4, 1))
assert_size_stride(arg22_1, (4,), (1,))
assert_size_stride(arg23_1, (4, 4), (4, 1))
assert_size_stride(arg24_1, (4,), (1,))
assert_size_stride(arg25_1, (1, 4), (4, 1))
assert_size_stride(arg26_1, (1, 4), (4, 1))
assert_size_stride(arg27_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg28_1, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_mul_native_layer_norm_sigmoid_1[grid(64)](buf1,
arg2_1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_mul_native_layer_norm_sigmoid_2[grid(256)](buf4,
arg2_1, buf2, buf3, arg3_1, arg4_1, 256, XBLOCK=256, num_warps=
4, num_stages=1)
del arg2_1
del arg3_1
del arg4_1
buf5 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
del buf0
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
reinterpret_tensor(arg5_1, (4, 4), (1, 4), 0), out=buf5)
del arg5_1
buf6 = buf3
del buf3
buf7 = buf2
del buf2
triton_poi_fused_add_mul_native_layer_norm_sigmoid_3[grid(64)](buf5,
arg6_1, buf4, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_mul_native_layer_norm_sigmoid_4[grid(256)](buf8,
arg6_1, buf4, buf6, buf7, arg7_1, arg8_1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg6_1
del arg7_1
del arg8_1
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (64, 4), (4, 1), 0),
reinterpret_tensor(arg9_1, (4, 4), (1, 4), 0), out=buf9)
del arg9_1
buf10 = buf7
del buf7
buf11 = buf6
del buf6
triton_poi_fused_add_mul_native_layer_norm_sigmoid_5[grid(64)](buf9,
arg10_1, buf4, buf8, buf10, buf11, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf12 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf9
triton_poi_fused_add_mul_native_layer_norm_sigmoid_6[grid(256)](buf12,
arg10_1, buf4, buf8, buf10, buf11, arg11_1, arg12_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg10_1
del arg11_1
del arg12_1
buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf12, (64, 4), (4, 1), 0),
reinterpret_tensor(arg13_1, (4, 4), (1, 4), 0), out=buf13)
del arg13_1
buf14 = reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf13
triton_poi_fused_add_7[grid(256)](buf14, arg14_1, buf4, buf8, buf12,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg14_1
buf15 = buf11
del buf11
buf16 = buf10
del buf10
triton_poi_fused_mul_native_layer_norm_sigmoid_8[grid(64)](buf14,
buf15, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf17 = buf14
del buf14
triton_poi_fused_mul_native_layer_norm_sigmoid_9[grid(256)](buf17,
buf15, buf16, arg15_1, arg16_1, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del arg15_1
del arg16_1
buf18 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (64, 4), (4, 1), 0),
reinterpret_tensor(arg17_1, (4, 4), (1, 4), 0), out=buf18)
del arg17_1
buf19 = buf12
del buf12
triton_poi_fused_add_10[grid(256)](buf19, buf18, arg18_1, buf4,
buf8, buf17, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg18_1
del buf17
del buf18
buf20 = buf16
del buf16
buf21 = buf15
del buf15
triton_poi_fused_mul_native_layer_norm_sigmoid_8[grid(64)](buf19,
buf20, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf22 = buf19
del buf19
triton_poi_fused_mul_native_layer_norm_sigmoid_9[grid(256)](buf22,
buf20, buf21, arg19_1, arg20_1, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del arg19_1
del arg20_1
del buf20
del buf21
buf23 = reinterpret_tensor(buf8, (64, 4), (4, 1), 0)
del buf8
extern_kernels.mm(reinterpret_tensor(buf22, (64, 4), (4, 1), 0),
reinterpret_tensor(arg23_1, (4, 4), (1, 4), 0), out=buf23)
del arg23_1
buf24 = reinterpret_tensor(buf4, (64, 4), (4, 1), 0)
del buf4
extern_kernels.mm(reinterpret_tensor(buf22, (64, 4), (4, 1), 0),
reinterpret_tensor(arg21_1, (4, 4), (1, 4), 0), out=buf24)
del arg21_1
del buf22
buf25 = empty_strided_cuda((4, 4, 4, 4, 3), (192, 48, 12, 3, 1),
torch.float32)
triton_poi_fused_stack_11[grid(768)](arg26_1, arg27_1, arg25_1,
buf23, arg24_1, buf24, arg22_1, arg28_1, buf25, 768, XBLOCK=256,
num_warps=4, num_stages=1)
del arg22_1
del arg24_1
del arg25_1
del arg26_1
del arg27_1
del arg28_1
del buf23
buf26 = reinterpret_tensor(buf24, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf24
triton_poi_fused_logsumexp_12[grid(256)](buf25, buf26, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf25
return buf26,
def swish(x):
return x.mul(torch.sigmoid(x))
def log_norm_pdf(x, mu, logvar):
return -0.5 * (logvar + np.log(2 * np.pi) + (x - mu).pow(2) / logvar.exp())
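# log_norm_pdf is the elementwise Gaussian log-density with sigma^2 = exp(logvar):
#   log N(x; mu, sigma^2) = -0.5 * (log sigma^2 + log(2*pi) + (x - mu)^2 / sigma^2)
# Quick check (a sketch): at x = mu with logvar = 0 it returns -0.5*log(2*pi)
# ~= -0.9189, matching torch.distributions.Normal(0., 1.).log_prob(torch.tensor(0.)).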
class Encoder(nn.Module):
def __init__(self, hidden_dim, latent_dim, input_dim, eps=0.1):
super(Encoder, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.ln1 = nn.LayerNorm(hidden_dim, eps=eps)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.ln2 = nn.LayerNorm(hidden_dim, eps=eps)
self.fc3 = nn.Linear(hidden_dim, hidden_dim)
self.ln3 = nn.LayerNorm(hidden_dim, eps=eps)
self.fc4 = nn.Linear(hidden_dim, hidden_dim)
self.ln4 = nn.LayerNorm(hidden_dim, eps=eps)
self.fc5 = nn.Linear(hidden_dim, hidden_dim)
self.ln5 = nn.LayerNorm(hidden_dim, eps=eps)
self.fc_mu = nn.Linear(hidden_dim, latent_dim)
self.fc_logvar = nn.Linear(hidden_dim, latent_dim)
def forward(self, x, dropout_rate):
norm = x.pow(2).sum(dim=-1).sqrt()
x = x / norm[:, None]
x = F.dropout(x, p=dropout_rate, training=self.training)
h1 = self.ln1(swish(self.fc1(x)))
h2 = self.ln2(swish(self.fc2(h1) + h1))
h3 = self.ln3(swish(self.fc3(h2) + h1 + h2))
h4 = self.ln4(swish(self.fc4(h3) + h1 + h2 + h3))
h5 = self.ln5(swish(self.fc5(h4) + h1 + h2 + h3 + h4))
return self.fc_mu(h5), self.fc_logvar(h5)
class CompositePriorNew(nn.Module):
def __init__(self, hidden_dim, latent_dim, input_dim, mixture_weights=[
3 / 20, 3 / 4, 1 / 10]):
super(CompositePriorNew, self).__init__()
self.mixture_weights = mixture_weights
self.mu_prior = nn.Parameter(torch.Tensor(1, latent_dim),
requires_grad=False)
self.mu_prior.data.fill_(0)
self.logvar_prior = nn.Parameter(torch.Tensor(1, latent_dim),
requires_grad=False)
self.logvar_prior.data.fill_(0)
self.logvar_uniform_prior = nn.Parameter(torch.Tensor(1, latent_dim
), requires_grad=False)
self.logvar_uniform_prior.data.fill_(10)
self.encoder_old = Encoder(hidden_dim, latent_dim, input_dim)
self.encoder_old.requires_grad_(False)
def forward(self, input_0, input_1):
arg25_1 = self.mu_prior
arg26_1 = self.logvar_prior
arg28_1 = self.logvar_uniform_prior
arg1_1 = self.encoder_old.fc1.weight
arg2_1 = self.encoder_old.fc1.bias
arg3_1 = self.encoder_old.ln1.weight
arg4_1 = self.encoder_old.ln1.bias
arg5_1 = self.encoder_old.fc2.weight
arg6_1 = self.encoder_old.fc2.bias
arg7_1 = self.encoder_old.ln2.weight
arg8_1 = self.encoder_old.ln2.bias
arg9_1 = self.encoder_old.fc3.weight
arg10_1 = self.encoder_old.fc3.bias
arg11_1 = self.encoder_old.ln3.weight
arg12_1 = self.encoder_old.ln3.bias
arg13_1 = self.encoder_old.fc4.weight
arg14_1 = self.encoder_old.fc4.bias
arg15_1 = self.encoder_old.ln4.weight
arg16_1 = self.encoder_old.ln4.bias
arg17_1 = self.encoder_old.fc5.weight
arg18_1 = self.encoder_old.fc5.bias
arg19_1 = self.encoder_old.ln5.weight
arg20_1 = self.encoder_old.ln5.bias
arg21_1 = self.encoder_old.fc_mu.weight
arg22_1 = self.encoder_old.fc_mu.bias
arg23_1 = self.encoder_old.fc_logvar.weight
arg24_1 = self.encoder_old.fc_logvar.bias
arg0_1 = input_0
arg27_1 = input_1
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1,
arg6_1, arg7_1, arg8_1, arg9_1, arg10_1, arg11_1, arg12_1,
arg13_1, arg14_1, arg15_1, arg16_1, arg17_1, arg18_1, arg19_1,
arg20_1, arg21_1, arg22_1, arg23_1, arg24_1, arg25_1, arg26_1,
arg27_1, arg28_1])
return output[0]
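# Usage sketch (hypothetical values, mirroring the shapes asserted in call() above):
#   prior = CompositePriorNew(hidden_dim=4, latent_dim=4, input_dim=4).cuda()
#   x = torch.rand(4, 4, 4, 4, device='cuda')    # input_0 -> arg0_1, encoder input
#   z = torch.rand(4, 4, 4, 4, device='cuda')    # input_1 -> arg27_1, latent point
#   log_density = prior(x, z)                    # logsumexp over the 3 weighted components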
| verachtertr/RecVAE | CompositePrior | false | 13,099 | ["Apache-2.0"] | 0 | 915bed7f537cac6fc918aac8c622112561d15f03 | https://github.com/verachtertr/RecVAE/tree/915bed7f537cac6fc918aac8c622112561d15f03 |
CBAM_Module
|
import torch
from torch import nn
from torchvision.transforms import *
class CBAM_Module(nn.Module):
def __init__(self, channels, reduction):
super(CBAM_Module, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,
padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,
padding=0)
self.sigmoid_channel = nn.Sigmoid()
self.conv_after_concat = nn.Conv2d(2, 1, kernel_size=3, stride=1,
padding=1)
self.sigmoid_spatial = nn.Sigmoid()
def forward(self, x):
module_input = x
avg = self.avg_pool(x)
mx = self.max_pool(x)
avg = self.fc1(avg)
mx = self.fc1(mx)
avg = self.relu(avg)
mx = self.relu(mx)
avg = self.fc2(avg)
mx = self.fc2(mx)
x = avg + mx
x = self.sigmoid_channel(x)
x = module_input * x
module_input = x
avg = torch.mean(x, 1, True)
mx, _ = torch.max(x, 1, True)
x = torch.cat((avg, mx), 1)
x = self.conv_after_concat(x)
x = self.sigmoid_spatial(x)
x = module_input * x
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'reduction': 4}]
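# Usage sketch, assuming the shapes from get_inputs/get_init_inputs above:
#   cbam = CBAM_Module(channels=4, reduction=4)
#   y = cbam(torch.rand(4, 4, 4, 4))   # channel attention, then spatial attention
#   assert y.shape == (4, 4, 4, 4)     # attention rescales features, never reshapes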
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + x0, tmp30, xmask)
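# The two kernels above are the 1x1 adaptive pools specialized for a 4x4 map:
# triton_per_fused_mean_0 is AdaptiveAvgPool2d(1) as a 16-element row reduction,
# and triton_poi_fused_adaptive_max_pool2d_1 is AdaptiveMaxPool2d(1) fully
# unrolled into a chain of pairwise maximums over the same 16 elements.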
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_out_ptr1, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp6 = tl.load(in_out_ptr1 + x0, xmask)
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp7 = tmp6 + tmp2
tmp8 = triton_helpers.maximum(tmp4, tmp7)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(in_out_ptr1 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_convolution_sigmoid_3(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = tmp2 + tmp4
tmp6 = tl.sigmoid(tmp5)
tl.store(in_out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x4 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + 4 * x2, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 * tmp6
tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + (1 + 4 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tmp8 * tmp9
tmp11 = tmp7 + tmp10
tmp12 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp15 + tmp18
tmp20 = 4.0
tmp21 = tmp19 / tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp4, tmp21, tmp22)
tmp24 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp27 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp28 = tl.load(in_ptr1 + 4 * x2, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp29 = tmp27 * tmp28
tmp30 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp31 = tl.load(in_ptr1 + (1 + 4 * x2), tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tmp30 * tmp31
tmp33 = triton_helpers.maximum(tmp29, tmp32)
tmp34 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr1 + (2 + 4 * x2), tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tmp34 * tmp35
tmp37 = triton_helpers.maximum(tmp33, tmp36)
tmp38 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp39 = tl.load(in_ptr1 + (3 + 4 * x2), tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp40 = tmp38 * tmp39
tmp41 = triton_helpers.maximum(tmp37, tmp40)
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp24, tmp41, tmp42)
tmp44 = tl.where(tmp4, tmp23, tmp43)
tl.store(out_ptr0 + x4, tmp44, xmask)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp2 * tmp4
tl.store(out_ptr0 + x3, tmp5, xmask)
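# Pattern note: this final kernel fuses both attention applications from forward():
# tmp0 * tmp1 is x times the channel map (in_ptr1 holds the already-sigmoided
# output of kernel 3), and the further multiply by sigmoid(in_ptr2) applies the
# spatial attention, so input * channel_attn * spatial_attn lands in one pass.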
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 2, 3, 3), (18, 9, 3, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_adaptive_max_pool2d_1[grid(16)](primals_1, buf2,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 1, 1), (1, 1, 1, 1))
buf4 = extern_kernels.convolution(buf2, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 1, 1), (1, 1, 1, 1))
buf5 = buf3
del buf3
buf6 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(4)](buf5, buf6, primals_3,
4, XBLOCK=4, num_warps=1, num_stages=1)
del primals_3
buf7 = extern_kernels.convolution(buf5, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1))
buf8 = extern_kernels.convolution(buf6, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 1, 1), (4, 1, 1, 1))
buf9 = buf7
del buf7
triton_poi_fused_add_convolution_sigmoid_3[grid(16)](buf9,
primals_5, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf8
del primals_5
buf10 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
triton_poi_fused_cat_4[grid(128)](primals_1, buf9, buf10, 128,
XBLOCK=128, num_warps=4, num_stages=1)
buf11 = extern_kernels.convolution(buf10, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 1, 4, 4), (16, 16, 4, 1))
buf12 = buf11
del buf11
triton_poi_fused_convolution_5[grid(64)](buf12, primals_7, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_6[grid(256)](primals_1, buf9, buf12,
buf13, 256, XBLOCK=256, num_warps=4, num_stages=1)
return (buf13, primals_1, primals_2, primals_4, primals_6, buf1, buf2,
buf5, buf6, buf9, buf10, buf12)
class CBAM_ModuleNew(nn.Module):
def __init__(self, channels, reduction):
super(CBAM_ModuleNew, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,
padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,
padding=0)
self.sigmoid_channel = nn.Sigmoid()
self.conv_after_concat = nn.Conv2d(2, 1, kernel_size=3, stride=1,
padding=1)
self.sigmoid_spatial = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.conv_after_concat.weight
primals_7 = self.conv_after_concat.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| wangxing001/project-for-ReID | CBAM_Module | false | 13,100 | ["MIT"] | 0 | 68a216dbbc7f7036fa72e49e1a806edc9b8e152d | https://github.com/wangxing001/project-for-ReID/tree/68a216dbbc7f7036fa72e49e1a806edc9b8e152d |
PolicyNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
class PolicyNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size, init_w=0.003,
log_std_min=-20, log_std_max=2):
super(PolicyNetwork, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.mean_linear = nn.Linear(hidden_size, num_actions)
self.mean_linear.weight.data.uniform_(-init_w, init_w)
self.mean_linear.bias.data.uniform_(-init_w, init_w)
self.log_std_linear1 = nn.Linear(hidden_size, hidden_size)
self.log_std_linear2 = nn.Linear(hidden_size, num_actions)
self.log_std_linear2.weight.data.uniform_(-init_w, init_w)
self.log_std_linear2.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
mean = self.mean_linear(x)
log_std = self.log_std_linear2(F.relu(self.log_std_linear1(x)))
log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
return mean, log_std
def evaluate(self, state, epsilon=1e-06):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.rsample()
action = torch.tanh(z)
log_prob = normal.log_prob(z) - torch.log(1 - action.pow(2) + epsilon)
log_prob = log_prob.sum(-1, keepdim=True)
return action, log_prob, z, mean, log_std
def get_action(self, state):
state = torch.FloatTensor(state).unsqueeze(0)
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.sample()
action = torch.tanh(z)
action = action.detach().cpu().numpy()
return action[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_actions': 4, 'hidden_size': 4}]
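# Worked note on evaluate() above (a sketch, not from the original source): for a
# tanh-squashed Gaussian a = tanh(z) with z ~ N(mean, std), the change of variables
# gives
#   log p(a) = log p(z) - log(1 - tanh(z)^2)
# and epsilon=1e-06 keeps the correction finite as |a| -> 1, which is exactly
#   normal.log_prob(z) - torch.log(1 - action.pow(2) + epsilon)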
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.distributions import Normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
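# Pattern note: this kernel fuses the linear bias add with ReLU and also stores
# the (activation <= 0) mask that threshold_backward needs, so the boolean
# buffers created below (buf10, buf11, buf12) exist purely for the backward pass.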
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -20.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 2.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp2 >= tmp3
tmp8 = tmp2 <= tmp5
tmp9 = tmp7 & tmp8
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp9, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf6,
primals_9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_clamp_ge_le_logical_and_1[grid(256)](buf7,
primals_11, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
del primals_11
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf6, (64, 4), (4, 1), 0
), buf9, primals_10, buf10, primals_8, primals_6, buf11, primals_4, buf12
class PolicyNetworkNew(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size, init_w=0.003,
log_std_min=-20, log_std_max=2):
super(PolicyNetworkNew, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.mean_linear = nn.Linear(hidden_size, num_actions)
self.mean_linear.weight.data.uniform_(-init_w, init_w)
self.mean_linear.bias.data.uniform_(-init_w, init_w)
self.log_std_linear1 = nn.Linear(hidden_size, hidden_size)
self.log_std_linear2 = nn.Linear(hidden_size, num_actions)
self.log_std_linear2.weight.data.uniform_(-init_w, init_w)
self.log_std_linear2.bias.data.uniform_(-init_w, init_w)
def evaluate(self, state, epsilon=1e-06):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.rsample()
action = torch.tanh(z)
log_prob = normal.log_prob(z) - torch.log(1 - action.pow(2) + epsilon)
log_prob = log_prob.sum(-1, keepdim=True)
return action, log_prob, z, mean, log_std
def get_action(self, state):
state = torch.FloatTensor(state).unsqueeze(0)
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.sample()
action = torch.tanh(z)
action = action.detach().cpu().numpy()
return action[0]
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.mean_linear.weight
primals_7 = self.mean_linear.bias
primals_8 = self.log_std_linear1.weight
primals_9 = self.log_std_linear1.bias
primals_10 = self.log_std_linear2.weight
primals_11 = self.log_std_linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
| watermeleon/spot_mini_mini | PolicyNetwork | false | 13,101 | ["MIT"] | 0 | 8622d3b0e0a95f7c548cacb6722a94f61a7e2b4b | https://github.com/watermeleon/spot_mini_mini/tree/8622d3b0e0a95f7c548cacb6722a94f61a7e2b4b |
MLP_G
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class MLP_G(nn.Module):
def __init__(self, opt):
super(MLP_G, self).__init__()
self.fc1 = nn.Linear(opt.attSize + opt.nz, opt.ngh)
self.fc2 = nn.Linear(opt.ngh, opt.resSize)
self.lrelu = nn.LeakyReLU(0.2, True)
self.relu = nn.ReLU(True)
self.apply(weights_init)
def forward(self, noise, att):
h = torch.cat((noise, att), 1)
h = self.lrelu(self.fc1(h))
h = self.relu(self.fc2(h))
return h
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(attSize=4, nz=4, ngh=4, resSize=4)}]
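# Usage sketch, assuming the _mock_config values from get_init_inputs above:
#   opt = _mock_config(attSize=4, nz=4, ngh=4, resSize=4)
#   g = MLP_G(opt)
#   fake = g(torch.rand(4, 4), torch.rand(4, 4))   # (noise, att) -> (4, resSize)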
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
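# Pattern note: the torch.cat along dim=1 lowers to one masked gather. For each
# output column x0, the x0 < 4 branch reads in_ptr0 at (4*x1 + x0) and the
# x0 >= 4 branch reads in_ptr1 at (4*x1 + (x0 - 4)); tl.where keeps the live branch.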
@triton.jit
def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_leaky_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4
), 0), out=buf3)
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(16)](buf4,
primals_6, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_6
return buf4, buf0, buf2, buf5, primals_5
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class MLP_GNew(nn.Module):
def __init__(self, opt):
super(MLP_GNew, self).__init__()
self.fc1 = nn.Linear(opt.attSize + opt.nz, opt.ngh)
self.fc2 = nn.Linear(opt.ngh, opt.resSize)
self.lrelu = nn.LeakyReLU(0.2, True)
self.relu = nn.ReLU(True)
self.apply(weights_init)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_1 = self.fc2.weight
primals_6 = self.fc2.bias
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| tasfia/BMCoGAN | MLP_G | false | 13,102 | ["MIT"] | 0 | 0d400c2c71dbfb69af422afc487f65afb98de8af | https://github.com/tasfia/BMCoGAN/tree/0d400c2c71dbfb69af422afc487f65afb98de8af |
Attention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
"""
Applies an attention mechanism on the output features from the decoder.
.. math::
\\begin{array}{ll}
x = context*output \\\\
attn = exp(x_i) / sum_j exp(x_j) \\\\
output = \\tanh(w * (attn * context) + b * output)
\\end{array}
Args:
dim(int): The number of expected features in the output
Inputs: output, context
- **output** (batch, output_len, dimensions): tensor containing the output features from the decoder.
- **context** (batch, input_len, dimensions): tensor containing features of the encoded input sequence.
Outputs: output, attn
- **output** (batch, output_len, dimensions): tensor containing the attended output features from the decoder.
- **attn** (batch, output_len, input_len): tensor containing attention weights.
Attributes:
linear_out (torch.nn.Linear): applies a linear transformation to the incoming data: :math:`y = Ax + b`.
mask (torch.Tensor, optional): applies a :math:`-inf` to the indices specified in the `Tensor`.
Examples::
>>> attention = seq2seq.models.Attention(256)
>>> context = Variable(torch.randn(5, 3, 256))
>>> output = Variable(torch.randn(5, 5, 256))
>>> output, attn = attention(output, context)
"""
def __init__(self, dim):
super(Attention, self).__init__()
self.linear_out = nn.Linear(dim * 2, dim)
self.mask = None
def set_mask(self, mask):
"""
Sets indices to be masked
Args:
mask (torch.Tensor): tensor containing indices to be masked
"""
self.mask = mask
def forward(self, output, context):
batch_size = output.size(0)
hidden_size = output.size(2)
input_size = context.size(1)
attn = torch.bmm(output, context.transpose(1, 2))
if self.mask is not None:
attn.data.masked_fill_(self.mask, -float('inf'))
attn = F.softmax(attn.view(-1, input_size), dim=1).view(batch_size,
-1, input_size)
mix = torch.bmm(attn, context)
combined = torch.cat((mix, output), dim=2)
        output = torch.tanh(self.linear_out(combined.view(-1, 2 * hidden_size))
            ).view(batch_size, -1, hidden_size)
return output, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
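# Shape walk-through for forward() above, assuming output is (B, T_out, D) and
# context is (B, T_in, D):
#   attn = softmax(output @ context.transpose(1, 2))    # (B, T_out, T_in)
#   mix = attn @ context                                # (B, T_out, D)
#   out = tanh(linear_out(cat([mix, output], dim=2)))   # (B, T_out, D)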
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
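# Pattern note: inductor splits softmax into two pointwise kernels over rows of 4
# elements: _softmax_0 writes exp(x - rowmax) (the max subtraction is the usual
# overflow guard), and _softmax_1 divides each entry by its row sum.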
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_tanh_backward_3(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp3 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tl.store(in_out_ptr0 + x2, tmp3, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
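# Pattern note: like the ReLU kernels earlier, this fuses the forward tanh with
# the quantity its backward needs: buf7 receives 1 - tanh(x)^2, the local
# derivative, so autograd can skip recomputing it.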
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4,
4), (16, 1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1),
0), primals_2, out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0)
del buf3
extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5)
del primals_3
buf6 = buf5
del buf5
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_tanh_tanh_backward_3[grid(64)](buf6, primals_4,
buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
return reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf7
class AttentionNew(nn.Module):
"""
Applies an attention mechanism on the output features from the decoder.
.. math::
\\begin{array}{ll}
x = context*output \\\\
attn = exp(x_i) / sum_j exp(x_j) \\\\
output = \\tanh(w * (attn * context) + b * output)
\\end{array}
Args:
dim(int): The number of expected features in the output
Inputs: output, context
- **output** (batch, output_len, dimensions): tensor containing the output features from the decoder.
- **context** (batch, input_len, dimensions): tensor containing features of the encoded input sequence.
Outputs: output, attn
- **output** (batch, output_len, dimensions): tensor containing the attended output features from the decoder.
- **attn** (batch, output_len, input_len): tensor containing attention weights.
Attributes:
linear_out (torch.nn.Linear): applies a linear transformation to the incoming data: :math:`y = Ax + b`.
mask (torch.Tensor, optional): applies a :math:`-inf` to the indices specified in the `Tensor`.
Examples::
>>> attention = seq2seq.models.Attention(256)
>>> context = Variable(torch.randn(5, 3, 256))
>>> output = Variable(torch.randn(5, 5, 256))
>>> output, attn = attention(output, context)
"""
def __init__(self, dim):
super(AttentionNew, self).__init__()
self.linear_out = nn.Linear(dim * 2, dim)
self.mask = None
def set_mask(self, mask):
"""
Sets indices to be masked
Args:
mask (torch.Tensor): tensor containing indices to be masked
"""
self.mask = mask
def forward(self, input_0, input_1):
primals_3 = self.linear_out.weight
primals_4 = self.linear_out.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
| woaksths/set2regex-baseline | Attention | false | 13,103 | ["Apache-2.0"] | 0 | be377593526ad664a727dd7152fcb186118adaa5 | https://github.com/woaksths/set2regex-baseline/tree/be377593526ad664a727dd7152fcb186118adaa5 |
ConditionalEntropyLoss
|
import torch
import torch.nn.functional as F
class ConditionalEntropyLoss(torch.nn.Module):
def __init__(self, model):
super(ConditionalEntropyLoss, self).__init__()
def forward(self, x, weight):
loss = F.softmax(x, dim=1) * F.log_softmax(x, dim=1) * weight
loss = loss.sum(dim=1)
return -1.0 * loss.mean(dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'model': 4}]
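# Worked form of the loss above (a sketch): with p = softmax(x, dim=1),
#   loss = -mean_dim0( sum_dim1( p * log(p) * weight ) )
# i.e. a weighted conditional entropy. For p uniform over the 4 entries of dim 1
# and weight = 1, the inner sum is log(1/4), so the returned loss is log(4) ~ 1.386.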
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
tl.store(out_ptr1 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax__softmax_mul_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + x3, xmask)
tmp10 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp11 = tl_math.exp(tmp10)
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp11 + tmp13
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tl_math.log(tmp20)
tmp22 = tmp9 - tmp21
tmp23 = tmp8 * tmp22
tl.store(out_ptr0 + x3, tmp23, xmask)
@triton.jit
def triton_poi_fused_mean_mul_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (16 + x0), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp8 = tl.load(in_ptr1 + (32 + x0), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp12 = tl.load(in_ptr1 + (48 + x0), xmask)
tmp15 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp16 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp18 = tl.load(in_ptr0 + (80 + x0), xmask)
tmp19 = tl.load(in_ptr1 + (80 + x0), xmask)
tmp22 = tl.load(in_ptr0 + (96 + x0), xmask)
tmp23 = tl.load(in_ptr1 + (96 + x0), xmask)
tmp26 = tl.load(in_ptr0 + (112 + x0), xmask)
tmp27 = tl.load(in_ptr1 + (112 + x0), xmask)
tmp31 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp32 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp34 = tl.load(in_ptr0 + (144 + x0), xmask)
tmp35 = tl.load(in_ptr1 + (144 + x0), xmask)
tmp38 = tl.load(in_ptr0 + (160 + x0), xmask)
tmp39 = tl.load(in_ptr1 + (160 + x0), xmask)
tmp42 = tl.load(in_ptr0 + (176 + x0), xmask)
tmp43 = tl.load(in_ptr1 + (176 + x0), xmask)
tmp47 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp48 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp50 = tl.load(in_ptr0 + (208 + x0), xmask)
tmp51 = tl.load(in_ptr1 + (208 + x0), xmask)
tmp54 = tl.load(in_ptr0 + (224 + x0), xmask)
tmp55 = tl.load(in_ptr1 + (224 + x0), xmask)
tmp58 = tl.load(in_ptr0 + (240 + x0), xmask)
tmp59 = tl.load(in_ptr1 + (240 + x0), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = tmp15 * tmp16
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tmp14 + tmp29
tmp33 = tmp31 * tmp32
tmp36 = tmp34 * tmp35
tmp37 = tmp33 + tmp36
tmp40 = tmp38 * tmp39
tmp41 = tmp37 + tmp40
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp30 + tmp45
tmp49 = tmp47 * tmp48
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp56 = tmp54 * tmp55
tmp57 = tmp53 + tmp56
tmp60 = tmp58 * tmp59
tmp61 = tmp57 + tmp60
tmp62 = tmp46 + tmp61
tmp63 = 4.0
tmp64 = tmp62 / tmp63
tmp65 = -1.0
tmp66 = tmp64 * tmp65
tl.store(in_out_ptr0 + x0, tmp66, xmask)
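# Pattern note: this kernel fuses the elementwise multiply, the dim=1 sum, the
# dim=0 mean, and the final -1.0 scale into one fully unrolled pass: the four
# dim=0 slices (offsets 0, 64, 128, 192) are each reduced over dim=1 (stride 16),
# then averaged (tmp62 / 4.0) and negated.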
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0[grid(256)](arg0_1, buf0,
buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_mul_1[grid(256)](buf0, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = buf3
del buf3
triton_poi_fused_mean_mul_sum_2[grid(16)](buf4, buf2, arg1_1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg1_1
del buf2
return buf4,
class ConditionalEntropyLossNew(torch.nn.Module):
def __init__(self, model):
super(ConditionalEntropyLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| tasfia/BMCoGAN | ConditionalEntropyLoss | false | 13,104 | ["MIT"] | 0 | 0d400c2c71dbfb69af422afc487f65afb98de8af | https://github.com/tasfia/BMCoGAN/tree/0d400c2c71dbfb69af422afc487f65afb98de8af |
SelfAttentionGPT2
|
import torch
from torch import nn
def mask_(matrices, maskval=0.0, mask_diagonal=True):
"""
Masks out all values in the given batch of matrices where i <= j holds,
i < j if mask_diagonal is false
In place operation
    :param matrices: batch of matrices to mask (modified in place)
    :return: None
"""
h, w = matrices.size(-2), matrices.size(-1)
indices = torch.triu_indices(h, w, offset=0 if mask_diagonal else 1)
matrices[..., indices[0], indices[1]] = maskval
class SelfAttentionGPT2(nn.Module):
"""
This is the self-attention operation as implemented in the Huggingface port of GPT2. The code has been
    simplified to remove several features not used here, but otherwise it should do exactly the same as GPT2 when run with
    normal parameters.
    It is very similar to the default SelfAttention below, with the exception of the way it's initialized and some
    small speed improvements in the custom implementation of the linear layer (the Conv1D of the Huggingface port; a plain nn.Linear in this version).
We include this primarily for comparison with our own canonical implementation to check for performance differences.
"""
def __init__(self, emb, heads, mask=False):
super().__init__()
self.nheads = heads
self.emb = emb
self.mask = mask
self.c_attn = nn.Linear(emb, 3 * emb)
self.c_proj = nn.Linear(emb, emb)
def _attn(self, q, k, v):
dot = torch.matmul(q, k)
dot = dot / float(v.size(-1)) ** 0.5
if self.mask:
mask_(dot, maskval=float('-inf'), mask_diagonal=False)
dot = nn.Softmax(dim=-1)(dot)
return torch.matmul(dot, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape)
def split_heads(self, x, is_key=False):
new_x_shape = x.size()[:-1] + (self.nheads, x.size(-1) // self.nheads)
x = x.view(*new_x_shape)
if is_key:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, input_sequence):
_b, _t, e = input_sequence.size()
query, key, value = self.c_attn(input_sequence).split(e, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, is_key=True)
value = self.split_heads(value)
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
return a
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'emb': 4, 'heads': 4}]
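# Head bookkeeping sketch for split_heads/merge_heads above, with emb=4, heads=4:
#   (B, T, emb) -> view -> (B, T, heads, emb//heads) -> permute -> (B, heads, T, emb//heads)
# Keys take the alternate permute (0, 2, 3, 1) -> (B, heads, emb//heads, T), so the
# q @ k matmul in _attn yields (B, heads, T, T) logits without a further transpose.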
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf1, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf0, primals_3, buf2, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf0, primals_3, buf6, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del buf0
del primals_3
buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf9)
del primals_5
return reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0
), primals_4, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0)
def mask_(matrices, maskval=0.0, mask_diagonal=True):
"""
    Masks out all values in the given batch of matrices where i <= j holds
    (or i < j if mask_diagonal is False). In-place operation.
    :param matrices: batch of matrices to mask, shape (..., h, w)
    :param maskval: value written into the masked positions
    :return: None; matrices is modified in place
"""
h, w = matrices.size(-2), matrices.size(-1)
indices = torch.triu_indices(h, w, offset=0 if mask_diagonal else 1)
matrices[..., indices[0], indices[1]] = maskval
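# Editor's sketch (not part of the original repo): what mask_ does on a small
# batch. With mask_diagonal=False, only strictly upper-triangular entries
# (i < j) are overwritten, which is the usual causal attention mask.
def _demo_mask():
    m = torch.zeros(1, 3, 3)
    mask_(m, maskval=float('-inf'), mask_diagonal=False)
    # m[0] is now [[0., -inf, -inf],
    #              [0.,   0., -inf],
    #              [0.,   0.,   0.]]
    return m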
class SelfAttentionGPT2New(nn.Module):
"""
This is the self-attention operation as implemented in the Huggingface port of GPT2. The code has been
simplified to remove several features not used here but otherwise it should do exactly the same as GPT2 when run with
normal parameters.
    It is very similar to the default SelfAttention, except for how it is initialized and for some small speed
    improvements in the original's custom linear layer (a Conv1D module in the Huggingface port; this simplified
    version uses a plain nn.Linear instead).
    We include it primarily for comparison with our own canonical implementation, to check for performance differences.
"""
def __init__(self, emb, heads, mask=False):
super().__init__()
self.nheads = heads
self.emb = emb
self.mask = mask
self.c_attn = nn.Linear(emb, 3 * emb)
self.c_proj = nn.Linear(emb, emb)
def _attn(self, q, k, v):
dot = torch.matmul(q, k)
dot = dot / float(v.size(-1)) ** 0.5
if self.mask:
mask_(dot, maskval=float('-inf'), mask_diagonal=False)
dot = nn.Softmax(dim=-1)(dot)
return torch.matmul(dot, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape)
def split_heads(self, x, is_key=False):
new_x_shape = x.size()[:-1] + (self.nheads, x.size(-1) // self.nheads)
x = x.view(*new_x_shape)
if is_key:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, input_0):
primals_2 = self.c_attn.weight
primals_3 = self.c_attn.bias
primals_4 = self.c_proj.weight
primals_5 = self.c_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
wjeliot/former
|
SelfAttentionGPT2
| false
| 13,105
|
[
"MIT"
] | 0
|
38bd29b68b110e1e3eddae3106f7db2ffc0e5ce8
|
https://github.com/wjeliot/former/tree/38bd29b68b110e1e3eddae3106f7db2ffc0e5ce8
|
SelfAttentionWide
|
import torch
from torch import nn
import torch.nn.functional as F
def mask_(matrices, maskval=0.0, mask_diagonal=True):
"""
    Masks out all values in the given batch of matrices where i <= j holds
    (or i < j if mask_diagonal is False). In-place operation.
    :param matrices: batch of matrices to mask, shape (..., h, w)
    :param maskval: value written into the masked positions
    :return: None; matrices is modified in place
"""
h, w = matrices.size(-2), matrices.size(-1)
indices = torch.triu_indices(h, w, offset=0 if mask_diagonal else 1)
matrices[..., indices[0], indices[1]] = maskval
class SelfAttentionWide(nn.Module):
"""
    A self-attention operation with a larger number of parameters than the standard one.
    It uses a full-size embedding vector for each head.
"""
def __init__(self, emb, heads=8, mask=False):
"""
:param emb:
:param heads:
:param mask:
"""
super().__init__()
self.emb = emb
self.heads = heads
self.mask = mask
self.tokeys = nn.Linear(emb, emb * heads, bias=False)
self.toqueries = nn.Linear(emb, emb * heads, bias=False)
self.tovalues = nn.Linear(emb, emb * heads, bias=False)
self.unifyheads = nn.Linear(heads * emb, emb)
def forward(self, x):
b, t, e = x.size()
h = self.heads
assert e == self.emb, f'Input embedding dim ({e}) should match layer embedding dim ({self.emb})'
keys = self.tokeys(x).view(b, t, h, e)
queries = self.toqueries(x).view(b, t, h, e)
values = self.tovalues(x).view(b, t, h, e)
keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
values = values.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries / e ** (1 / 4)
keys = keys / e ** (1 / 4)
dot = torch.bmm(queries, keys.transpose(1, 2))
assert dot.size() == (b * h, t, t)
if self.mask:
mask_(dot, maskval=float('-inf'), mask_diagonal=False)
dot = F.softmax(dot, dim=2)
out = torch.bmm(dot, values).view(b, h, t, e)
out = out.transpose(1, 2).contiguous().view(b, t, h * e)
return self.unifyheads(out)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'emb': 4}]
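# Editor's sketch (not part of the original repo): the "wide" variant keeps a
# full emb-sized vector per head, so each projection holds emb * emb * heads
# weights instead of the emb * emb of a standard multi-head projection.
def _demo_wide_param_count(emb=4, heads=8):
    wide = SelfAttentionWide(emb, heads=heads)
    n = sum(p.numel() for p in wide.parameters())
    # 3 bias-free projections + unifyheads weight + unifyheads bias
    assert n == 3 * emb * emb * heads + heads * emb * emb + emb
    return n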
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
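# Editor's note (added comment): the constant 0.7071067811865475 in the two
# kernels below is 4 ** (-1 / 4), i.e. 1 / e ** (1 / 4) for e = 4. It folds
# the queries / e ** (1 / 4) and keys / e ** (1 / 4) scalings of the eager
# forward into single multiplies.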
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 32
x2 = xindex // 128
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 8) + 32 * x2 + 128 * (x1 // 8)
), xmask)
tmp1 = 0.7071067811865475
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x2 % 8) + 32 * x1 + 128 * (x2 // 8)
), xmask)
tmp1 = 0.7071067811865475
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 8
x3 = xindex // 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 32 * x1 + 128 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 8
x2 = xindex // 32 % 4
x3 = xindex // 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 128 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_transpose_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 128 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (32, 4), (4, 1))
assert_size_stride(primals_3, (32, 4), (4, 1))
assert_size_stride(primals_4, (32, 4), (4, 1))
assert_size_stride(primals_5, (4, 32), (32, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 32), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 32), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((32, 4, 4), (4, 128, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(512)](buf1, buf3, 512, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf1, (32, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_div_1[grid(512)](buf0, buf4, 512, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf0, (32, 4, 4), (16, 4, 1), 0)
del buf0
extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (32, 4, 4), (16,
1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((32, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(512)](buf5, buf6, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused__softmax_3[grid(512)](buf6, buf7, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 8, 4, 4), (128, 16, 4, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(512)](buf2, buf8, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (32, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (32, 4, 4), (16,
4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32
)
triton_poi_fused_clone_5[grid(512)](buf9, buf10, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (16, 32),
(32, 1), 0), reinterpret_tensor(primals_5, (32, 4), (1, 32), 0),
alpha=1, beta=1, out=buf11)
del primals_6
buf12 = reinterpret_tensor(buf9, (32, 4, 4), (16, 1, 4), 0)
del buf9
triton_poi_fused_transpose_6[grid(512)](buf3, buf12, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del buf3
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 32), (32, 1), 0
), primals_5, reinterpret_tensor(buf8, (32, 4, 4), (16, 1, 4), 0
), buf12, buf4
def mask_(matrices, maskval=0.0, mask_diagonal=True):
"""
    Masks out all values in the given batch of matrices where i <= j holds
    (or i < j if mask_diagonal is False). In-place operation.
    :param matrices: batch of matrices to mask, shape (..., h, w)
    :param maskval: value written into the masked positions
    :return: None; matrices is modified in place
"""
h, w = matrices.size(-2), matrices.size(-1)
indices = torch.triu_indices(h, w, offset=0 if mask_diagonal else 1)
matrices[..., indices[0], indices[1]] = maskval
class SelfAttentionWideNew(nn.Module):
"""
    A self-attention operation with a larger number of parameters than the standard one.
    It uses a full-size embedding vector for each head.
"""
def __init__(self, emb, heads=8, mask=False):
"""
:param emb:
:param heads:
:param mask:
"""
super().__init__()
self.emb = emb
self.heads = heads
self.mask = mask
self.tokeys = nn.Linear(emb, emb * heads, bias=False)
self.toqueries = nn.Linear(emb, emb * heads, bias=False)
self.tovalues = nn.Linear(emb, emb * heads, bias=False)
self.unifyheads = nn.Linear(heads * emb, emb)
def forward(self, input_0):
primals_2 = self.tokeys.weight
primals_3 = self.toqueries.weight
primals_4 = self.tovalues.weight
primals_5 = self.unifyheads.weight
primals_6 = self.unifyheads.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
wjeliot/former
|
SelfAttentionWide
| false
| 13,106
|
[
"MIT"
] | 0
|
38bd29b68b110e1e3eddae3106f7db2ffc0e5ce8
|
https://github.com/wjeliot/former/tree/38bd29b68b110e1e3eddae3106f7db2ffc0e5ce8
|
Discriminator
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
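# Editor's note (added comment): weights_init is meant to be passed to
# Module.apply (see self.apply(weights_init) below), which visits every
# submodule: Linear layers get N(0, 0.02) weights and zero bias, BatchNorm
# layers get N(1, 0.02) scale and zero shift.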
class Discriminator(nn.Module):
def __init__(self, opt):
super(Discriminator, self).__init__()
self.discriminatorS = nn.Linear(opt.attSize, 256)
self.discriminatorU = nn.Linear(opt.attSize, 256)
self.fc = nn.Linear(256 * 2, 2)
self.lrelu = nn.LeakyReLU(0.2, True)
self.sigmoid = nn.Sigmoid()
self.logic = nn.LogSoftmax(dim=1)
self.apply(weights_init)
def forward(self, s, u):
dis_s = self.lrelu(self.discriminatorS(s))
dis_u = self.lrelu(self.discriminatorU(u))
hs = torch.cat((dis_s, dis_u), 1)
hs = self.fc(hs).squeeze()
return hs, dis_s, dis_u
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(attSize=4)}]
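# Editor's sketch (not part of the original repo): expected output shapes for
# the eager Discriminator above, using attSize=4 as in get_init_inputs.
def _demo_discriminator():
    d = Discriminator(_mock_config(attSize=4))
    hs, dis_s, dis_u = d(torch.rand(4, 4), torch.rand(4, 4))
    assert hs.shape == (4, 2) and dis_s.shape == (4, 256) and dis_u.shape == (4, 256)
    return hs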
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
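# Editor's note (added comment): the kernel below fuses the bias add with
# LeakyReLU(0.2) and also stores the boolean mask (output > 0) that the
# LeakyReLU backward pass consumes, hence "leaky_relu_backward" in its name.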
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 512
x1 = xindex // 512
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (256 * x1 + x0), tmp4, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 512, tl.int64)
tmp9 = tl.load(in_ptr1 + (256 * x1 + (-256 + x0)), tmp6,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (256, 4), (4, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (2, 512), (512, 1))
assert_size_stride(primals_8, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 256),
(1, 4), 0), out=buf0)
del primals_1
buf1 = buf0
del buf0
buf7 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_leaky_relu_leaky_relu_backward_0[grid(1024)](buf1,
primals_2, buf7, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(primals_6, reinterpret_tensor(primals_4, (4, 256),
(1, 4), 0), out=buf2)
del primals_4
buf3 = buf2
del buf2
buf6 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
triton_poi_fused_leaky_relu_leaky_relu_backward_0[grid(1024)](buf3,
primals_5, buf6, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
triton_poi_fused_cat_1[grid(2048)](buf1, buf3, buf4, 2048, XBLOCK=
256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(512, 2), (1, 512), 0), alpha=1, beta=1, out=buf5)
del primals_8
return buf5, buf1, buf3, primals_3, primals_6, buf4, primals_7, buf6, buf7
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class DiscriminatorNew(nn.Module):
def __init__(self, opt):
super(DiscriminatorNew, self).__init__()
self.discriminatorS = nn.Linear(opt.attSize, 256)
self.discriminatorU = nn.Linear(opt.attSize, 256)
self.fc = nn.Linear(256 * 2, 2)
self.lrelu = nn.LeakyReLU(0.2, True)
self.sigmoid = nn.Sigmoid()
self.logic = nn.LogSoftmax(dim=1)
self.apply(weights_init)
def forward(self, input_0, input_1):
primals_1 = self.discriminatorS.weight
primals_2 = self.discriminatorS.bias
primals_4 = self.discriminatorU.weight
primals_5 = self.discriminatorU.bias
primals_7 = self.fc.weight
primals_8 = self.fc.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1], output[2]
|
tasfia/BMCoGAN
|
Discriminator
| false
| 13,107
|
[
"MIT"
] | 0
|
0d400c2c71dbfb69af422afc487f65afb98de8af
|
https://github.com/tasfia/BMCoGAN/tree/0d400c2c71dbfb69af422afc487f65afb98de8af
|
Mapping
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def reparameter(mu, sigma):
return torch.randn_like(mu) * sigma + mu
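# Editor's note (added comment): reparameter implements the VAE
# reparameterization trick: it draws eps ~ N(0, I) via randn_like and returns
# mu + sigma * eps, so gradients can flow through mu and sigma.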
class Mapping(nn.Module):
def __init__(self, opt):
super(Mapping, self).__init__()
self.latensize = opt.latenSize
self.encoder_linear = nn.Linear(opt.resSize, opt.latenSize * 2)
self.discriminator = nn.Linear(opt.latenSize, 1)
self.classifier = nn.Linear(opt.latenSize, opt.nclass_seen)
self.lrelu = nn.LeakyReLU(0.2, True)
self.sigmoid = nn.Sigmoid()
self.logic = nn.LogSoftmax(dim=1)
self.apply(weights_init)
def forward(self, x, train_G=False):
laten = self.lrelu(self.encoder_linear(x))
mus, stds = laten[:, :self.latensize], laten[:, self.latensize:]
stds = self.sigmoid(stds)
encoder_out = reparameter(mus, stds)
if not train_G:
dis_out = self.discriminator(encoder_out)
else:
dis_out = self.discriminator(mus)
pred = self.logic(self.classifier(mus))
return mus, stds, dis_out, pred, encoder_out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(latenSize=4, resSize=4, nclass_seen=4)}]
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x2, xmask)
tmp11 = tl.load(in_ptr3 + (x0 + 8 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.sigmoid(tmp7)
tmp10 = tmp9 * tmp8
tmp12 = tmp10 + tmp11
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp12, xmask)
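# Editor's note (added comment): the next two kernels compute a numerically
# stable log-softmax over rows of length 4 in two passes: first x - rowmax,
# then subtracting log(sum(exp(.))) of the shifted values.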
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8),
(1, 4), 0), out=buf0)
del primals_1
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf11 = empty_strided_cuda((4, 8), (8, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_leaky_relu_leaky_relu_backward_0[grid(32)](buf0,
primals_2, buf2, buf11, 32, XBLOCK=32, num_warps=1, num_stages=1)
buf3 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf4 = buf3
del buf3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_1[grid(16)](buf0, primals_2, buf4,
buf2, buf1, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf0
del primals_2
buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, buf5, reinterpret_tensor(primals_4,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf7)
del primals_5
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (4, 4), (8,
1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf8)
del primals_7
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_2[grid(16)](buf8, buf9, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf10 = buf8
del buf8
triton_poi_fused__log_softmax_3[grid(16)](buf9, buf10, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf9
return reinterpret_tensor(buf2, (4, 4), (8, 1), 0
), buf1, buf7, buf10, buf5, primals_3, buf1, reinterpret_tensor(buf2,
(4, 4), (8, 1), 0), buf4, buf5, buf10, primals_6, primals_4, buf11
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def reparameter(mu, sigma):
return torch.randn_like(mu) * sigma + mu
class MappingNew(nn.Module):
def __init__(self, opt):
super(MappingNew, self).__init__()
self.latensize = opt.latenSize
self.encoder_linear = nn.Linear(opt.resSize, opt.latenSize * 2)
self.discriminator = nn.Linear(opt.latenSize, 1)
self.classifier = nn.Linear(opt.latenSize, opt.nclass_seen)
self.lrelu = nn.LeakyReLU(0.2, True)
self.sigmoid = nn.Sigmoid()
self.logic = nn.LogSoftmax(dim=1)
self.apply(weights_init)
def forward(self, input_0):
primals_1 = self.encoder_linear.weight
primals_2 = self.encoder_linear.bias
primals_4 = self.discriminator.weight
primals_5 = self.discriminator.bias
primals_3 = self.classifier.weight
primals_7 = self.classifier.bias
primals_6 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1], output[2], output[3], output[4]
|
tasfia/BMCoGAN
|
Mapping
| false
| 13,108
|
[
"MIT"
] | 0
|
0d400c2c71dbfb69af422afc487f65afb98de8af
|
https://github.com/tasfia/BMCoGAN/tree/0d400c2c71dbfb69af422afc487f65afb98de8af
|
LinearEmbedder
|
import torch
import torch.nn as nn
class LinearEmbedder(torch.nn.Module):
def __init__(self, in_features, out_features):
super(LinearEmbedder, self).__init__()
self.fc = nn.Linear(in_features, out_features)
def forward(self, x):
o = self.fc(x)
o = self.l2_norm(o)
return o
def l2_norm(self, input):
input_size = input.size()
buffer = torch.pow(input, 2)
normp = torch.sum(buffer, 1).add_(1e-12)
norm = torch.sqrt(normp)
_output = torch.div(input, norm.view(-1, 1).expand_as(input))
output = _output.view(input_size)
return output
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
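# Editor's sketch (not part of the original repo): l2_norm above is
# numerically close to torch.nn.functional.normalize on nonzero rows;
# normalize clamps the norm at eps, while l2_norm adds eps inside the sqrt.
def _demo_l2_norm():
    import torch.nn.functional as F
    m = LinearEmbedder(4, 4)
    x = torch.rand(4, 4)
    assert torch.allclose(m.l2_norm(x), F.normalize(x, p=2, dim=1), atol=1e-6)
    return m(x)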
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 1e-12
tmp13 = tmp11 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf1, primals_3, buf0
class LinearEmbedderNew(torch.nn.Module):
def __init__(self, in_features, out_features):
super(LinearEmbedderNew, self).__init__()
self.fc = nn.Linear(in_features, out_features)
def l2_norm(self, input):
input_size = input.size()
buffer = torch.pow(input, 2)
normp = torch.sum(buffer, 1).add_(1e-12)
norm = torch.sqrt(normp)
_output = torch.div(input, norm.view(-1, 1).expand_as(input))
output = _output.view(input_size)
return output
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
xiaonanzzz/ProxyAnchorLossSimple
|
LinearEmbedder
| false
| 13,109
|
[
"MIT"
] | 0
|
a501578142fd00bf001c840e8051c67dee873f67
|
https://github.com/xiaonanzzz/ProxyAnchorLossSimple/tree/a501578142fd00bf001c840e8051c67dee873f67
|
ValueNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ValueNet(nn.Module):
def __init__(self, actions):
super(ValueNet, self).__init__()
self.conv1 = nn.Conv2d(4, 32, 8, stride=4, padding=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2, padding=1)
self.conv3 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.pool = nn.MaxPool2d(2)
self.fc1 = nn.Linear(1600, 512)
self.fc2 = nn.Linear(512, actions)
def forward(self, input_state):
x = F.relu(self.conv1(input_state))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = self.pool(x)
x = x.view(-1, 1600)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 81, 81])]
def get_init_inputs():
return [[], {'actions': 4}]
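# Editor's sketch (not part of the original repo): why fc1 expects 1600
# features for an 81x81 input. Each conv output side is
# floor((n + 2p - k) / s) + 1:
#   conv1: (81 + 4 - 8) // 4 + 1 = 20
#   conv2: (20 + 2 - 4) // 2 + 1 = 10
#   conv3: (10 + 2 - 3) // 1 + 1 = 10
#   pool:  10 // 2 = 5  ->  64 * 5 * 5 = 1600
def _demo_valuenet_shapes():
    net = ValueNet(actions=4)
    out = net(torch.rand(4, 4, 81, 81))
    assert out.shape == (4, 4)
    return out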
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 400 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x3 = xindex // 5
x2 = xindex // 1600
x4 = xindex % 1600
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x3), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x4 + 1664 * x2), tmp15, xmask)
tl.store(out_ptr1 + x5, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 8, 8), (256, 64, 8, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 81, 81), (26244, 6561, 81, 1))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (512, 1600), (1600, 1))
assert_size_stride(primals_9, (512,), (1,))
assert_size_stride(primals_10, (4, 512), (512, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
4), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 20, 20), (12800, 400, 20, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(51200)](buf1, primals_2,
51200, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 10, 10), (6400, 100, 10, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(25600)](buf3, primals_5,
25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 10, 10), (6400, 100, 10, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_1[grid(25600)](buf5, primals_7,
25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 64, 5, 5), (1664, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 64, 5, 5), (1600, 25, 5, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_2[grid(6400)](buf5, buf6,
buf7, 6400, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 1600), (1600, 1), 0),
reinterpret_tensor(primals_8, (1600, 512), (1, 1600), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_3[grid(2048)](buf9, primals_9, 2048, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, buf9, reinterpret_tensor(
primals_10, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf10)
del primals_11
return (buf10, primals_1, primals_3, primals_4, primals_6, buf1, buf3,
buf5, buf6, reinterpret_tensor(buf7, (4, 1600), (1600, 1), 0), buf9,
primals_10, primals_8)
class ValueNetNew(nn.Module):
def __init__(self, actions):
super(ValueNetNew, self).__init__()
self.conv1 = nn.Conv2d(4, 32, 8, stride=4, padding=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2, padding=1)
self.conv3 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.pool = nn.MaxPool2d(2)
self.fc1 = nn.Linear(1600, 512)
self.fc2 = nn.Linear(512, actions)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
wondervictor/DeepQLearning
|
ValueNet
| false
| 13,110
|
[
"MIT"
] | 0
|
48d1a5c9e3dff38845366a31830d9114e9eefedc
|
https://github.com/wondervictor/DeepQLearning/tree/48d1a5c9e3dff38845366a31830d9114e9eefedc
|
DecoderBias
|
import torch
import torch.nn as nn
class DecoderBias(nn.Module):
def __init__(self, dim1_batch, latent_dim, bias=False):
super().__init__()
self.dim1_latent_decoder = nn.Parameter(torch.randn(latent_dim,
latent_dim))
self.dim2_latent_decoder = nn.Parameter(torch.randn(latent_dim,
latent_dim))
self.batch_decoder_weight = nn.Parameter(torch.randn(dim1_batch,
latent_dim))
if bias:
self.dim1_bias = nn.Parameter(torch.randn(latent_dim))
self.dim2_bias = nn.Parameter(torch.randn(latent_dim))
else:
self.dim1_bias = 0
self.dim2_bias = 0
def forward(self, x):
self.dim1_decoder_weight = torch.cat([self.dim1_latent_decoder,
self.batch_decoder_weight], dim=0)
self.dim2_decoder_weight = torch.cat([self.dim2_latent_decoder,
self.batch_decoder_weight], dim=0)
dim1_latent = x[0]
dim2_latent = x[1]
batch = x[2]
dim1_input = torch.cat([dim1_latent, batch], dim=1)
dim2_input = torch.cat([dim2_latent, batch], dim=1)
dim1_output = dim1_input @ self.dim1_decoder_weight + self.dim1_bias
dim2_output = dim2_input @ self.dim2_decoder_weight + self.dim2_bias
return dim1_output, dim2_output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim1_batch': 4, 'latent_dim': 4}]
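# Editor's sketch (not part of the original repo): concatenating inputs and
# stacking weights is the block-matrix identity
#   [z, b] @ [[W_z], [W_b]] == z @ W_z + b @ W_b,
# so each output mixes a dimension-specific decoder with the shared batch
# decoder.
def _demo_decoder_bias_identity():
    dec = DecoderBias(dim1_batch=4, latent_dim=4)
    x = torch.rand(3, 4, 4)  # [dim1_latent, dim2_latent, batch]
    out1, _ = dec(x)
    ref = x[0] @ dec.dim1_latent_decoder + x[2] @ dec.batch_decoder_weight
    assert torch.allclose(out1, ref, atol=1e-6)
    return out1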
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.load(in_ptr2 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp12 = tl.where(tmp4, tmp11, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
tl.store(out_ptr1 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (32 + 4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.load(in_ptr0 + (16 + 4 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.where(tmp4, tmp11, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
tl.store(out_ptr1 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, primals_3,
buf0, buf1, 32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
del primals_3
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_1[grid(32)](primals_4, buf2, buf3, 32, XBLOCK=
32, num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, buf0, out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_add_2[grid(16)](buf5, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf3, buf1, out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_add_2[grid(16)](buf7, 16, XBLOCK=16, num_warps=1,
num_stages=1)
return buf5, buf7, buf1, buf0, reinterpret_tensor(buf3, (8, 4), (1, 8), 0
), reinterpret_tensor(buf2, (8, 4), (1, 8), 0)
class DecoderBiasNew(nn.Module):
def __init__(self, dim1_batch, latent_dim, bias=False):
super().__init__()
self.dim1_latent_decoder = nn.Parameter(torch.randn(latent_dim,
latent_dim))
self.dim2_latent_decoder = nn.Parameter(torch.randn(latent_dim,
latent_dim))
self.batch_decoder_weight = nn.Parameter(torch.randn(dim1_batch,
latent_dim))
if bias:
self.dim1_bias = nn.Parameter(torch.randn(latent_dim))
self.dim2_bias = nn.Parameter(torch.randn(latent_dim))
else:
self.dim1_bias = 0
self.dim2_bias = 0
def forward(self, input_0):
primals_1 = self.dim1_latent_decoder
primals_2 = self.dim2_latent_decoder
primals_3 = self.batch_decoder_weight
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
|
xiaoyanLi629/single_cell_data_analysis
|
DecoderBias
| false
| 13,111
|
[
"MIT"
] | 0
|
39d6bbd64249385d2005a775ea1d05e210f41fbe
|
https://github.com/xiaoyanLi629/single_cell_data_analysis/tree/39d6bbd64249385d2005a775ea1d05e210f41fbe
|
ContextualCell
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
def conv_bn_relu(C_in, C_out, kernel_size, stride, padding, affine=True):
return nn.Sequential(nn.Conv2d(C_in, C_out, kernel_size, stride=stride,
padding=padding, bias=False), nn.BatchNorm2d(C_out, affine=affine),
nn.ReLU(inplace=False))
class AggregateCell(nn.Module):
"""
    Before aggregating, both paths should undergo a 1x1 convolution whose output
    channels equal the smallest channel size among them, and be upsampled to the
    highest resolution among them.
    pre_transform: whether to apply conv-bn-relu before summing up
"""
def __init__(self, size_1, size_2, agg_size, pre_transform=True):
super(AggregateCell, self).__init__()
self.pre_transform = pre_transform
if self.pre_transform:
self.branch_1 = conv_bn_relu(size_1, agg_size, 1, 1, 0)
self.branch_2 = conv_bn_relu(size_2, agg_size, 1, 1, 0)
def forward(self, x1, x2):
if self.pre_transform:
x1 = self.branch_1(x1)
x2 = self.branch_2(x2)
if x1.size()[2:] > x2.size()[2:]:
x2 = nn.Upsample(size=x1.size()[2:], mode='bilinear')(x2)
elif x1.size()[2:] < x2.size()[2:]:
x1 = nn.Upsample(size=x2.size()[2:], mode='bilinear')(x1)
return x1 + x2
class ContextualCell(nn.Module):
"""New contextual cell design
Config contains [op1, [loc1, loc2, op1, op2], [...], [...]]
"""
def __init__(self, config, inp, repeats=1):
super(ContextualCell, self).__init__()
self._ops = nn.ModuleList()
self._pos = []
self._collect_inds = [0]
self._pools = ['x']
for ind, op in enumerate(config):
if ind == 0:
pos = 0
op_id = op
self._collect_inds.remove(pos)
op_name = OP_NAMES[op_id]
self._ops.append(OPS[op_name](inp, 1, True, repeats))
self._pos.append(pos)
self._collect_inds.append(ind + 1)
self._pools.append('{}({})'.format(op_name, self._pools[pos]))
else:
pos1, pos2, op_id1, op_id2 = op
for ind2, (pos, op_id) in enumerate(zip([pos1, pos2], [
op_id1, op_id2])):
if pos in self._collect_inds:
self._collect_inds.remove(pos)
op_name = OP_NAMES[op_id]
self._ops.append(OPS[op_name](inp, 1, True, repeats))
self._pos.append(pos)
self._pools.append('{}({})'.format(op_name, self._pools
[pos]))
op_name = 'sum'
self._ops.append(AggregateCell(size_1=None, size_2=None,
agg_size=inp, pre_transform=False))
self._pos.append([ind * 3 - 1, ind * 3])
self._collect_inds.append(ind * 3 + 1)
self._pools.append('{}({},{})'.format(op_name, self._pools[
ind * 3 - 1], self._pools[ind * 3]))
def forward(self, x):
feats = [x]
for pos, op in zip(self._pos, self._ops):
if isinstance(pos, list):
assert len(pos) == 2, 'Two ops must be provided'
feats.append(op(feats[pos[0]], feats[pos[1]]))
else:
feats.append(op(feats[pos]))
out = 0
for i in self._collect_inds:
out += feats[i]
return out
def prettify(self):
return ' + '.join(self._pools[i] for i in self._collect_inds)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(), 'inp': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
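# Editor's note (added comment): with the empty _mock_config() used by
# get_init_inputs, ContextualCell registers no ops and _collect_inds stays
# [0], so the eager forward reduces to 0 + x; the traced graph below is
# accordingly just an add of 0.0 to the input.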
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def conv_bn_relu(C_in, C_out, kernel_size, stride, padding, affine=True):
return nn.Sequential(nn.Conv2d(C_in, C_out, kernel_size, stride=stride,
padding=padding, bias=False), nn.BatchNorm2d(C_out, affine=affine),
nn.ReLU(inplace=False))
class AggregateCell(nn.Module):
"""
    Before aggregating, both paths should undergo a 1x1 convolution whose output
    channels equal the smallest channel size among them, and be upsampled to the
    highest resolution among them.
    pre_transform: whether to apply conv-bn-relu before summing up
"""
def __init__(self, size_1, size_2, agg_size, pre_transform=True):
super(AggregateCell, self).__init__()
self.pre_transform = pre_transform
if self.pre_transform:
self.branch_1 = conv_bn_relu(size_1, agg_size, 1, 1, 0)
self.branch_2 = conv_bn_relu(size_2, agg_size, 1, 1, 0)
def forward(self, x1, x2):
if self.pre_transform:
x1 = self.branch_1(x1)
x2 = self.branch_2(x2)
if x1.size()[2:] > x2.size()[2:]:
x2 = nn.Upsample(size=x1.size()[2:], mode='bilinear')(x2)
elif x1.size()[2:] < x2.size()[2:]:
x1 = nn.Upsample(size=x2.size()[2:], mode='bilinear')(x1)
return x1 + x2
class ContextualCellNew(nn.Module):
"""New contextual cell design
Config contains [op1, [loc1, loc2, op1, op2], [...], [...]]
"""
def __init__(self, config, inp, repeats=1):
super(ContextualCellNew, self).__init__()
self._ops = nn.ModuleList()
self._pos = []
self._collect_inds = [0]
self._pools = ['x']
for ind, op in enumerate(config):
if ind == 0:
pos = 0
op_id = op
self._collect_inds.remove(pos)
op_name = OP_NAMES[op_id]
self._ops.append(OPS[op_name](inp, 1, True, repeats))
self._pos.append(pos)
self._collect_inds.append(ind + 1)
self._pools.append('{}({})'.format(op_name, self._pools[pos]))
else:
pos1, pos2, op_id1, op_id2 = op
for ind2, (pos, op_id) in enumerate(zip([pos1, pos2], [
op_id1, op_id2])):
if pos in self._collect_inds:
self._collect_inds.remove(pos)
op_name = OP_NAMES[op_id]
self._ops.append(OPS[op_name](inp, 1, True, repeats))
self._pos.append(pos)
self._pools.append('{}({})'.format(op_name, self._pools
[pos]))
op_name = 'sum'
self._ops.append(AggregateCell(size_1=None, size_2=None,
agg_size=inp, pre_transform=False))
self._pos.append([ind * 3 - 1, ind * 3])
self._collect_inds.append(ind * 3 + 1)
self._pools.append('{}({},{})'.format(op_name, self._pools[
ind * 3 - 1], self._pools[ind * 3]))
def prettify(self):
return ' + '.join(self._pools[i] for i in self._collect_inds)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
xkp793003821/nas-segm-pytorch
|
ContextualCell
| false
| 13,112
|
[
"BSD-2-Clause"
] | 0
|
c4b59ab56bd539bf08493c6d85072849213a3d62
|
https://github.com/xkp793003821/nas-segm-pytorch/tree/c4b59ab56bd539bf08493c6d85072849213a3d62
|
MLP_g
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class MLP_g(nn.Module):
def __init__(self, opt):
super(MLP_g, self).__init__()
self.SharedFC = nn.Linear(opt.attSize + opt.nz, opt.ngh)
self.fc1 = nn.Linear(opt.ngh, opt.resSize)
self.fc2 = nn.Linear(opt.ngh, opt.resSize)
self.lrelu = nn.LeakyReLU(0.2, True)
self.relu = nn.ReLU(True)
self.apply(weights_init)
def forward(self, noise, atts, attu):
hs = torch.cat((noise, atts), 1)
hu = torch.cat((noise, attu), 1)
hs = self.lrelu(self.SharedFC(hs))
hu = self.lrelu(self.SharedFC(hu))
hs = self.lrelu(self.fc1(hs))
hu = self.lrelu(self.fc2(hu))
return hs, hu
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(attSize=4, nz=4, ngh=4, resSize=4)}]
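# A minimal usage sketch, reusing the _mock_config fields above: hs and hu
# are features generated for seen and unseen attributes through the shared
# first FC layer and two separate output heads.
#
#   opt = _mock_config(attSize=4, nz=4, ngh=4, resSize=4)
#   g = MLP_g(opt)
#   noise = torch.randn(4, opt.nz)
#   atts, attu = torch.rand(4, opt.attSize), torch.rand(4, opt.attSize)
#   hs, hu = g(noise, atts, attu)
#   assert hs.shape == hu.shape == (4, opt.resSize)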
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp10, xmask)
tl.store(out_ptr1 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_out_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp8 + tmp1
tmp10 = tmp9 > tmp3
tmp11 = tmp9 * tmp5
tmp12 = tl.where(tmp10, tmp9, tmp11)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
tl.store(in_out_ptr1 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, primals_3,
buf0, buf1, 32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (8, 4), (1, 8
), 0), out=buf2)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (8, 4), (1, 8
), 0), out=buf4)
del primals_4
buf3 = buf2
del buf2
buf5 = buf4
del buf4
triton_poi_fused_leaky_relu_1[grid(16)](buf3, buf5, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (4, 4), (1, 4
), 0), out=buf6)
buf7 = buf6
del buf6
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_leaky_relu_backward_2[grid(16)](buf7,
primals_7, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (4, 4), (1, 4
), 0), out=buf8)
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_leaky_relu_leaky_relu_backward_2[grid(16)](buf9,
primals_9, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_9
return (buf7, buf9, buf0, buf1, buf3, buf5, buf10, primals_8, buf11,
primals_6)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class MLP_gNew(nn.Module):
def __init__(self, opt):
super(MLP_gNew, self).__init__()
self.SharedFC = nn.Linear(opt.attSize + opt.nz, opt.ngh)
self.fc1 = nn.Linear(opt.ngh, opt.resSize)
self.fc2 = nn.Linear(opt.ngh, opt.resSize)
self.lrelu = nn.LeakyReLU(0.2, True)
self.relu = nn.ReLU(True)
self.apply(weights_init)
def forward(self, input_0, input_1, input_2):
primals_4 = self.SharedFC.weight
primals_5 = self.SharedFC.bias
primals_1 = self.fc1.weight
primals_7 = self.fc1.bias
primals_2 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
primals_6 = input_1
primals_8 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
|
tasfia/BMCoGAN
|
MLP_g
| false
| 13,113
|
[
"MIT"
] | 0
|
0d400c2c71dbfb69af422afc487f65afb98de8af
|
https://github.com/tasfia/BMCoGAN/tree/0d400c2c71dbfb69af422afc487f65afb98de8af
|
EncoderBias
|
import torch
import torch.nn as nn
class EncoderBias(nn.Module):
def __init__(self, input_dim1, input_dim2, batch_feature, latent_dim,
bias=False):
"""[summary]
Args:
            input_dim1 ([type]): [mod1 dimension]
            input_dim2 ([type]): [mod2 dimension]
            batch_feature ([type]): [batch dimension]
            latent_dim ([type]): [latent dimension]
bias (bool, optional): [description]. Defaults to False.
"""
super().__init__()
self.dim1_encoder_weight = nn.Parameter(torch.randn(input_dim1,
latent_dim))
self.dim2_encoder_weight = nn.Parameter(torch.randn(input_dim2,
latent_dim))
self.batch_encoder_weight = nn.Parameter(torch.randn(batch_feature,
latent_dim))
if bias:
self.dim1_bias = nn.Parameter(torch.randn(latent_dim))
self.dim2_bias = nn.Parameter(torch.randn(latent_dim))
else:
self.dim1_bias = 0
self.dim2_bias = 0
def forward(self, x):
self.dim1_weight = torch.cat([self.dim1_encoder_weight, self.
batch_encoder_weight], dim=0)
self.dim2_weight = torch.cat([self.dim2_encoder_weight, self.
batch_encoder_weight], dim=0)
x1 = x[0]
x2 = x[1]
x3 = x[2]
dim1_input = torch.cat([x1, x3], dim=1)
dim2_input = torch.cat([x2, x3], dim=1)
dim1_encoder = dim1_input @ self.dim1_weight + self.dim1_bias
dim2_encoder = dim2_input @ self.dim2_weight + self.dim2_bias
return dim1_encoder, dim2_encoder
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim1': 4, 'input_dim2': 4, 'batch_feature': 4,
'latent_dim': 4}]
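# A minimal usage sketch (hypothetical shapes): x stacks [mod1, mod2, batch]
# features, and each modality is encoded together with the batch features
# through a shared batch weight.
#
#   enc = EncoderBias(input_dim1=4, input_dim2=4, batch_feature=4,
#                     latent_dim=4, bias=True)
#   x = torch.rand(3, 5, 4)          # x[0]=mod1, x[1]=mod2, x[2]=batch
#   z1, z2 = enc(x)
#   assert z1.shape == z2.shape == (5, 4)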
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.load(in_ptr2 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp12 = tl.where(tmp4, tmp11, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
tl.store(out_ptr1 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (32 + 4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.load(in_ptr0 + (16 + 4 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.where(tmp4, tmp11, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
tl.store(out_ptr1 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, primals_3,
buf0, buf1, 32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
del primals_3
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_1[grid(32)](primals_4, buf2, buf3, 32, XBLOCK=
32, num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, buf0, out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_add_2[grid(16)](buf5, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf3, buf1, out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_add_2[grid(16)](buf7, 16, XBLOCK=16, num_warps=1,
num_stages=1)
return buf5, buf7, buf1, buf0, reinterpret_tensor(buf3, (8, 4), (1, 8), 0
), reinterpret_tensor(buf2, (8, 4), (1, 8), 0)
class EncoderBiasNew(nn.Module):
def __init__(self, input_dim1, input_dim2, batch_feature, latent_dim,
bias=False):
"""[summary]
Args:
            input_dim1 ([type]): [mod1 dimension]
            input_dim2 ([type]): [mod2 dimension]
            batch_feature ([type]): [batch dimension]
            latent_dim ([type]): [latent dimension]
bias (bool, optional): [description]. Defaults to False.
"""
super().__init__()
self.dim1_encoder_weight = nn.Parameter(torch.randn(input_dim1,
latent_dim))
self.dim2_encoder_weight = nn.Parameter(torch.randn(input_dim2,
latent_dim))
self.batch_encoder_weight = nn.Parameter(torch.randn(batch_feature,
latent_dim))
if bias:
self.dim1_bias = nn.Parameter(torch.randn(latent_dim))
self.dim2_bias = nn.Parameter(torch.randn(latent_dim))
else:
self.dim1_bias = 0
self.dim2_bias = 0
def forward(self, input_0):
primals_1 = self.dim1_encoder_weight
primals_2 = self.dim2_encoder_weight
primals_3 = self.batch_encoder_weight
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
|
xiaoyanLi629/single_cell_data_analysis
|
EncoderBias
| false
| 13,114
|
[
"MIT"
] | 0
|
39d6bbd64249385d2005a775ea1d05e210f41fbe
|
https://github.com/xiaoyanLi629/single_cell_data_analysis/tree/39d6bbd64249385d2005a775ea1d05e210f41fbe
|
CIFAR10ConvNet
|
import torch
from random import *
import torch.nn.functional as F
import torch.nn as nn
class CIFAR10ConvNet(torch.nn.Module):
def __init__(self, num_conv_layers, num_filters_1, num_filters_2,
num_filters_3, dropout_rate, num_fc_units, kernel_size):
super().__init__()
self.conv1 = nn.Conv2d(3, num_filters_1, kernel_size=kernel_size)
self.conv2 = None
self.conv3 = None
output_size = (32 - kernel_size + 1) // 2
num_output_filters = num_filters_1
if num_conv_layers > 1:
self.conv2 = nn.Conv2d(num_filters_1, num_filters_2,
kernel_size=kernel_size)
num_output_filters = num_filters_2
output_size = (output_size - kernel_size + 1) // 2
if num_conv_layers > 2:
self.conv3 = nn.Conv2d(num_filters_2, num_filters_3,
kernel_size=kernel_size)
num_output_filters = num_filters_3
output_size = (output_size - kernel_size + 1) // 2
self.dropout = nn.Dropout(p=dropout_rate)
self.conv_output_size = num_output_filters * output_size * output_size
self.fc1 = nn.Linear(self.conv_output_size, num_fc_units)
self.fc2 = nn.Linear(num_fc_units, 10)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), 2)
if self.conv2 is not None:
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
if self.conv3 is not None:
x = F.max_pool2d(F.relu(self.conv3(x)), 2)
x = self.dropout(x)
x = x.view(-1, self.conv_output_size)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def number_of_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [[], {'num_conv_layers': 1, 'num_filters_1': 4, 'num_filters_2':
4, 'num_filters_3': 4, 'dropout_rate': 0.5, 'num_fc_units': 4,
'kernel_size': 4}]
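# A minimal usage sketch with the single-conv-layer config above: for
# kernel_size=4 the pooled map is (32 - 4 + 1) // 2 = 14 per side, so
# conv_output_size = 4 * 14 * 14 = 784 and the output is log-probabilities
# over the 10 CIFAR-10 classes.
#
#   net = CIFAR10ConvNet(num_conv_layers=1, num_filters_1=4,
#                        num_filters_2=4, num_filters_3=4,
#                        dropout_rate=0.5, num_fc_units=4, kernel_size=4)
#   logp = net(torch.rand(4, 3, 32, 32))
#   assert logp.shape == (4, 10)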
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from random import *
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 13456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 841 % 4
x2 = xindex // 3364
x4 = xindex % 3364
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + 3392 * x2), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = xindex // 14 % 14
x2 = xindex // 196 % 4
x3 = xindex // 784
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3),
xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3),
xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3
), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x4, tmp15, xmask)
tl.store(out_ptr1 + x4, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (4, 784), (784, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (10, 4), (4, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 29, 29), (3364, 841, 29, 1))
buf1 = empty_strided_cuda((4, 4, 29, 29), (3392, 841, 29, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(13456)](buf0, primals_2,
buf1, 13456, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 4, 14, 14), (784, 196, 14, 1), torch.int8
)
buf3 = empty_strided_cuda((4, 4, 14, 14), (784, 196, 14, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_1[grid(3136)](buf1, buf2,
buf3, 3136, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (4, 784), (784, 1), 0),
reinterpret_tensor(primals_4, (784, 4), (1, 784), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_relu_2[grid(16)](buf5, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6,
(4, 10), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_7
buf9 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_3[grid(4)](buf6, buf9, 4, 10, XBLOCK=
1, num_warps=2, num_stages=1)
del buf6
return buf9, primals_1, primals_3, buf1, buf2, reinterpret_tensor(buf3,
(4, 784), (784, 1), 0), buf5, buf9, primals_6, primals_4
class CIFAR10ConvNetNew(torch.nn.Module):
def __init__(self, num_conv_layers, num_filters_1, num_filters_2,
num_filters_3, dropout_rate, num_fc_units, kernel_size):
super().__init__()
self.conv1 = nn.Conv2d(3, num_filters_1, kernel_size=kernel_size)
self.conv2 = None
self.conv3 = None
output_size = (32 - kernel_size + 1) // 2
num_output_filters = num_filters_1
if num_conv_layers > 1:
self.conv2 = nn.Conv2d(num_filters_1, num_filters_2,
kernel_size=kernel_size)
num_output_filters = num_filters_2
output_size = (output_size - kernel_size + 1) // 2
if num_conv_layers > 2:
self.conv3 = nn.Conv2d(num_filters_2, num_filters_3,
kernel_size=kernel_size)
num_output_filters = num_filters_3
output_size = (output_size - kernel_size + 1) // 2
self.dropout = nn.Dropout(p=dropout_rate)
self.conv_output_size = num_output_filters * output_size * output_size
self.fc1 = nn.Linear(self.conv_output_size, num_fc_units)
self.fc2 = nn.Linear(num_fc_units, 10)
def number_of_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
xinranzhu/GPTune-1
|
CIFAR10ConvNet
| false
| 13,115
|
[
"BSD-3-Clause-LBNL"
] | 0
|
1e502295e790ab68990f657492243fd4fb3dfc0a
|
https://github.com/xinranzhu/GPTune-1/tree/1e502295e790ab68990f657492243fd4fb3dfc0a
|
maxout
|
import torch
import torch.nn as nn
import torch.utils.data
class maxout(nn.Module):
def __init__(self, in_feature, out_feature, pool_size):
super(maxout, self).__init__()
self.in_feature = in_feature
self.out_feature = out_feature
self.pool_size = pool_size
self.linear = nn.Linear(in_feature, out_feature * pool_size)
def forward(self, x):
output = self.linear(x)
output = output.view(-1, self.out_feature, self.pool_size)
output = output.max(2)[0]
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_feature': 4, 'out_feature': 4, 'pool_size': 4}]
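# A minimal usage sketch: the linear layer expands features by pool_size,
# then a max over each group of pool_size units gives the maxout output.
# Note the view flattens all leading dimensions, so the result is 2-D.
#
#   m = maxout(in_feature=4, out_feature=4, pool_size=4)
#   y = m(torch.rand(4, 4, 4, 4))
#   assert y.shape == (64, 4)       # 4 * 4 * 4 = 64 flattened rows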
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tmp0 > tmp1
tmp8 = tmp0 == tmp1
tmp9 = tmp0 != tmp0
tmp10 = tmp1 != tmp1
tmp11 = tmp9 > tmp10
tmp12 = tmp7 | tmp11
tmp13 = tmp9 & tmp10
tmp14 = tmp8 | tmp13
tmp15 = tl.full([1], 0, tl.int64)
tmp16 = tl.full([1], 1, tl.int64)
tmp17 = tmp15 < tmp16
tmp18 = tmp14 & tmp17
tmp19 = tmp12 | tmp18
tmp20 = tl.where(tmp19, tmp0, tmp1)
tmp21 = tl.where(tmp19, tmp15, tmp16)
tmp22 = tmp20 > tmp3
tmp23 = tmp20 == tmp3
tmp24 = tmp20 != tmp20
tmp25 = tmp3 != tmp3
tmp26 = tmp24 > tmp25
tmp27 = tmp22 | tmp26
tmp28 = tmp24 & tmp25
tmp29 = tmp23 | tmp28
tmp30 = tl.full([1], 2, tl.int64)
tmp31 = tmp21 < tmp30
tmp32 = tmp29 & tmp31
tmp33 = tmp27 | tmp32
tmp34 = tl.where(tmp33, tmp20, tmp3)
tmp35 = tl.where(tmp33, tmp21, tmp30)
tmp36 = tmp34 > tmp5
tmp37 = tmp34 == tmp5
tmp38 = tmp34 != tmp34
tmp39 = tmp5 != tmp5
tmp40 = tmp38 > tmp39
tmp41 = tmp36 | tmp40
tmp42 = tmp38 & tmp39
tmp43 = tmp37 | tmp42
tmp44 = tl.full([1], 3, tl.int64)
tmp45 = tmp35 < tmp44
tmp46 = tmp43 & tmp45
tmp47 = tmp41 | tmp46
tl.where(tmp47, tmp34, tmp5)
tmp49 = tl.where(tmp47, tmp35, tmp44)
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp49, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_max_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf0
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 1), 0)
class maxoutNew(nn.Module):
def __init__(self, in_feature, out_feature, pool_size):
super(maxoutNew, self).__init__()
self.in_feature = in_feature
self.out_feature = out_feature
self.pool_size = pool_size
self.linear = nn.Linear(in_feature, out_feature * pool_size)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
xuehuiping/Global-Encoding
|
maxout
| false
| 13,116
|
[
"MIT"
] | 0
|
1cba2746162ac569b430aa1ba5bca58183416ee7
|
https://github.com/xuehuiping/Global-Encoding/tree/1cba2746162ac569b430aa1ba5bca58183416ee7
|
Conv2d
|
import math
import torch
import torch.nn.functional
import torch.backends.cudnn
class Conv2d(torch.nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=False):
super().__init__(in_channels, out_channels, kernel_size, stride, 0,
dilation, groups, bias)
def forward(self, x):
s = self.stride
d = self.dilation
k = self.weight.shape[-2:]
h, w = x.size()[-2:]
pad_h = max((math.ceil(h / s[0]) - 1) * s[0] + (k[0] - 1) * d[0] +
1 - h, 0)
pad_w = max((math.ceil(w / s[1]) - 1) * s[1] + (k[1] - 1) * d[1] +
1 - w, 0)
if pad_h > 0 or pad_w > 0:
x = torch.nn.functional.pad(x, [pad_w // 2, pad_w - pad_w // 2,
pad_h // 2, pad_h - pad_h // 2], value=0)
return torch.nn.functional.conv2d(x, self.weight, self.bias, self.
stride, (0, 0), self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
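# A minimal usage sketch: this implements TensorFlow-style "SAME" padding,
# so the spatial size is preserved for stride 1 regardless of kernel size.
#
#   conv = Conv2d(in_channels=4, out_channels=4, kernel_size=4)
#   y = conv(torch.rand(4, 4, 4, 4))    # input padded to 7x7 internally
#   assert y.shape == (4, 4, 4, 4)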
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 7 % 7
x0 = xindex % 7
x2 = xindex // 49
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp11, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(784)](primals_2, buf0, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
return buf1, primals_1, buf0
class Conv2dNew(torch.nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=False):
super().__init__(in_channels, out_channels, kernel_size, stride, 0,
dilation, groups, bias)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
xolbynz/EfficientNetV2-PyTorch-
|
Conv2d
| false
| 13,117
|
[
"Apache-2.0"
] | 0
|
4b5039755adbd0e5f8ee0611e3d6b5be8c13ecd2
|
https://github.com/xolbynz/EfficientNetV2-PyTorch-/tree/4b5039755adbd0e5f8ee0611e3d6b5be8c13ecd2
|
MNISTConvNet
|
import torch
from random import *
import torch.nn.functional as F
import torch.nn as nn
class MNISTConvNet(torch.nn.Module):
def __init__(self, num_conv_layers, num_filters_1, num_filters_2,
num_filters_3, dropout_rate, num_fc_units, kernel_size):
super().__init__()
self.conv1 = nn.Conv2d(1, num_filters_1, kernel_size=kernel_size)
self.conv2 = None
self.conv3 = None
output_size = (28 - kernel_size + 1) // 2
num_output_filters = num_filters_1
if num_conv_layers > 1:
self.conv2 = nn.Conv2d(num_filters_1, num_filters_2,
kernel_size=kernel_size)
num_output_filters = num_filters_2
output_size = (output_size - kernel_size + 1) // 2
if num_conv_layers > 2:
self.conv3 = nn.Conv2d(num_filters_2, num_filters_3,
kernel_size=kernel_size)
num_output_filters = num_filters_3
output_size = (output_size - kernel_size + 1) // 2
self.dropout = nn.Dropout(p=dropout_rate)
self.conv_output_size = num_output_filters * output_size * output_size
self.fc1 = nn.Linear(self.conv_output_size, num_fc_units)
self.fc2 = nn.Linear(num_fc_units, 10)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), 2)
if self.conv2 is not None:
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
if self.conv3 is not None:
x = F.max_pool2d(F.relu(self.conv3(x)), 2)
x = self.dropout(x)
x = x.view(-1, self.conv_output_size)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def number_of_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {'num_conv_layers': 1, 'num_filters_1': 4, 'num_filters_2':
4, 'num_filters_3': 4, 'dropout_rate': 0.5, 'num_fc_units': 4,
'kernel_size': 4}]
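# A minimal usage sketch; note fc1 is sized from the 28x28 MNIST assumption
# ((28 - 4 + 1) // 2 = 12, so conv_output_size = 4 * 12 * 12 = 576). The
# 64x64 probe input above only works because 4 * 4 * 30 * 30 = 14400 is
# divisible by 576, yielding 25 rows instead of 4.
#
#   net = MNISTConvNet(1, 4, 4, 4, 0.5, 4, kernel_size=4)
#   assert net(torch.rand(4, 1, 28, 28)).shape == (4, 10)
#   assert net(torch.rand(4, 1, 64, 64)).shape == (25, 10)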
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from random import *
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 59536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3721 % 4
x0 = xindex % 3721
x4 = xindex // 3721
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3744 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 14400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x4 = xindex // 900
x3 = xindex // 3600
x5 = xindex % 3600
tmp0 = tl.load(in_ptr0 + (2 * x0 + 122 * x1 + 3744 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 122 * x1 + 3744 * x4), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (61 + 2 * x0 + 122 * x1 + 3744 * x4), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (62 + 2 * x0 + 122 * x1 + 3744 * x4), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x5 + 3712 * x3), tmp15, xmask)
tl.store(out_ptr1 + (x5 + 3616 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_view_2(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 14400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3616 * (x0 // 3600) + x0 % 3600), xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 100
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_4(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 25
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (4, 576), (576, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (10, 4), (4, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 61, 61), (14884, 3721, 61, 1))
buf1 = empty_strided_cuda((4, 4, 61, 61), (14976, 3744, 61, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(59536)](buf0, primals_2,
buf1, 59536, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 4, 30, 30), (3712, 900, 30, 1), torch
.int8)
buf3 = empty_strided_cuda((4, 4, 30, 30), (3616, 900, 30, 1), torch
.float32)
triton_poi_fused_max_pool2d_with_indices_1[grid(14400)](buf1, buf2,
buf3, 14400, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((25, 576), (576, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_view_2[grid(14400)](buf3,
buf4, 14400, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((25, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (576, 4), (1,
576), 0), out=buf5)
buf6 = buf5
del buf5
triton_poi_fused_relu_3[grid(100)](buf6, primals_5, 100, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((25, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_7, buf6, reinterpret_tensor(primals_6,
(4, 10), (1, 4), 0), alpha=1, beta=1, out=buf7)
del primals_7
buf10 = empty_strided_cuda((25, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_4[grid(25)](buf7, buf10, 25, 10,
XBLOCK=32, num_warps=4, num_stages=1)
del buf7
return (buf10, primals_1, primals_3, buf1, buf2, buf4, buf6, buf10,
primals_6, primals_4)
class MNISTConvNetNew(torch.nn.Module):
def __init__(self, num_conv_layers, num_filters_1, num_filters_2,
num_filters_3, dropout_rate, num_fc_units, kernel_size):
super().__init__()
self.conv1 = nn.Conv2d(1, num_filters_1, kernel_size=kernel_size)
self.conv2 = None
self.conv3 = None
output_size = (28 - kernel_size + 1) // 2
num_output_filters = num_filters_1
if num_conv_layers > 1:
self.conv2 = nn.Conv2d(num_filters_1, num_filters_2,
kernel_size=kernel_size)
num_output_filters = num_filters_2
output_size = (output_size - kernel_size + 1) // 2
if num_conv_layers > 2:
self.conv3 = nn.Conv2d(num_filters_2, num_filters_3,
kernel_size=kernel_size)
num_output_filters = num_filters_3
output_size = (output_size - kernel_size + 1) // 2
self.dropout = nn.Dropout(p=dropout_rate)
self.conv_output_size = num_output_filters * output_size * output_size
self.fc1 = nn.Linear(self.conv_output_size, num_fc_units)
self.fc2 = nn.Linear(num_fc_units, 10)
def number_of_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
xinranzhu/GPTune-1
|
MNISTConvNet
| false
| 13,118
|
[
"BSD-3-Clause-LBNL"
] | 0
|
1e502295e790ab68990f657492243fd4fb3dfc0a
|
https://github.com/xinranzhu/GPTune-1/tree/1e502295e790ab68990f657492243fd4fb3dfc0a
|
ConvBlockD
|
import torch
import torch.nn as nn
class ConvBlockD(nn.Module):
def __init__(self, in_channels, out_channels, groups=3, ker_size=2):
super(ConvBlockD, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=ker_size, dilation=ker_size, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
def get_inputs():
return [torch.rand([4, 18, 64, 64])]
def get_init_inputs():
return [[], {'in_channels': 18, 'out_channels': 4}]
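# A minimal usage sketch: a grouped 1x1 conv, a dilated depthwise 3x3 conv,
# then a pointwise 1x1 conv, all weight-normalised. With padding == dilation
# == ker_size the spatial size is preserved (in_channels must be divisible
# by groups).
#
#   block = ConvBlockD(in_channels=18, out_channels=4, groups=3, ker_size=2)
#   y = block(torch.rand(4, 18, 64, 64))
#   assert y.shape == (4, 4, 64, 64)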
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tl.store(out_ptr0 + x0, tmp17, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 6
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 18
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 18
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 9 * x0), tmp9, rmask & xmask)
@triton.jit
def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_3, (18,), (1,))
assert_size_stride(primals_4, (4, 18, 64, 64), (73728, 4096, 64, 1))
assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_7, (18,), (1,))
assert_size_stride(primals_8, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_9, (4, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_2, buf0,
18, XBLOCK=32, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_2,
primals_1, buf0, buf1, 108, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(primals_4, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(294912)](buf3, primals_3,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32
)
buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf4
buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf5, primals_6,
primals_5, buf6, 18, 9, XBLOCK=32, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_2[grid(294912)](buf8, primals_7,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf10 = reinterpret_tensor(buf9, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf9
buf11 = empty_strided_cuda((4, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(4)](buf10, primals_9,
primals_8, buf11, 4, 18, XBLOCK=1, num_warps=2, num_stages=1)
buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_5[grid(65536)](buf13, primals_10,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_10
return (buf13, buf1, buf6, buf11, primals_1, primals_2, primals_4,
primals_5, primals_6, primals_8, primals_9, buf0, buf1, buf3, buf5,
buf6, buf8, buf10, buf11)
class ConvBlockDNew(nn.Module):
def __init__(self, in_channels, out_channels, groups=3, ker_size=2):
super(ConvBlockDNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=ker_size, dilation=ker_size, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, input_0):
primals_3 = self.group_conv.bias
primals_1 = self.group_conv.weight_g
primals_2 = self.group_conv.weight_v
primals_7 = self.depth_conv.bias
primals_5 = self.depth_conv.weight_g
primals_6 = self.depth_conv.weight_v
primals_10 = self.point_conv.bias
primals_8 = self.point_conv.weight_g
primals_9 = self.point_conv.weight_v
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
wwjfsfs/wwjyyds
|
ConvBlockD
| false
| 13,119
|
[
"MIT"
] | 0
|
80cd6267fde7cd98838078a0d5178a557ceb7414
|
https://github.com/wwjfsfs/wwjyyds/tree/80cd6267fde7cd98838078a0d5178a557ceb7414
|
EmbedNet
|
from _paritybench_helpers import _mock_config
import torch
import torch.utils.data
import torch
from torchvision.transforms import functional as F
from torch import nn
import torch.nn.functional as F
class EmbedNet(nn.Module):
def __init__(self, cfg):
super(EmbedNet, self).__init__()
self.embed_conv1 = nn.Conv2d(1024, 512, kernel_size=1, stride=1)
self.embed_conv2 = nn.Conv2d(512, 512, kernel_size=3, stride=1,
padding=1)
self.embed_conv3 = nn.Conv2d(512, 2048, kernel_size=1, stride=1)
for l in [self.embed_conv1, self.embed_conv2, self.embed_conv3]:
nn.init.kaiming_uniform_(l.weight, a=1)
nn.init.zeros_(l.bias)
def forward(self, x):
x = F.relu(self.embed_conv1(x))
x = F.relu(self.embed_conv2(x))
x = self.embed_conv3(x)
return x
def get_inputs():
return [torch.rand([4, 1024, 64, 64])]
def get_init_inputs():
return [[], {'cfg': _mock_config()}]
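# A minimal usage sketch (cfg is accepted but unused by the module, hence
# the empty _mock_config above); the block maps 1024 -> 512 -> 512 -> 2048
# channels at constant resolution.
#
#   net = EmbedNet(_mock_config())
#   y = net(torch.rand(1, 1024, 16, 16))
#   assert y.shape == (1, 2048, 16, 16)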
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = yindex // 1024
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None)
tl.store(out_ptr0 + (y0 + 1024 * x2 + 4194304 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 2048
y1 = yindex // 2048
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2048 * x2 + 8388608 * y1), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (512, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_2, (512,), (1,))
assert_size_stride(primals_3, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
assert_size_stride(primals_4, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_7, (2048,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 1, 65536,
1024), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(4096, 4096)](primals_3, buf0, 4096, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_1[grid(262144, 9)](primals_4, buf1, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf2 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_2[grid(8388608)](buf3, primals_2,
8388608, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf4 = extern_kernels.convolution(buf3, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(8388608)](buf5, primals_5,
8388608, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 2048, 64, 64), (8388608, 1, 131072, 2048))
buf7 = empty_strided_cuda((4, 2048, 64, 64), (8388608, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_3[grid(8192, 4096)](buf6, primals_7,
buf7, 8192, 4096, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1)
del buf6
del primals_7
return buf7, primals_1, buf0, buf1, primals_6, buf3, buf5
class EmbedNetNew(nn.Module):
def __init__(self, cfg):
super(EmbedNetNew, self).__init__()
self.embed_conv1 = nn.Conv2d(1024, 512, kernel_size=1, stride=1)
self.embed_conv2 = nn.Conv2d(512, 512, kernel_size=3, stride=1,
padding=1)
self.embed_conv3 = nn.Conv2d(512, 2048, kernel_size=1, stride=1)
for l in [self.embed_conv1, self.embed_conv2, self.embed_conv3]:
nn.init.kaiming_uniform_(l.weight, a=1)
nn.init.zeros_(l.bias)
def forward(self, input_0):
primals_1 = self.embed_conv1.weight
primals_2 = self.embed_conv1.bias
primals_4 = self.embed_conv2.weight
primals_5 = self.embed_conv2.bias
primals_6 = self.embed_conv3.weight
primals_7 = self.embed_conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
ron5569/mega.pytorch
|
EmbedNet
| false
| 13,120
|
[
"BSD-2-Clause"
] | 0
|
b845b7050da307576cd98ab73eb7be4e9a9088bc
|
https://github.com/ron5569/mega.pytorch/tree/b845b7050da307576cd98ab73eb7be4e9a9088bc
|
ESA
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ESA(nn.Module):
def __init__(self, channel=64, reduction=4, bias=True):
super(ESA, self).__init__()
self.r_nc = channel // reduction
self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1)
self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1)
self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride=
2, padding=0)
self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x1 = self.conv1(x)
x2 = F.max_pool2d(self.conv2(x1), kernel_size=7, stride=3)
x2 = self.relu(self.conv3(x2))
x2 = self.relu(self.conv4(x2))
x2 = F.interpolate(self.conv5(x2), (x.size(2), x.size(3)), mode=
'bilinear', align_corners=False)
x2 = self.conv6(x2 + self.conv21(x1))
return x.mul(self.sigmoid(x2))
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
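# A minimal usage sketch: the strided-conv + max-pool branch computes a
# coarse spatial map that is upsampled back to the input size, combined
# with a 1x1 shortcut, and used to gate x through a sigmoid.
#
#   esa = ESA(channel=64, reduction=4)
#   x = torch.rand(2, 64, 64, 64)
#   y = esa(x)
#   assert y.shape == x.shape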
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 961 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 5184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused__to_copy_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_clamp_4(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_6(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x5 = xindex // 4096
x2 = xindex // 4096 % 16
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr8 + x6, None)
tmp38 = tl.load(in_ptr9 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 9 * tmp4 + 81 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 9 * tmp4 + 81 * x5), None,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 9 * tmp25 + 81 * x5), None,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 9 * tmp25 + 81 * x5), None,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp36 = tmp21 + tmp35
tmp39 = tmp37 + tmp38
tmp40 = tmp36 + tmp39
tl.store(in_out_ptr0 + x6, tmp40, None)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_7(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp5, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (16, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (16, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_13, (16,), (1,))
assert_size_stride(primals_14, (64, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_15, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 31, 31), (15376, 961, 31, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(61504)](buf3, primals_5, 61504,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf4 = torch.ops.aten.max_pool2d_with_indices.default(buf3, [7, 7],
[3, 3])
buf5 = buf4[0]
buf6 = buf4[1]
del buf4
buf7 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 16, 9, 9), (1296, 81, 9, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_2[grid(5184)](buf8, primals_7,
5184, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 16, 9, 9), (1296, 81, 9, 1))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_2[grid(5184)](buf10, primals_9,
5184, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 16, 9, 9), (1296, 81, 9, 1))
buf12 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_3[grid(64)](buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_4[grid(64)](buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_3[grid(64)](buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_4[grid(64)](buf15, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5[grid(64)](buf16,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5[grid(64)](buf18,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf20 = extern_kernels.convolution(buf1, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf19 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1),
torch.float32)
buf21 = buf19
del buf19
triton_poi_fused__unsafe_index_add_convolution_mul_sub_6[grid(262144)](
buf21, buf12, buf14, buf11, primals_11, buf15, buf16, buf13,
buf18, buf20, primals_13, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del buf11
del buf20
del primals_11
del primals_13
buf22 = extern_kernels.convolution(buf21, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf23 = buf22
del buf22
buf24 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_mul_sigmoid_7[grid(1048576)](buf23,
primals_15, primals_3, buf24, 1048576, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_15
return (buf24, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf1, buf3, buf5, buf6, buf8,
buf10, buf12, buf13, buf14, buf15, buf16, buf18, buf21, buf23)
class ESANew(nn.Module):
def __init__(self, channel=64, reduction=4, bias=True):
super(ESANew, self).__init__()
self.r_nc = channel // reduction
self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1)
self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1)
self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride=
2, padding=0)
self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_12 = self.conv21.weight
primals_5 = self.conv21.bias
primals_4 = self.conv2.weight
primals_7 = self.conv2.bias
primals_6 = self.conv3.weight
primals_9 = self.conv3.bias
primals_8 = self.conv4.weight
primals_11 = self.conv4.bias
primals_10 = self.conv5.weight
primals_13 = self.conv5.bias
primals_14 = self.conv6.weight
primals_15 = self.conv6.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
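A hedged equivalence check between the eager ESA and the compiled ESANew above. It needs a CUDA device because call() hard-codes torch.cuda.set_device(0); the tolerance is a loose guess, since the inductor bilinear-upsample kernel may differ from eager execution in the last few bits.
import torch
eager = ESA().cuda()
compiled = ESANew().cuda()
compiled.load_state_dict(eager.state_dict())  # both register conv1..conv6 and conv21 under the same names
x = torch.rand(4, 64, 64, 64, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x), compiled(x), atol=1e-4))  # expected: True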
|
wwjfsfs/wwjyyds
|
ESA
| false
| 13,121
|
[
"MIT"
] | 0
|
80cd6267fde7cd98838078a0d5178a557ceb7414
|
https://github.com/wwjfsfs/wwjyyds/tree/80cd6267fde7cd98838078a0d5178a557ceb7414
|
PolicyNetwork
|
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
class PolicyNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size, learning_rate=
0.0003):
super(PolicyNetwork, self).__init__()
self.num_actions = num_actions
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.linear2 = nn.Linear(hidden_size, num_actions)
self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.softmax(self.linear2(x), dim=1)
return x
def get_action(self, state):
state = torch.from_numpy(state).float().unsqueeze(0)
probs = self.forward(Variable(state))
highest_prob_action = np.random.choice(self.num_actions, p=np.
squeeze(probs.detach().numpy()))
log_prob = torch.log(probs.squeeze(0)[highest_prob_action])
return highest_prob_action, log_prob
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_actions': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5
class PolicyNetworkNew(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size, learning_rate=
0.0003):
super(PolicyNetworkNew, self).__init__()
self.num_actions = num_actions
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.linear2 = nn.Linear(hidden_size, num_actions)
self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
def get_action(self, state):
state = torch.from_numpy(state).float().unsqueeze(0)
probs = self.forward(Variable(state))
highest_prob_action = np.random.choice(self.num_actions, p=np.
squeeze(probs.detach().numpy()))
log_prob = torch.log(probs.squeeze(0)[highest_prob_action])
return highest_prob_action, log_prob
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
xuzhiyuan1528/tf2basic
|
PolicyNetwork
| false
| 13,122
|
[
"Apache-2.0"
] | 0
|
52ed7d8bcc72f16e198754f5f92a583fe16d544e
|
https://github.com/xuzhiyuan1528/tf2basic/tree/52ed7d8bcc72f16e198754f5f92a583fe16d544e
|
FCUDown
|
import torch
import torch.nn as nn
from functools import partial
class FCUDown(nn.Module):
""" CNN feature maps -> Transformer patch embeddings
"""
def __init__(self, inplanes, outplanes, dw_stride, act_layer=nn.GELU,
norm_layer=partial(nn.LayerNorm, eps=1e-06)):
super(FCUDown, self).__init__()
self.dw_stride = dw_stride
self.conv_project = nn.Conv2d(inplanes, outplanes, kernel_size=1,
stride=1, padding=0)
self.sample_pooling = nn.AvgPool2d(kernel_size=dw_stride, stride=
dw_stride)
self.ln = norm_layer(outplanes)
self.act = act_layer()
def forward(self, x, x_t):
x = self.conv_project(x)
x = self.sample_pooling(x).flatten(2).transpose(1, 2)
x = self.ln(x)
x = self.act(x)
x = torch.cat([x_t[:, 0][:, None, :], x], dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'outplanes': 4, 'dw_stride': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from functools import partial
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_convolution_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 5
x0 = xindex % 4
x2 = xindex // 20
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x0 + 16 * x2 + (-1 + x1)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = 0.7071067811865476
tmp13 = tmp9 * tmp12
tmp14 = libdevice.erf(tmp13)
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = tmp11 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp6, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp5, tmp19)
tl.store(out_ptr0 + x3, tmp20, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4), (64, 16, 4, 1), 0), primals_1, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_convolution_0[grid(64)](buf1, primals_2,
buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](buf2, buf3, buf4, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(64)](buf2, buf3, buf4,
primals_4, primals_5, buf5, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf3
del buf4
buf6 = empty_strided_cuda((4, 5, 4), (20, 4, 1), torch.float32)
triton_poi_fused_cat_3[grid(80)](primals_6, buf5, buf6, 80, XBLOCK=
128, num_warps=4, num_stages=1)
del buf5
del primals_6
return buf6, primals_1, primals_4, primals_5, reinterpret_tensor(primals_3,
(1, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4
), (16, 4, 1), 0), buf2
class FCUDownNew(nn.Module):
""" CNN feature maps -> Transformer patch embeddings
"""
def __init__(self, inplanes, outplanes, dw_stride, act_layer=nn.GELU,
norm_layer=partial(nn.LayerNorm, eps=1e-06)):
super(FCUDownNew, self).__init__()
self.dw_stride = dw_stride
self.conv_project = nn.Conv2d(inplanes, outplanes, kernel_size=1,
stride=1, padding=0)
self.sample_pooling = nn.AvgPool2d(kernel_size=dw_stride, stride=
dw_stride)
self.ln = norm_layer(outplanes)
self.act = act_layer()
def forward(self, input_0, input_1):
primals_1 = self.conv_project.weight
primals_2 = self.conv_project.bias
primals_4 = self.ln.weight
primals_5 = self.ln.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
xuewengeophysics/Conformer
|
FCUDown
| false
| 13,123
|
[
"Apache-2.0"
] | 0
|
e769a1ac9ab110dae2a356a4de1e06ccd0e95041
|
https://github.com/xuewengeophysics/Conformer/tree/e769a1ac9ab110dae2a356a4de1e06ccd0e95041
|
ConvBlock
|
import torch
import torch.nn as nn
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, groups=3):
super(ConvBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=1, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, x):
x = self.group_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
def get_inputs():
return [torch.rand([4, 18, 64, 64])]
def get_init_inputs():
return [[], {'in_channels': 18, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tl.store(out_ptr0 + x0, tmp17, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 6
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 18
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_per_fused__weight_norm_interface_3(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 18
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 9 * x0), tmp9, rmask & xmask)
@triton.jit
def triton_per_fused__weight_norm_interface_4(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 18
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 18 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 18 * x0), tmp9, rmask & xmask)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (18, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_3, (18,), (1,))
assert_size_stride(primals_4, (4, 18, 64, 64), (73728, 4096, 64, 1))
assert_size_stride(primals_5, (18, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_6, (18, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_7, (18,), (1,))
assert_size_stride(primals_8, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_9, (4, 18, 1, 1), (18, 1, 1, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((18, 1, 1, 1), (1, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0[grid(18)](primals_2, buf0,
18, XBLOCK=32, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((18, 6, 1, 1), (6, 1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(108)](primals_2,
primals_1, buf0, buf1, 108, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(primals_4, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf2, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(294912)](buf3, primals_3,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((18, 1, 1, 1), (1, 18, 18, 18), torch.float32
)
buf5 = reinterpret_tensor(buf4, (18, 1, 1, 1), (1, 1, 1, 1), 0)
del buf4
buf6 = empty_strided_cuda((18, 1, 3, 3), (9, 9, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_3[grid(18)](buf5, primals_6,
primals_5, buf6, 18, 9, XBLOCK=32, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf3, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=18, bias=None)
assert_size_stride(buf7, (4, 18, 64, 64), (73728, 4096, 64, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_2[grid(294912)](buf8, primals_7,
294912, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf10 = reinterpret_tensor(buf9, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf9
buf11 = empty_strided_cuda((4, 18, 1, 1), (18, 1, 1, 1), torch.float32)
triton_per_fused__weight_norm_interface_4[grid(4)](buf10, primals_9,
primals_8, buf11, 4, 18, XBLOCK=1, num_warps=2, num_stages=1)
buf12 = extern_kernels.convolution(buf8, buf11, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_5[grid(65536)](buf13, primals_10,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_10
return (buf13, buf1, buf6, buf11, primals_1, primals_2, primals_4,
primals_5, primals_6, primals_8, primals_9, buf0, buf1, buf3, buf5,
buf6, buf8, buf10, buf11)
class ConvBlockNew(nn.Module):
def __init__(self, in_channels, out_channels, groups=3):
super(ConvBlockNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
def wn(x):
return torch.nn.utils.weight_norm(x)
self.group_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
1, groups=self.groups))
self.depth_conv = wn(nn.Conv2d(self.in_channels, self.in_channels,
3, padding=1, groups=in_channels))
self.point_conv = wn(nn.Conv2d(self.in_channels, self.out_channels,
1, groups=1))
def forward(self, input_0):
primals_3 = self.group_conv.bias
primals_1 = self.group_conv.weight_g
primals_2 = self.group_conv.weight_v
primals_7 = self.depth_conv.bias
primals_5 = self.depth_conv.weight_g
primals_6 = self.depth_conv.weight_v
primals_10 = self.point_conv.bias
primals_8 = self.point_conv.weight_g
primals_9 = self.point_conv.weight_v
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
wwjfsfs/wwjyyds
|
ConvBlock
| false
| 13,124
|
[
"MIT"
] | 0
|
80cd6267fde7cd98838078a0d5178a557ceb7414
|
https://github.com/wwjfsfs/wwjyyds/tree/80cd6267fde7cd98838078a0d5178a557ceb7414
|
TVLoss
|
import torch
import torch.utils.data
import torch
import torch.nn as nn
class TVLoss(nn.Module):
def __init__(self, TVLoss_weight=1.0):
super(TVLoss, self).__init__()
self.TVLoss_weight = TVLoss_weight
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = self._tensor_size(x[:, :, 1:, :])
count_w = self._tensor_size(x[:, :, :, 1:])
h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2).sum()
w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2).sum()
return self.TVLoss_weight * 2 * (h_tv / count_h + w_tv / count_w
) / batch_size
def _tensor_size(self, t):
return t.size()[1] * t.size()[2] * t.size()[3]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
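A hand-checked example of the loss above on a tiny tensor, so the normalization is easy to follow:
import torch
x = torch.tensor([[[[0., 1., 2., 3.],
                    [4., 5., 6., 7.]]]])  # shape (1, 1, 2, 4)
# h_tv = 4 * 4^2 = 64 with count_h = 1*1*4 = 4; w_tv = 6 * 1^2 = 6 with count_w = 1*2*3 = 6
# loss = 1.0 * 2 * (64/4 + 6/6) / 1 = 34.0
print(TVLoss(TVLoss_weight=1.0)(x).item())  # 34.0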
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 3
r3 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = 0.020833333333333332
tmp17 = tmp7 * tmp16
tmp18 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = 2.0
tmp21 = tmp19 * tmp20
tmp22 = 0.25
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_pow_sub_sum_0[grid(1)](buf2, arg0_1, 1,
192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TVLossNew(nn.Module):
def __init__(self, TVLoss_weight=1.0):
super(TVLossNew, self).__init__()
self.TVLoss_weight = TVLoss_weight
def _tensor_size(self, t):
return t.size()[1] * t.size()[2] * t.size()[3]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
xyp8023/pytorch-CycleGAN-and-pix2pix
|
TVLoss
| false
| 13,125
|
[
"BSD-3-Clause"
] | 0
|
dce720d985a951a3cfed470ef4c2ef206c0e0817
|
https://github.com/xyp8023/pytorch-CycleGAN-and-pix2pix/tree/dce720d985a951a3cfed470ef4c2ef206c0e0817
|
C1
|
import torch
import torch.nn as nn
from collections import OrderedDict
class C1(nn.Module):
def __init__(self):
super(C1, self).__init__()
self.c1 = nn.Sequential(OrderedDict([('c1', nn.Conv2d(1, 6,
kernel_size=(5, 5))), ('relu1', nn.ReLU()), ('s1', nn.MaxPool2d
(kernel_size=(2, 2), stride=2))]))
def forward(self, img):
output = self.c1(img)
return output
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
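A quick shape check of the LeNet-style stage above (valid 5x5 conv, then 2x2/2 max-pool):
import torch
print(C1()(torch.rand(4, 1, 64, 64)).shape)  # (4, 6, 30, 30): 64-5+1 = 60, halved to 30 by pooling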
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 86400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 6
x0 = xindex % 3600
x4 = xindex // 3600
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 21600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x2 = xindex // 900
x5 = xindex
x4 = xindex // 5400
x6 = xindex % 5400
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x5, tmp6, xmask)
tl.store(out_ptr1 + (x6 + 5504 * x4), tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (6, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 60, 60), (21600, 3600, 60, 1))
buf1 = empty_strided_cuda((4, 6, 60, 60), (21696, 3616, 60, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(86400)](buf0, primals_2,
buf1, 86400, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 6, 30, 30), (5400, 900, 30, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 30, 30), (5504, 900, 30, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(21600)](buf1, buf2,
buf3, 21600, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1, buf3
class C1New(nn.Module):
def __init__(self):
super(C1New, self).__init__()
self.c1 = nn.Sequential(OrderedDict([('c1', nn.Conv2d(1, 6,
kernel_size=(5, 5))), ('relu1', nn.ReLU()), ('s1', nn.MaxPool2d
(kernel_size=(2, 2), stride=2))]))
def forward(self, input_0):
primals_1 = self.c1.c1.weight
primals_2 = self.c1.c1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
xxchenxx/otdd
|
C1
| false
| 13,126
|
[
"MIT"
] | 0
|
e63d1d170fed36957052b7bb0a0af1553b980381
|
https://github.com/xxchenxx/otdd/tree/e63d1d170fed36957052b7bb0a0af1553b980381
|
BoF_Pooling
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BoF_Pooling(nn.Module):
def __init__(self, n_codewords, features, spatial_level=0, **kwargs):
super(BoF_Pooling, self).__init__()
"""
Initializes a BoF Pooling layer
:param n_codewords: the number of the codewords to be used
:param spatial_level: 0 -> no spatial pooling, 1 -> spatial pooling at level 1 (4 regions). Note that the
codebook is shared between the different spatial regions
:param kwargs:
"""
self.N_k = n_codewords
self.spatial_level = spatial_level
self.V, self.sigmas = None, None
self.relu = nn.ReLU()
self.init(features)
self.softmax = nn.Softmax(dim=1)
def init(self, features):
self.V = nn.Parameter(nn.init.uniform_(torch.empty((self.N_k,
features, 1, 1), requires_grad=True)))
self.sigmas = nn.Parameter(nn.init.constant_(torch.empty((1, self.
N_k, 1, 1), requires_grad=True), 0.1))
def forward(self, input):
x_square = torch.sum(input=input, dim=1, keepdim=True)
y_square = torch.sum(self.V ** 2, dim=1, keepdim=True).permute([3,
0, 1, 2])
dists = x_square + y_square - 2 * F.conv2d(input, self.V)
dists = self.relu(dists)
quantized_features = self.softmax(-dists / self.sigmas ** 2)
if self.spatial_level == 0:
histogram = torch.mean(quantized_features, dim=[2, 3])
elif self.spatial_level == 1:
shape = quantized_features.shape
mid_1 = shape[2] / 2
mid_1 = int(mid_1)
mid_2 = shape[3] / 2
mid_2 = int(mid_2)
histogram1 = torch.mean(quantized_features[:, :, :mid_1, :mid_2
], [2, 3])
histogram2 = torch.mean(quantized_features[:, :, mid_1:, :mid_2
], [2, 3])
histogram3 = torch.mean(quantized_features[:, :, :mid_1, mid_2:
], [2, 3])
histogram4 = torch.mean(quantized_features[:, :, mid_1:, mid_2:
], [2, 3])
histogram = torch.stack([histogram1, histogram2, histogram3,
histogram4], 1)
histogram = torch.reshape(histogram, (-1, 4 * self.N_k))
else:
assert False
return histogram * self.N_k
def compute_output_shape(self, input_shape):
if self.spatial_level == 0:
return input_shape[0], self.N_k
elif self.spatial_level == 1:
return input_shape[0], 4 * self.N_k
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_codewords': 4, 'features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr2 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp7 * tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp11 + tmp13
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp6 + tmp17
tmp20 = 2.0
tmp21 = tmp19 * tmp20
tmp22 = tmp18 - tmp21
tl.store(out_ptr0 + x3, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_div_neg_pow_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr1 + 1)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp16 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp19 = tl.load(in_ptr1 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp24 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp27 = tl.load(in_ptr1 + 3)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK])
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = -tmp2
tmp6 = tmp5 * tmp5
tmp7 = tmp3 / tmp6
tmp9 = triton_helpers.maximum(tmp1, tmp8)
tmp10 = -tmp9
tmp13 = tmp12 * tmp12
tmp14 = tmp10 / tmp13
tmp15 = triton_helpers.maximum(tmp7, tmp14)
tmp17 = triton_helpers.maximum(tmp1, tmp16)
tmp18 = -tmp17
tmp21 = tmp20 * tmp20
tmp22 = tmp18 / tmp21
tmp23 = triton_helpers.maximum(tmp15, tmp22)
tmp25 = triton_helpers.maximum(tmp1, tmp24)
tmp26 = -tmp25
tmp29 = tmp28 * tmp28
tmp30 = tmp26 / tmp29
tmp31 = triton_helpers.maximum(tmp23, tmp30)
tmp32 = tmp7 - tmp31
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp14 - tmp31
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp22 - tmp31
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tmp40 = tmp30 - tmp31
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp39 + tmp41
tl.store(out_ptr0 + x2, tmp31, xmask)
tl.store(out_ptr1 + x2, tmp42, xmask)
@triton.jit
def triton_per_fused__softmax_div_mean_mul_neg_pow_relu_2(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp4 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (r2 + 16 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr3 + (r2 + 16 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = -tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 / tmp5
tmp8 = tmp6 - tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 / tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = 16.0
tmp17 = tmp15 / tmp16
tmp18 = 4.0
tmp19 = tmp17 * tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp19, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sub_sum_0[grid(256)](primals_1, primals_2,
buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
triton_poi_fused__softmax_div_neg_pow_relu_1[grid(64)](buf1,
primals_3, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = buf4
del buf4
triton_per_fused__softmax_div_mean_mul_neg_pow_relu_2[grid(16)](buf5,
buf1, primals_3, buf2, buf3, 16, 16, XBLOCK=1, num_warps=2,
num_stages=1)
del buf1
del buf2
del buf3
return buf5, primals_1, primals_2, primals_3, buf0
class BoF_PoolingNew(nn.Module):
def __init__(self, n_codewords, features, spatial_level=0, **kwargs):
super(BoF_PoolingNew, self).__init__()
"""
Initializes a BoF Pooling layer
:param n_codewords: the number of the codewords to be used
:param spatial_level: 0 -> no spatial pooling, 1 -> spatial pooling at level 1 (4 regions). Note that the
codebook is shared between the different spatial regions
:param kwargs:
"""
self.N_k = n_codewords
self.spatial_level = spatial_level
self.V, self.sigmas = None, None
self.relu = nn.ReLU()
self.init(features)
self.softmax = nn.Softmax(dim=1)
def init(self, features):
self.V = nn.Parameter(nn.init.uniform_(torch.empty((self.N_k,
features, 1, 1), requires_grad=True)))
self.sigmas = nn.Parameter(nn.init.constant_(torch.empty((1, self.
N_k, 1, 1), requires_grad=True), 0.1))
def compute_output_shape(self, input_shape):
if self.spatial_level == 0:
return input_shape[0], self.N_k
elif self.spatial_level == 1:
return input_shape[0], 4 * self.N_k
def forward(self, input_0):
primals_2 = self.V
primals_3 = self.sigmas
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
xujli/cbof_torch
|
BoF_Pooling
| false
| 13,127
|
[
"MIT"
] | 0
|
ed8d67dd7a41b6345305d970d0f8fa0892f8ccee
|
https://github.com/xujli/cbof_torch/tree/ed8d67dd7a41b6345305d970d0f8fa0892f8ccee
|
C2
|
import torch
import torch.nn as nn
from collections import OrderedDict
class C2(nn.Module):
def __init__(self):
super(C2, self).__init__()
self.c2 = nn.Sequential(OrderedDict([('c2', nn.Conv2d(6, 16,
kernel_size=(5, 5))), ('relu2', nn.ReLU()), ('s2', nn.MaxPool2d
(kernel_size=(2, 2), stride=2))]))
def forward(self, img):
output = self.c2(img)
return output
def get_inputs():
return [torch.rand([4, 6, 64, 64])]
def get_init_inputs():
return [[], {}]
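C2 expects 6-channel maps, so it can consume the output of the C1 block listed earlier in this dump (chaining them LeNet-style is an assumption about intended use):
import torch
feats = torch.rand(4, 6, 30, 30)  # e.g. the shape C1 produces from a 64x64 input
print(C2()(feats).shape)          # (4, 16, 13, 13): 30-5+1 = 26, halved to 13 by pooling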
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 16
x0 = xindex % 3600
x4 = xindex // 3600
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 57600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x2 = xindex // 900
x5 = xindex
x4 = xindex // 14400
x6 = xindex % 14400
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x5, tmp6, xmask)
tl.store(out_ptr1 + (x6 + 14464 * x4), tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 6, 64, 64), (24576, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 60, 60), (57600, 3600, 60, 1))
buf1 = empty_strided_cuda((4, 16, 60, 60), (57856, 3616, 60, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(230400)](buf0, primals_2,
buf1, 230400, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 16, 30, 30), (14400, 900, 30, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 16, 30, 30), (14464, 900, 30, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(57600)](buf1, buf2,
buf3, 57600, XBLOCK=512, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1, buf3
class C2New(nn.Module):
def __init__(self):
super(C2New, self).__init__()
self.c2 = nn.Sequential(OrderedDict([('c2', nn.Conv2d(6, 16,
kernel_size=(5, 5))), ('relu2', nn.ReLU()), ('s2', nn.MaxPool2d
(kernel_size=(2, 2), stride=2))]))
def forward(self, input_0):
primals_1 = self.c2.c2.weight
primals_2 = self.c2.c2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
xxchenxx/otdd
|
C2
| false
| 13,128
|
[
"MIT"
] | 0
|
e63d1d170fed36957052b7bb0a0af1553b980381
|
https://github.com/xxchenxx/otdd/tree/e63d1d170fed36957052b7bb0a0af1553b980381
|
FirstBlock
|
import torch
import numpy as np
import torch.nn as nn
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class FirstBlock(nn.Module):
"""Implements the first block, which is a convolutional block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=np.sqrt(2.0), use_bn=False, activation_type='lrelu'):
super().__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels * 3 * 3
) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
if activation_type == 'linear':
self.activate = nn.Identity()
elif activation_type == 'lrelu':
self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
raise NotImplementedError(
f'Not implemented activation function: {activation_type}!')
def forward(self, x):
return self.activate(self.bn(self.conv(x) * self.scale))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0(in_out_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
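    # In-place LeakyReLU(0.2) on the conv output. The multiply by 1.0 is the
    # constant-folded wscale (use_wscale=False gives scale == 1.0); out_ptr0
    # receives the boolean (result > 0) mask saved for the leaky-relu backward.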
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_leaky_relu_leaky_relu_backward_mul_0[grid(256)](buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf1, primals_1, primals_2, buf2
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class FirstBlockNew(nn.Module):
"""Implements the first block, which is a convolutional block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=np.sqrt(2.0), use_bn=False, activation_type='lrelu'):
super().__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels * 3 * 3
) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
if activation_type == 'linear':
self.activate = nn.Identity()
elif activation_type == 'lrelu':
self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
raise NotImplementedError(
f'Not implemented activation function: {activation_type}!')
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
thunguyenphuoc/idinvert_pytorch
|
FirstBlock
| false
| 13,129
|
[
"MIT"
] | 0
|
bf8a81e75d193c22a05d9c4457907dc468389766
|
https://github.com/thunguyenphuoc/idinvert_pytorch/tree/bf8a81e75d193c22a05d9c4457907dc468389766
|
ConvCompress
|
import torch
from torch import nn
class ConvCompress(nn.Module):
def __init__(self, dim, ratio=4):
super().__init__()
self.conv = nn.Conv1d(dim, dim, ratio, stride=ratio)
def forward(self, mem):
mem = mem.transpose(1, 2)
compressed_mem = self.conv(mem)
return compressed_mem.transpose(1, 2)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
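    # Materializes mem.transpose(1, 2): copies the (batch, len, dim) input into
    # a contiguous (batch, dim, len) buffer so the channels-first Conv1d can run.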
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
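    # Adds the Conv1d bias (one value per output channel) to the conv result
    # in place.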
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(4,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1), (4, 1, 1))
del buf0
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0
), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0)
class ConvCompressNew(nn.Module):
def __init__(self, dim, ratio=4):
super().__init__()
self.conv = nn.Conv1d(dim, dim, ratio, stride=ratio)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_3 = self.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
yhgon/cmtf
|
ConvCompress
| false
| 13,130
|
[
"MIT"
] | 0
|
7a3ffc3a59a7c546a00d3b73be58f7d1c2f1f0cf
|
https://github.com/yhgon/cmtf/tree/7a3ffc3a59a7c546a00d3b73be58f7d1c2f1f0cf
|
_FakeMegatronMLP
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class _FakeMegatronMLP(nn.Module):
"""
    A fake MLP without model parallelism, used for correctness testing.
"""
def __init__(self, args, _):
super().__init__()
self.fc1 = nn.Linear(args.hidden_size, args.hidden_hidden_size)
self.fc2 = nn.Linear(args.hidden_hidden_size, args.hidden_size)
def forward(self, x):
"""
        Directly uses GELU.
"""
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x, torch.zeros_like(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config(hidden_size=4, hidden_hidden_size=4),
'_': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
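    # Exact erf-based GELU: 0.5 * x * (1 + erf(x / sqrt(2))); the constant
    # 0.7071067811865476 is 1/sqrt(2).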
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_zeros_like_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
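    # Zero-fills the buffer, implementing the torch.zeros_like(x) second output.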
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_zeros_like_1[grid(256)](buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
class _FakeMegatronMLPNew(nn.Module):
"""
    A fake MLP without model parallelism, used for correctness testing.
"""
def __init__(self, args, _):
super().__init__()
self.fc1 = nn.Linear(args.hidden_size, args.hidden_hidden_size)
self.fc2 = nn.Linear(args.hidden_hidden_size, args.hidden_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
|
xxchenxx/fastmoe
|
_FakeMegatronMLP
| false
| 13,131
|
[
"Apache-2.0"
] | 0
|
f60dd0e1f9f0447e56ff265c9ede304b88d0556b
|
https://github.com/xxchenxx/fastmoe/tree/f60dd0e1f9f0447e56ff265c9ede304b88d0556b
|
C3
|
import torch
import torch.nn as nn
from collections import OrderedDict
class C3(nn.Module):
def __init__(self):
super(C3, self).__init__()
self.c3 = nn.Sequential(OrderedDict([('c3', nn.Conv2d(16, 120,
kernel_size=(5, 5))), ('relu3', nn.ReLU())]))
def forward(self, img):
output = self.c3(img)
return output
def get_inputs():
return [torch.rand([4, 16, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
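    # Repacks the (120, 16, 5, 5) conv weight from contiguous NCHW strides into
    # the channels-last layout (strides (400, 1, 80, 16)) used by the
    # convolution call below.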
ynumel = 1920
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 400 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
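    # Same repacking for the activations: converts the NCHW input to a
    # channels-last layout before the external convolution.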
ynumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 65536 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
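    # Fused bias-add + ReLU on the channels-last conv output: writes the
    # activated result back in contiguous NCHW (out_ptr0) and a channels-last
    # boolean (x <= 0) mask (out_ptr1) for the ReLU backward.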
ynumel = 480
xnumel = 3600
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 120
y1 = yindex // 120
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 120 * x2 + 432000 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 3600 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 120 * x2 + 432000 * y1), tmp6, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (120, 16, 5, 5), (400, 25, 5, 1))
assert_size_stride(primals_2, (120,), (1,))
assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((120, 16, 5, 5), (400, 1, 80, 16), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(1920, 25)](primals_1, buf0, 1920, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 16, 64, 64), (65536, 1, 1024, 16),
torch.float32)
triton_poi_fused_1[grid(64, 4096)](primals_3, buf1, 64, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 120, 60, 60), (432000, 1, 7200, 120))
buf3 = empty_strided_cuda((4, 120, 60, 60), (432000, 3600, 60, 1),
torch.float32)
buf4 = empty_strided_cuda((4, 120, 60, 60), (432000, 1, 7200, 120),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_2[grid(480, 3600)
](buf2, primals_2, buf3, buf4, 480, 3600, XBLOCK=32, YBLOCK=32,
num_warps=4, num_stages=1)
del buf2
del primals_2
return buf3, buf0, buf1, buf4
class C3New(nn.Module):
def __init__(self):
super(C3New, self).__init__()
self.c3 = nn.Sequential(OrderedDict([('c3', nn.Conv2d(16, 120,
kernel_size=(5, 5))), ('relu3', nn.ReLU())]))
def forward(self, input_0):
primals_1 = self.c3.c3.weight
primals_2 = self.c3.c3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
xxchenxx/otdd
|
C3
| false
| 13,132
|
[
"MIT"
] | 0
|
e63d1d170fed36957052b7bb0a0af1553b980381
|
https://github.com/xxchenxx/otdd/tree/e63d1d170fed36957052b7bb0a0af1553b980381
|
LastBlock
|
import torch
import numpy as np
import torch.nn as nn
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class LastBlock(nn.Module):
"""Implements the last block, which is a dense block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=1.0, use_bn=False):
super().__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=
out_channels, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.fc(x) * self.scale
x = x.view(x.shape[0], x.shape[1], 1, 1)
return self.bn(x).view(x.shape[0], x.shape[1])
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
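    # In-place multiply by the constant-folded scale; with use_wscale=False the
    # scale is 1.0, so numerically this pass is a no-op.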
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
return buf1, primals_1
class BatchNormLayer(nn.Module):
"""Implements batch normalization layer."""
def __init__(self, channels, gamma=False, beta=True, decay=0.9, epsilon
=1e-05):
"""Initializes with basic settings.
Args:
channels: Number of channels of the input tensor.
gamma: Whether the scale (weight) of the affine mapping is learnable.
beta: Whether the center (bias) of the affine mapping is learnable.
decay: Decay factor for moving average operations in this layer.
epsilon: A value added to the denominator for numerical stability.
"""
super().__init__()
self.bn = nn.BatchNorm2d(num_features=channels, affine=True,
track_running_stats=True, momentum=1 - decay, eps=epsilon)
self.bn.weight.requires_grad = gamma
self.bn.bias.requires_grad = beta
def forward(self, x):
return self.bn(x)
class LastBlockNew(nn.Module):
"""Implements the last block, which is a dense block."""
def __init__(self, in_channels, out_channels, use_wscale=False,
wscale_gain=1.0, use_bn=False):
super().__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=
out_channels, bias=False)
self.scale = wscale_gain / np.sqrt(in_channels) if use_wscale else 1.0
self.bn = BatchNormLayer(channels=out_channels
) if use_bn else nn.Identity()
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
thunguyenphuoc/idinvert_pytorch
|
LastBlock
| false
| 13,133
|
[
"MIT"
] | 0
|
bf8a81e75d193c22a05d9c4457907dc468389766
|
https://github.com/thunguyenphuoc/idinvert_pytorch/tree/bf8a81e75d193c22a05d9c4457907dc468389766
|
MinMaxNorm
|
import torch
import torch.nn as nn
class MinMaxNorm(nn.Module):
def __init__(self, min, max, a=0, b=1):
super(MinMaxNorm, self).__init__()
self.min, self.max = min, max
self.a, self.b = a, b
def forward(self, x):
return self.a + (x - self.min) * (self.b - self.a) / (self.max -
self.min)
def inverse(self, x):
return self.min + (x - self.a) * (self.max - self.min) / (self.b -
self.a)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'min': 4, 'max': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
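    # Constant-folded (x - min) * (b - a) / (max - min) with a=0, b=1 and
    # min == max == 4 (from get_init_inputs); the folded 1/(max - min) divisor
    # is float('inf'), so the output is +/-inf (or NaN where x == 4).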
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp0 - tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = float('inf')
tmp6 = tmp4 * tmp5
tmp7 = 0.0
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_sub_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MinMaxNormNew(nn.Module):
def __init__(self, min, max, a=0, b=1):
super(MinMaxNormNew, self).__init__()
self.min, self.max = min, max
self.a, self.b = a, b
def inverse(self, x):
return self.min + (x - self.a) * (self.max - self.min) / (self.b -
self.a)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
yhgon/speedyspeech
|
MinMaxNorm
| false
| 13,134
|
[
"BSD-3-Clause"
] | 0
|
574c6a94091431f313e2aae8e154b8c80e6908ce
|
https://github.com/yhgon/speedyspeech/tree/574c6a94091431f313e2aae8e154b8c80e6908ce
|
DisConvModule
|
import torch
import torch.nn as nn
from torch.nn.utils import spectral_norm as spectral_norm_fn
from torch.nn.utils import weight_norm as weight_norm_fn
def dis_conv(input_dim, output_dim, kernel_size=5, stride=2, padding=0,
rate=1, activation='lrelu'):
return Conv2dBlock(input_dim, output_dim, kernel_size, stride,
conv_padding=padding, dilation=rate, activation=activation)
class Conv2dBlock(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride, padding=
0, conv_padding=0, dilation=1, weight_norm='none', norm='none',
activation='relu', pad_type='zero', transpose=False):
super(Conv2dBlock, self).__init__()
self.use_bias = True
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
elif pad_type == 'none':
self.pad = None
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
norm_dim = output_dim
if norm == 'bn':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if weight_norm == 'sn':
self.weight_norm = spectral_norm_fn
elif weight_norm == 'wn':
self.weight_norm = weight_norm_fn
elif weight_norm == 'none':
self.weight_norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(weight_norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if transpose:
self.conv = nn.ConvTranspose2d(input_dim, output_dim,
kernel_size, stride, padding=conv_padding, output_padding=
conv_padding, dilation=dilation, bias=self.use_bias)
else:
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size,
stride, padding=conv_padding, dilation=dilation, bias=self.
use_bias)
if self.weight_norm:
self.conv = self.weight_norm(self.conv)
def forward(self, x):
if self.pad:
x = self.conv(self.pad(x))
else:
x = self.conv(x)
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class DisConvModule(nn.Module):
def __init__(self, input_dim, cnum, use_cuda=True, device_ids=None):
super(DisConvModule, self).__init__()
self.use_cuda = use_cuda
self.device_ids = device_ids
self.conv1 = dis_conv(input_dim, cnum, 5, 2, 2)
self.conv2 = dis_conv(cnum, cnum * 2, 5, 2, 2)
self.conv3 = dis_conv(cnum * 2, cnum * 4, 5, 2, 2)
self.conv4 = dis_conv(cnum * 4, cnum * 4, 5, 2, 2)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'cnum': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.utils import spectral_norm as spectral_norm_fn
from torch.nn.utils import weight_norm as weight_norm_fn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
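    # Fused conv bias-add + LeakyReLU(0.2), applied in place on the conv
    # output; the next two kernels repeat this pattern for the wider channel
    # counts.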
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
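    # Same fused bias-add + LeakyReLU as above, but for the last conv it also
    # emits the (result > 0) mask needed by the leaky-relu backward.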
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (8, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (16, 8, 5, 5), (200, 25, 5, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 16, 5, 5), (400, 25, 5, 1))
assert_size_stride(primals_9, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2,
2), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(64)](buf1, primals_3,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_leaky_relu_1[grid(32)](buf3, primals_5,
32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 1, 1), (16, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_leaky_relu_2[grid(64)](buf5, primals_7,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 1, 1), (16, 1, 1, 1))
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3[grid(64)
](buf7, primals_9, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
return (buf7, primals_1, primals_2, primals_4, primals_6, primals_8,
buf1, buf3, buf5, buf8)
def dis_conv(input_dim, output_dim, kernel_size=5, stride=2, padding=0,
rate=1, activation='lrelu'):
return Conv2dBlock(input_dim, output_dim, kernel_size, stride,
conv_padding=padding, dilation=rate, activation=activation)
class Conv2dBlock(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride, padding=
0, conv_padding=0, dilation=1, weight_norm='none', norm='none',
activation='relu', pad_type='zero', transpose=False):
super(Conv2dBlock, self).__init__()
self.use_bias = True
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
elif pad_type == 'none':
self.pad = None
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
norm_dim = output_dim
if norm == 'bn':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if weight_norm == 'sn':
self.weight_norm = spectral_norm_fn
elif weight_norm == 'wn':
self.weight_norm = weight_norm_fn
elif weight_norm == 'none':
self.weight_norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(weight_norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if transpose:
self.conv = nn.ConvTranspose2d(input_dim, output_dim,
kernel_size, stride, padding=conv_padding, output_padding=
conv_padding, dilation=dilation, bias=self.use_bias)
else:
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size,
stride, padding=conv_padding, dilation=dilation, bias=self.
use_bias)
if self.weight_norm:
self.conv = self.weight_norm(self.conv)
def forward(self, x):
if self.pad:
x = self.conv(self.pad(x))
else:
x = self.conv(x)
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class DisConvModuleNew(nn.Module):
def __init__(self, input_dim, cnum, use_cuda=True, device_ids=None):
super(DisConvModuleNew, self).__init__()
self.use_cuda = use_cuda
self.device_ids = device_ids
self.conv1 = dis_conv(input_dim, cnum, 5, 2, 2)
self.conv2 = dis_conv(cnum, cnum * 2, 5, 2, 2)
self.conv3 = dis_conv(cnum * 2, cnum * 4, 5, 2, 2)
self.conv4 = dis_conv(cnum * 4, cnum * 4, 5, 2, 2)
def forward(self, input_0):
primals_2 = self.conv1.conv.weight
primals_3 = self.conv1.conv.bias
primals_4 = self.conv2.conv.weight
primals_5 = self.conv2.conv.bias
primals_6 = self.conv3.conv.weight
primals_7 = self.conv3.conv.bias
primals_8 = self.conv4.conv.weight
primals_9 = self.conv4.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
xy-gao/generative-inpainting-pytorch
|
DisConvModule
| false
| 13,135
|
[
"MIT"
] | 0
|
24f2183a11fd48a0383c9862e3d1a6354fbb6cda
|
https://github.com/xy-gao/generative-inpainting-pytorch/tree/24f2183a11fd48a0383c9862e3d1a6354fbb6cda
|
CriticVanilla
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class CriticVanilla(nn.Module):
"""a vanilla critic module that outputs a node's q-values given only its observation and action(no message between nodes)"""
def __init__(self, state_dim, action_dim):
super(CriticVanilla, self).__init__()
self.baseQ1 = MLPBase(state_dim + action_dim, 1)
self.baseQ2 = MLPBase(state_dim + action_dim, 1)
def forward(self, x, u):
xu = torch.cat([x, u], -1)
x1 = self.baseQ1(xu)
x2 = self.baseQ2(xu)
return x1, x2
def Q1(self, x, u):
xu = torch.cat([x, u], -1)
x1 = self.baseQ1(xu)
return x1
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
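    # Implements torch.cat([x, u], -1): the last-dim index x0 reads from
    # in_ptr0 while x0 < 4 and from in_ptr1 (shifted by 4) otherwise.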
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
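    # Fused bias-add + ReLU for the 400-wide hidden layer; the activation is
    # written back in place and the (x <= 0) mask goes to a padded
    # (stride-1664) buffer for the backward.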
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
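    # Copies the padded (stride-1216) ReLU output into a contiguous (64, 300)
    # view so it can feed the final addmm.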
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (400, 8), (8, 1))
assert_size_stride(primals_4, (400,), (1,))
assert_size_stride(primals_5, (300, 400), (400, 1))
assert_size_stride(primals_6, (300,), (1,))
assert_size_stride(primals_7, (1, 300), (300, 1))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (400, 8), (8, 1))
assert_size_stride(primals_10, (400,), (1,))
assert_size_stride(primals_11, (300, 400), (400, 1))
assert_size_stride(primals_12, (300,), (1,))
assert_size_stride(primals_13, (1, 300), (300, 1))
assert_size_stride(primals_14, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf1
buf18 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf2,
primals_4, buf18, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_5, (400, 300), (1, 400), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf3,
primals_6, buf4, buf17, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_6
buf5 = buf3
del buf3
triton_poi_fused_relu_view_3[grid(19200)](buf4, buf5, 19200, XBLOCK
=256, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf5, reinterpret_tensor(primals_7,
(300, 1), (1, 300), 0), alpha=1, beta=1, out=buf7)
del primals_8
buf8 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_9, (8, 400), (1, 8), 0), out=buf8)
del primals_9
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf8
buf16 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf9,
primals_10, buf16, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_10
buf10 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_11, (400, 300), (1, 400), 0), out=buf10)
buf11 = buf4
del buf4
buf15 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf10,
primals_12, buf11, buf15, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_12
buf12 = buf10
del buf10
triton_poi_fused_relu_view_3[grid(19200)](buf11, buf12, 19200,
XBLOCK=256, num_warps=4, num_stages=1)
del buf11
buf14 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_14, buf12, reinterpret_tensor(
primals_13, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf14)
del primals_14
return (reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0),
reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0),
reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(
buf2, (64, 400), (400, 1), 0), buf5, reinterpret_tensor(buf9, (64,
400), (400, 1), 0), buf12, primals_13, buf15, primals_11, buf16,
primals_7, buf17, primals_5, buf18)
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class CriticVanillaNew(nn.Module):
"""a vanilla critic module that outputs a node's q-values given only its observation and action(no message between nodes)"""
def __init__(self, state_dim, action_dim):
super(CriticVanillaNew, self).__init__()
self.baseQ1 = MLPBase(state_dim + action_dim, 1)
self.baseQ2 = MLPBase(state_dim + action_dim, 1)
def Q1(self, x, u):
xu = torch.cat([x, u], -1)
x1 = self.baseQ1(xu)
return x1
def forward(self, input_0, input_1):
primals_3 = self.baseQ1.l1.weight
primals_4 = self.baseQ1.l1.bias
primals_5 = self.baseQ1.l2.weight
primals_6 = self.baseQ1.l2.bias
primals_7 = self.baseQ1.l3.weight
primals_8 = self.baseQ1.l3.bias
primals_9 = self.baseQ2.l1.weight
primals_10 = self.baseQ2.l1.bias
primals_11 = self.baseQ2.l2.weight
primals_12 = self.baseQ2.l2.bias
primals_13 = self.baseQ2.l3.weight
primals_14 = self.baseQ2.l3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1]
|
yangfanthu/modular-rl
|
CriticVanilla
| false
| 13,136
|
[
"BSD-2-Clause"
] | 0
|
25c599bab641a7e732dbaf116cd240fa2358f113
|
https://github.com/yangfanthu/modular-rl/tree/25c599bab641a7e732dbaf116cd240fa2358f113
|
CFRB
|
import torch
import torch.nn as nn
from collections import OrderedDict
import torch.nn.functional as F
def sequential(*args):
"""Advanced nn.Sequential.
Args:
nn.Sequential, nn.Module
Returns:
nn.Sequential
"""
if len(args) == 1:
if isinstance(args[0], OrderedDict):
raise NotImplementedError(
'sequential does not support OrderedDict input.')
return args[0]
modules = []
for module in args:
if isinstance(module, nn.Sequential):
for submodule in module.children():
modules.append(submodule)
elif isinstance(module, nn.Module):
modules.append(module)
return nn.Sequential(*modules)
def conv(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=
1, bias=True, mode='CBR', negative_slope=0.2):
L = []
for t in mode:
if t == 'C':
L.append(nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, bias=bias))
elif t == 'T':
L.append(nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=kernel_size, stride=
stride, padding=padding, bias=bias))
elif t == 'B':
L.append(nn.BatchNorm2d(out_channels, momentum=0.9, eps=0.0001,
affine=True))
elif t == 'I':
L.append(nn.InstanceNorm2d(out_channels, affine=True))
elif t == 'R':
L.append(nn.ReLU(inplace=True))
elif t == 'r':
L.append(nn.ReLU(inplace=False))
elif t == 'L':
L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=True))
elif t == 'l':
L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=False)
)
elif t == '2':
L.append(nn.PixelShuffle(upscale_factor=2))
elif t == '3':
L.append(nn.PixelShuffle(upscale_factor=3))
elif t == '4':
L.append(nn.PixelShuffle(upscale_factor=4))
elif t == 'U':
L.append(nn.Upsample(scale_factor=2, mode='nearest'))
elif t == 'u':
L.append(nn.Upsample(scale_factor=3, mode='nearest'))
elif t == 'v':
L.append(nn.Upsample(scale_factor=4, mode='nearest'))
elif t == 'M':
L.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride,
padding=0))
elif t == 'A':
L.append(nn.AvgPool2d(kernel_size=kernel_size, stride=stride,
padding=0))
else:
            raise NotImplementedError('Undefined type: {}'.format(t))
return sequential(*L)
class ESA(nn.Module):
def __init__(self, channel=64, reduction=4, bias=True):
super(ESA, self).__init__()
self.r_nc = channel // reduction
self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1)
self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1)
self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride=
2, padding=0)
self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x1 = self.conv1(x)
x2 = F.max_pool2d(self.conv2(x1), kernel_size=7, stride=3)
x2 = self.relu(self.conv3(x2))
x2 = self.relu(self.conv4(x2))
x2 = F.interpolate(self.conv5(x2), (x.size(2), x.size(3)), mode=
'bilinear', align_corners=False)
x2 = self.conv6(x2 + self.conv21(x1))
return x.mul(self.sigmoid(x2))
class CFRB(nn.Module):
def __init__(self, in_channels=50, out_channels=50, kernel_size=3,
stride=1, padding=1, bias=True, mode='CL', d_rate=0.5,
negative_slope=0.05):
super(CFRB, self).__init__()
self.d_nc = int(in_channels * d_rate)
self.r_nc = in_channels
assert mode[0] == 'C', 'convolutional layer first'
self.conv1_d = conv(in_channels, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv1_r = conv(in_channels, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv2_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv2_r = conv(self.r_nc, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv3_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv3_r = conv(self.r_nc, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv4_d = conv(self.r_nc, self.d_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv1x1 = conv(self.d_nc * 4, out_channels, kernel_size=1,
stride=1, padding=0, bias=bias, mode=mode[0])
self.act = conv(mode=mode[-1], negative_slope=negative_slope)
self.esa = ESA(in_channels, reduction=4, bias=True)
def forward(self, x):
d1 = self.conv1_d(x)
x = self.act(self.conv1_r(x) + x)
d2 = self.conv2_d(x)
x = self.act(self.conv2_r(x) + x)
d3 = self.conv3_d(x)
x = self.act(self.conv3_r(x) + x)
x = self.conv4_d(x)
x = self.act(torch.cat([d1, d2, d3, x], dim=1))
x = self.esa(self.conv1x1(x))
return x
def get_inputs():
return [torch.rand([4, 50, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
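    # Fuses the conv bias-add, the residual add (conv_r(x) + x), and
    # LeakyReLU(0.05) into one in-place pass.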
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 50
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.05
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(in_out_ptr0 + x3, tmp9, None)
@triton.jit
def triton_poi_fused_cat_leaky_relu_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK:
tl.constexpr):
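    # Fuses torch.cat([d1, d2, d3, x], dim=1) -- four 25-channel branches, each
    # with its own conv bias -- with the final LeakyReLU(0.05).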
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 100
x0 = xindex % 4096
x2 = xindex // 409600
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 25, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 102400 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 50, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (x0 + 4096 * (-25 + x1) + 102400 * x2), tmp13,
other=0.0)
tmp15 = tl.load(in_ptr3 + (-25 + x1), tmp13, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 75, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr4 + (x0 + 4096 * (-50 + x1) + 102400 * x2), tmp22,
other=0.0)
tmp24 = tl.load(in_ptr5 + (-50 + x1), tmp22, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tl.full([1], 100, tl.int64)
tmp31 = tl.load(in_ptr6 + (x0 + 4096 * (-75 + x1) + 102400 * x2), tmp28,
other=0.0)
tmp32 = tl.load(in_ptr7 + (-75 + x1), tmp28, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp28, tmp33, tmp34)
tmp36 = tl.where(tmp22, tmp27, tmp35)
tmp37 = tl.where(tmp13, tmp18, tmp36)
tmp38 = tl.where(tmp4, tmp9, tmp37)
tmp39 = 0.0
tmp40 = tmp38 > tmp39
tmp41 = 0.05
tmp42 = tmp38 * tmp41
tmp43 = tl.where(tmp40, tmp38, tmp42)
tl.store(in_out_ptr0 + x3, tmp43, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 50
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 12
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 46128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 961 % 12
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 12
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
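# The next three kernels precompute the bilinear-resize lookup tables for
# F.interpolate: floor source indices, clamped floor+1 indices, and the
# fractional lerp weights for the 9 -> 64 upscale (0.140625 == 9/64).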
@triton.jit
def triton_poi_fused__to_copy_6(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_clamp_7(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
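# Annotation (not generated output): kernels 6-8 precompute the index and
# weight tables for bilinearly upsampling the 9x9 ESA branch back to 64x64.
# The constant 0.140625 is exactly 9/64, so each output coordinate maps to
# src = max((dst + 0.5) * 9/64 - 0.5, 0), the usual align_corners=False
# source-coordinate formula; kernel 7 clamps the right/bottom neighbour
# index to at most 8, and kernel 8 stores the fractional interpolation
# weight clamped to [0, 1].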
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_9(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x5 = xindex // 4096
x2 = xindex // 4096 % 12
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr8 + x6, None)
tmp38 = tl.load(in_ptr9 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 9 * tmp4 + 81 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 9 * tmp4 + 81 * x5), None,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 9 * tmp25 + 81 * x5), None,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 9 * tmp25 + 81 * x5), None,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp36 = tmp21 + tmp35
tmp39 = tmp37 + tmp38
tmp40 = tmp36 + tmp39
tl.store(in_out_ptr0 + x6, tmp40, None)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_10(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 50
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp5, None)
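# Annotation (not generated output): this last kernel fuses the ESA gate,
# out = x * sigmoid(conv6(...) + bias). The biased convolution tmp2 is kept
# in in_out_ptr0 for the backward pass, while the gated product tmp5 goes
# to out_ptr0 and becomes the module output (buf38).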
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31) = args
args.clear()
assert_size_stride(primals_1, (25, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_2, (25,), (1,))
assert_size_stride(primals_3, (4, 50, 64, 64), (204800, 4096, 64, 1))
assert_size_stride(primals_4, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (25, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_7, (25,), (1,))
assert_size_stride(primals_8, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_9, (50,), (1,))
assert_size_stride(primals_10, (25, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_11, (25,), (1,))
assert_size_stride(primals_12, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_13, (50,), (1,))
assert_size_stride(primals_14, (25, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_15, (25,), (1,))
assert_size_stride(primals_16, (50, 100, 1, 1), (100, 1, 1, 1))
assert_size_stride(primals_17, (50,), (1,))
assert_size_stride(primals_18, (12, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_19, (12,), (1,))
assert_size_stride(primals_20, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_21, (12,), (1,))
assert_size_stride(primals_22, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_23, (12,), (1,))
assert_size_stride(primals_24, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_25, (12,), (1,))
assert_size_stride(primals_26, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_27, (12,), (1,))
assert_size_stride(primals_28, (12, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_29, (12,), (1,))
assert_size_stride(primals_30, (50, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_31, (50,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 25, 64, 64), (102400, 4096, 64, 1))
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_convolution_leaky_relu_0[grid(819200)](buf2,
primals_5, primals_3, 819200, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_5
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 25, 64, 64), (102400, 4096, 64, 1))
buf4 = extern_kernels.convolution(buf2, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_leaky_relu_0[grid(819200)](buf5,
primals_9, buf2, 819200, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf6 = extern_kernels.convolution(buf5, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 25, 64, 64), (102400, 4096, 64, 1))
buf7 = extern_kernels.convolution(buf5, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf8 = buf7
del buf7
triton_poi_fused_add_convolution_leaky_relu_0[grid(819200)](buf8,
primals_13, buf5, 819200, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf9 = extern_kernels.convolution(buf8, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 25, 64, 64), (102400, 4096, 64, 1))
buf10 = empty_strided_cuda((4, 100, 64, 64), (409600, 4096, 64, 1),
torch.float32)
buf11 = buf10
del buf10
triton_poi_fused_cat_leaky_relu_1[grid(1638400)](buf11, buf0,
primals_2, buf3, primals_7, buf6, primals_11, buf9, primals_15,
1638400, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
del buf3
del buf6
del buf9
del primals_11
del primals_15
del primals_2
del primals_7
buf12 = extern_kernels.convolution(buf11, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_2[grid(819200)](buf13, primals_17,
819200, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf14 = extern_kernels.convolution(buf13, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 12, 64, 64), (49152, 4096, 64, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_3[grid(196608)](buf15, primals_19,
196608, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_19
buf16 = extern_kernels.convolution(buf15, primals_20, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 12, 31, 31), (11532, 961, 31, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_4[grid(46128)](buf17, primals_21,
46128, XBLOCK=512, num_warps=4, num_stages=1)
del primals_21
buf18 = torch.ops.aten.max_pool2d_with_indices.default(buf17, [7, 7
], [3, 3])
buf19 = buf18[0]
buf20 = buf18[1]
del buf18
buf21 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 12, 9, 9), (972, 81, 9, 1))
buf22 = buf21
del buf21
triton_poi_fused_convolution_relu_5[grid(3888)](buf22, primals_23,
3888, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf23 = extern_kernels.convolution(buf22, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 12, 9, 9), (972, 81, 9, 1))
buf24 = buf23
del buf23
triton_poi_fused_convolution_relu_5[grid(3888)](buf24, primals_25,
3888, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf25 = extern_kernels.convolution(buf24, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 12, 9, 9), (972, 81, 9, 1))
buf26 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_6[grid(64)](buf26, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf27 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_7[grid(64)](buf27, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_6[grid(64)](buf28, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_7[grid(64)](buf29, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8[grid(64)](buf30,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf32 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8[grid(64)](buf32,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf34 = extern_kernels.convolution(buf15, primals_28, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 12, 64, 64), (49152, 4096, 64, 1))
buf33 = empty_strided_cuda((4, 12, 64, 64), (49152, 4096, 64, 1),
torch.float32)
buf35 = buf33
del buf33
triton_poi_fused__unsafe_index_add_convolution_mul_sub_9[grid(196608)](
buf35, buf26, buf28, buf25, primals_27, buf29, buf30, buf27,
buf32, buf34, primals_29, 196608, XBLOCK=512, num_warps=8,
num_stages=1)
del buf25
del buf34
del primals_27
del primals_29
buf36 = extern_kernels.convolution(buf35, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf37 = buf36
del buf36
buf38 = empty_strided_cuda((4, 50, 64, 64), (204800, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_mul_sigmoid_10[grid(819200)](buf37,
primals_31, buf13, buf38, 819200, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_31
return (buf38, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, buf2, buf5, buf8, buf11, buf13, buf15, buf17, buf19,
buf20, buf22, buf24, buf26, buf27, buf28, buf29, buf30, buf32,
buf35, buf37)
from collections import OrderedDict
def sequential(*args):
    """Advanced nn.Sequential that flattens nested containers.
    Args:
        args: nn.Sequential and/or nn.Module instances.
    Returns:
        nn.Sequential: a single flat container of all submodules.
    """
if len(args) == 1:
if isinstance(args[0], OrderedDict):
raise NotImplementedError(
'sequential does not support OrderedDict input.')
return args[0]
modules = []
for module in args:
if isinstance(module, nn.Sequential):
for submodule in module.children():
modules.append(submodule)
elif isinstance(module, nn.Module):
modules.append(module)
return nn.Sequential(*modules)
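if __name__ == '__main__':
    # Usage sketch (annotation, not part of the generated file): nested
    # nn.Sequential arguments are unpacked into one flat container.
    _inner = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
    _flat = sequential(_inner, nn.Conv2d(8, 3, 3, padding=1))
    assert len(_flat) == 3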
def conv(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=
1, bias=True, mode='CBR', negative_slope=0.2):
L = []
for t in mode:
if t == 'C':
L.append(nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, bias=bias))
elif t == 'T':
L.append(nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=kernel_size, stride=
stride, padding=padding, bias=bias))
elif t == 'B':
L.append(nn.BatchNorm2d(out_channels, momentum=0.9, eps=0.0001,
affine=True))
elif t == 'I':
L.append(nn.InstanceNorm2d(out_channels, affine=True))
elif t == 'R':
L.append(nn.ReLU(inplace=True))
elif t == 'r':
L.append(nn.ReLU(inplace=False))
elif t == 'L':
L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=True))
elif t == 'l':
L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=False)
)
elif t == '2':
L.append(nn.PixelShuffle(upscale_factor=2))
elif t == '3':
L.append(nn.PixelShuffle(upscale_factor=3))
elif t == '4':
L.append(nn.PixelShuffle(upscale_factor=4))
elif t == 'U':
L.append(nn.Upsample(scale_factor=2, mode='nearest'))
elif t == 'u':
L.append(nn.Upsample(scale_factor=3, mode='nearest'))
elif t == 'v':
L.append(nn.Upsample(scale_factor=4, mode='nearest'))
elif t == 'M':
L.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride,
padding=0))
elif t == 'A':
L.append(nn.AvgPool2d(kernel_size=kernel_size, stride=stride,
padding=0))
else:
            raise NotImplementedError('Undefined type: {}'.format(t))
return sequential(*L)
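if __name__ == '__main__':
    # Usage sketch (annotation, not part of the generated file): the mode
    # string is a one-character-per-layer mini-language; 'CL' builds Conv2d
    # followed by LeakyReLU, the combination CFRBNew uses below.
    _block = conv(in_channels=50, out_channels=25, kernel_size=1,
        stride=1, padding=0, mode='CL', negative_slope=0.05)
    assert isinstance(_block[0], nn.Conv2d)
    assert isinstance(_block[1], nn.LeakyReLU)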
class ESA(nn.Module):
def __init__(self, channel=64, reduction=4, bias=True):
super(ESA, self).__init__()
self.r_nc = channel // reduction
self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1)
self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1)
self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride=
2, padding=0)
self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x1 = self.conv1(x)
x2 = F.max_pool2d(self.conv2(x1), kernel_size=7, stride=3)
x2 = self.relu(self.conv3(x2))
x2 = self.relu(self.conv4(x2))
x2 = F.interpolate(self.conv5(x2), (x.size(2), x.size(3)), mode=
'bilinear', align_corners=False)
x2 = self.conv6(x2 + self.conv21(x1))
return x.mul(self.sigmoid(x2))
class CFRBNew(nn.Module):
def __init__(self, in_channels=50, out_channels=50, kernel_size=3,
stride=1, padding=1, bias=True, mode='CL', d_rate=0.5,
negative_slope=0.05):
super(CFRBNew, self).__init__()
self.d_nc = int(in_channels * d_rate)
self.r_nc = in_channels
assert mode[0] == 'C', 'convolutional layer first'
self.conv1_d = conv(in_channels, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv1_r = conv(in_channels, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv2_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv2_r = conv(self.r_nc, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv3_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv3_r = conv(self.r_nc, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv4_d = conv(self.r_nc, self.d_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv1x1 = conv(self.d_nc * 4, out_channels, kernel_size=1,
stride=1, padding=0, bias=bias, mode=mode[0])
self.act = conv(mode=mode[-1], negative_slope=negative_slope)
self.esa = ESA(in_channels, reduction=4, bias=True)
def forward(self, input_0):
primals_1 = self.conv1_d.weight
primals_2 = self.conv1_d.bias
primals_4 = self.conv1_r.weight
primals_5 = self.conv1_r.bias
primals_6 = self.conv2_d.weight
primals_7 = self.conv2_d.bias
primals_8 = self.conv2_r.weight
primals_9 = self.conv2_r.bias
primals_10 = self.conv3_d.weight
primals_11 = self.conv3_d.bias
primals_12 = self.conv3_r.weight
primals_13 = self.conv3_r.bias
primals_14 = self.conv4_d.weight
primals_15 = self.conv4_d.bias
primals_16 = self.conv1x1.weight
primals_17 = self.conv1x1.bias
primals_18 = self.esa.conv1.weight
primals_19 = self.esa.conv1.bias
        primals_28 = self.esa.conv21.weight
        primals_29 = self.esa.conv21.bias
        primals_20 = self.esa.conv2.weight
        primals_21 = self.esa.conv2.bias
        primals_22 = self.esa.conv3.weight
        primals_23 = self.esa.conv3.bias
        primals_24 = self.esa.conv4.weight
        primals_25 = self.esa.conv4.bias
        primals_26 = self.esa.conv5.weight
        primals_27 = self.esa.conv5.bias
primals_30 = self.esa.conv6.weight
primals_31 = self.esa.conv6.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31])
return output[0]
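if __name__ == '__main__':
    # Smoke-test sketch (annotation, not part of the generated file): the
    # compiled call() path is CUDA-only, so run it only when a GPU is
    # available; the input shape matches the assert on primals_3 above.
    if torch.cuda.is_available():
        _m = CFRBNew().cuda()
        _y = _m(torch.rand(4, 50, 64, 64, device='cuda'))
        assert _y.shape == (4, 50, 64, 64)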
|
wwjfsfs/wwjyyds
|
CFRB
| false
| 13,137
|
[
"MIT"
] | 0
|
80cd6267fde7cd98838078a0d5178a557ceb7414
|
https://github.com/wwjfsfs/wwjyyds/tree/80cd6267fde7cd98838078a0d5178a557ceb7414
|
FFN
|
import torch
import torch.nn as nn
import torch as t
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
:param bias: boolean. if True, bias is included.
:param w_init: str. weight inits with xavier initialization.
"""
super(Conv, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
x = self.conv(x)
return x
class FFN(nn.Module):
"""
Positionwise Feed-Forward Network
"""
def __init__(self, num_hidden):
"""
:param num_hidden: dimension of hidden
"""
super(FFN, self).__init__()
self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1, w_init=
'relu')
self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=1)
self.dropout = nn.Dropout(p=0.1)
self.layer_norm = nn.LayerNorm(num_hidden)
def forward(self, input_):
x = input_.transpose(1, 2)
x = self.w_2(t.relu(self.w_1(x)))
x = x.transpose(1, 2)
x = x + input_
x = self.dropout(x)
x = self.layer_norm(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_hidden': 4}]
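if __name__ == '__main__':
    # Shape sketch (annotation, not part of the original file): FFN expands
    # the hidden size 4x (4 -> 16 -> 4 here), adds the residual, then
    # layer-normalizes, so the output shape always matches the input.
    _ffn = FFN(num_hidden=4)
    _out = _ffn(torch.rand(4, 4, 4))
    assert _out.shape == (4, 4, 4)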
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (4, 16, 1), (16, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 16, 4), (64, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_3[grid(16)](buf4, primals_1,
buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf4,
primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf5
del buf6
del primals_7
return buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
:param bias: boolean. if True, bias is included.
:param w_init: str. weight inits with xavier initialization.
"""
super(Conv, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
x = self.conv(x)
return x
class FFNNew(nn.Module):
"""
Positionwise Feed-Forward Network
"""
def __init__(self, num_hidden):
"""
:param num_hidden: dimension of hidden
"""
super(FFNNew, self).__init__()
self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1, w_init=
'relu')
self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=1)
self.dropout = nn.Dropout(p=0.1)
self.layer_norm = nn.LayerNorm(num_hidden)
def forward(self, input_0):
primals_2 = self.w_1.conv.weight
primals_3 = self.w_1.conv.bias
primals_4 = self.w_2.conv.weight
primals_5 = self.w_2.conv.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
yhgon/Transformer-TTS
|
FFN
| false
| 13,138
|
[
"MIT"
] | 0
|
5f34945cb5500d484275700c4e393ed125d5e753
|
https://github.com/yhgon/Transformer-TTS/tree/5f34945cb5500d484275700c4e393ed125d5e753
|
MLP
|
import torch
import torch.autograd
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, n_in, n_out, dropout=0):
super().__init__()
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.GELU()
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.dropout(x)
x = self.linear(x)
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.autograd
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
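# Annotation (not generated output): the kernel above is the exact erf-based
# GELU, gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), where the constant
# 0.7071067811865476 is 1/sqrt(2). A host-side sketch of the same formula:
if __name__ == '__main__':
    _x = torch.randn(64)
    _ref = 0.5 * _x * (1.0 + torch.erf(_x * 0.7071067811865476))
    assert torch.allclose(_ref, torch.nn.functional.gelu(_x), atol=1e-6)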
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0
class MLPNew(nn.Module):
def __init__(self, n_in, n_out, dropout=0):
super().__init__()
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.GELU()
self.dropout = nn.Dropout(dropout)
def forward(self, input_0):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
yifding/W2NER
|
MLP
| false
| 13,139
|
[
"MIT"
] | 0
|
d13128e45f3930a8b8faa794318939dc90a75974
|
https://github.com/yifding/W2NER/tree/d13128e45f3930a8b8faa794318939dc90a75974
|
ActorDownAction
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class ActorDownAction(nn.Module):
"""a top-down module used in bothway message passing that passes messages to children and outputs action"""
def __init__(self, self_input_dim, action_dim, msg_dim, max_action,
max_children):
super(ActorDownAction, self).__init__()
self.max_action = max_action
self.action_base = MLPBase(self_input_dim + msg_dim, action_dim)
self.msg_base = MLPBase(self_input_dim + msg_dim, msg_dim *
max_children)
def forward(self, x, m):
xm = torch.cat((x, m), dim=-1)
xm = torch.tanh(xm)
action = self.max_action * torch.tanh(self.action_base(xm))
msg_down = self.msg_base(xm)
msg_down = F.normalize(msg_down, dim=-1)
return action, msg_down
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'self_input_dim': 4, 'action_dim': 4, 'msg_dim': 4,
'max_action': 4, 'max_children': 4}]
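if __name__ == '__main__':
    # Behaviour sketch (annotation, not part of the original file): the
    # action is bounded to [-max_action, max_action] by the tanh, and
    # msg_down is L2-normalized along the last dimension.
    _actor = ActorDownAction(self_input_dim=4, action_dim=4, msg_dim=4,
        max_action=4, max_children=4)
    _a, _msg = _actor(torch.rand(2, 4), torch.rand(2, 4))
    assert _a.abs().max() <= 4.0
    assert torch.allclose(_msg.norm(dim=-1), torch.ones(2), atol=1e-5)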
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp12 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = libdevice.tanh(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_tanh_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = 1e-12
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp9, xmask)
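# Annotation (not generated output): this reduction kernel is the fused
# F.normalize of the msg_down branch -- it takes the per-row L2 norm over
# the 16 message features, clamps it with the same eps=1e-12 that
# F.normalize uses by default, and stores both the norm and the normalized
# vector.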
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (400, 8), (8, 1))
assert_size_stride(primals_4, (400,), (1,))
assert_size_stride(primals_5, (300, 400), (400, 1))
assert_size_stride(primals_6, (300,), (1,))
assert_size_stride(primals_7, (4, 300), (300, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (400, 8), (8, 1))
assert_size_stride(primals_10, (400,), (1,))
assert_size_stride(primals_11, (300, 400), (400, 1))
assert_size_stride(primals_12, (300,), (1,))
assert_size_stride(primals_13, (16, 300), (300, 1))
assert_size_stride(primals_14, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf1
buf20 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf2,
primals_4, buf20, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_5, (400, 300), (1, 400), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf19 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf3,
primals_6, buf4, buf19, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_6
buf5 = buf3
del buf3
triton_poi_fused_relu_view_3[grid(19200)](buf4, buf5, 19200, XBLOCK
=256, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf5, reinterpret_tensor(primals_7,
(300, 4), (1, 300), 0), alpha=1, beta=1, out=buf6)
del primals_8
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_tanh_4[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_9, (8, 400), (1, 8), 0), out=buf8)
del primals_9
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf8
buf18 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf9,
primals_10, buf18, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_10
buf10 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_11, (400, 300), (1, 400), 0), out=buf10)
buf11 = buf4
del buf4
buf17 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf10,
primals_12, buf11, buf17, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_12
buf12 = buf10
del buf10
triton_poi_fused_relu_view_3[grid(19200)](buf11, buf12, 19200,
XBLOCK=256, num_warps=4, num_stages=1)
del buf11
buf13 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_14, buf12, reinterpret_tensor(
primals_13, (300, 16), (1, 300), 0), alpha=1, beta=1, out=buf13)
del primals_14
buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf15 = reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf14
buf16 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.
float32)
triton_per_fused_div_linalg_vector_norm_5[grid(64)](buf15, buf13,
buf16, 64, 16, XBLOCK=32, num_warps=4, num_stages=1)
return (buf7, buf16, reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(buf2, (64, 400), (400, 1), 0), buf5, buf6,
reinterpret_tensor(buf9, (64, 400), (400, 1), 0), buf12, buf13,
buf15, primals_13, buf17, primals_11, buf18, primals_7, buf19,
primals_5, buf20)
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class ActorDownActionNew(nn.Module):
"""a top-down module used in bothway message passing that passes messages to children and outputs action"""
def __init__(self, self_input_dim, action_dim, msg_dim, max_action,
max_children):
super(ActorDownActionNew, self).__init__()
self.max_action = max_action
self.action_base = MLPBase(self_input_dim + msg_dim, action_dim)
self.msg_base = MLPBase(self_input_dim + msg_dim, msg_dim *
max_children)
def forward(self, input_0, input_1):
primals_3 = self.action_base.l1.weight
primals_4 = self.action_base.l1.bias
primals_5 = self.action_base.l2.weight
primals_6 = self.action_base.l2.bias
primals_7 = self.action_base.l3.weight
primals_8 = self.action_base.l3.bias
primals_9 = self.msg_base.l1.weight
primals_10 = self.msg_base.l1.bias
primals_11 = self.msg_base.l2.weight
primals_12 = self.msg_base.l2.bias
primals_13 = self.msg_base.l3.weight
primals_14 = self.msg_base.l3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1]
|
yangfanthu/modular-rl
|
ActorDownAction
| false
| 13,140
|
[
"BSD-2-Clause"
] | 0
|
25c599bab641a7e732dbaf116cd240fa2358f113
|
https://github.com/yangfanthu/modular-rl/tree/25c599bab641a7e732dbaf116cd240fa2358f113
|
Block
|
import torch
import torch.nn as nn
from functools import partial
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
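# Annotation (not part of the original file): Attention.forward above is
# plain scaled dot-product attention, softmax(q @ k^T * scale) @ v, with
# scale = head_dim ** -0.5 unless qk_scale overrides it. A minimal
# equivalence sketch (assumes torch >= 2.0 for
# F.scaled_dot_product_attention):
if __name__ == '__main__':
    import torch.nn.functional as F
    _q, _k, _v = (torch.randn(1, 2, 4, 8) for _ in range(3))
    _attn = (_q @ _k.transpose(-2, -1) * 8 ** -0.5).softmax(dim=-1) @ _v
    assert torch.allclose(_attn,
        F.scaled_dot_product_attention(_q, _k, _v), atol=1e-6)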
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn
.GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06), vis=False):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from functools import partial
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16,), (1,))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf3
buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12,
primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12,
primals_6, buf13, buf14, primals_7, primals_8, buf15, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf13
del buf14
del primals_8
buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0)
del buf7
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf16)
del primals_10
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_gelu_10[grid(256)](buf16, buf17, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_add_11[grid(64)](buf19, primals_3, buf12,
primals_6, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
return buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2,
(16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0
), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0
), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class BlockNew(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn
.GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06), vis=False):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, input_0):
primals_1 = self.norm1.weight
primals_2 = self.norm1.bias
primals_4 = self.attn.qkv.weight
primals_5 = self.attn.proj.weight
primals_6 = self.attn.proj.bias
primals_7 = self.norm2.weight
primals_8 = self.norm2.bias
primals_9 = self.mlp.fc1.weight
primals_10 = self.mlp.fc1.bias
primals_11 = self.mlp.fc2.weight
primals_12 = self.mlp.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
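def _example_block_smoke_test():
    # A minimal, illustrative smoke test for BlockNew (not part of the
    # original module). It assumes a CUDA device, since call() pins
    # cuda:0, and the (batch=4, seq=4, dim=4) shapes the kernels above
    # were compiled for; other shapes would fail the stride asserts.
    block = BlockNew(dim=4, num_heads=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    y = block(x)  # fused layer norm + attention + MLP residual block
    assert y.shape == x.shape
    return y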
|
xuewengeophysics/Conformer
|
Block
| false
| 13,141
|
[
"Apache-2.0"
] | 0
|
e769a1ac9ab110dae2a356a4de1e06ccd0e95041
|
https://github.com/xuewengeophysics/Conformer/tree/e769a1ac9ab110dae2a356a4de1e06ccd0e95041
|
Biaffine
|
import torch
import torch.autograd
import torch.nn as nn
class Biaffine(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super(Biaffine, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
weight = torch.zeros((n_out, n_in + int(bias_x), n_in + int(bias_y)))
nn.init.xavier_normal_(weight)
self.weight = nn.Parameter(weight, requires_grad=True)
def extra_repr(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return s
def forward(self, x, y):
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
s = s.permute(0, 2, 3, 1)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.autograd
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](primals_1, buf0, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1),
0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(80)](primals_2, buf2, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1,
5), 0), out=buf3)
del buf1
return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 1, 4, 4), 0
), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0
), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0)
class BiaffineNew(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super(BiaffineNew, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
weight = torch.zeros((n_out, n_in + int(bias_x), n_in + int(bias_y)))
nn.init.xavier_normal_(weight)
self.weight = nn.Parameter(weight, requires_grad=True)
def extra_repr(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return s
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
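def _example_biaffine_check():
    # Illustrative equivalence check (not part of the original module):
    # the fused kernels should reproduce the reference computation
    #   s = einsum('bxi,oij,byj->boxy', x1, W, y1).permute(0, 2, 3, 1)
    # on the fixed (4, 4, 4) shapes baked into the kernels above.
    # Assumes a CUDA device, since call() targets cuda:0.
    m = BiaffineNew(n_in=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, device='cuda')
    x1 = torch.cat((x, torch.ones_like(x[..., :1])), -1)  # bias_x term
    y1 = torch.cat((y, torch.ones_like(y[..., :1])), -1)  # bias_y term
    ref = torch.einsum('bxi,oij,byj->boxy', x1, m.weight, y1)
    ref = ref.permute(0, 2, 3, 1)
    assert torch.allclose(m(x, y), ref, atol=1e-5)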
|
yifding/W2NER
|
Biaffine
| false
| 13,142
|
[
"MIT"
] | 0
|
d13128e45f3930a8b8faa794318939dc90a75974
|
https://github.com/yifding/W2NER/tree/d13128e45f3930a8b8faa794318939dc90a75974
|
SmoothBCEwLogits
|
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _WeightedLoss
class SmoothBCEwLogits(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
@staticmethod
def _smooth(targets: 'torch.Tensor', n_labels: 'int', smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = targets * (1.0 - smoothing) + 0.5 * smoothing
return targets
def forward(self, inputs, targets):
targets = SmoothBCEwLogits._smooth(targets, inputs.size(-1), self.
smoothing)
loss = F.binary_cross_entropy_with_logits(inputs, targets, self.weight)
if self.reduction == 'sum':
loss = loss.sum()
elif self.reduction == 'mean':
loss = loss.mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _WeightedLoss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tmp5 = tmp1 - tmp4
tmp7 = tmp5 * tmp6
tmp8 = triton_helpers.minimum(tmp3, tmp6)
tmp9 = tl_math.abs(tmp6)
tmp10 = -tmp9
tmp11 = tl_math.exp(tmp10)
tmp12 = libdevice.log1p(tmp11)
tmp13 = tmp8 - tmp12
tmp14 = tmp7 - tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tmp20 = tmp19 / tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
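# Note: with smoothing = 0 the target transform t * (1 - s) + 0.5 * s
# reduces to the identity, so the reduction above is the numerically
# stable binary_cross_entropy_with_logits,
#   loss = mean((1 - t) * x + log(1 + exp(-x))),
# computed via min(0, x) and log1p(exp(-|x|)) to avoid overflow, then
# averaged over all 256 elements.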
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0[grid
(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SmoothBCEwLogitsNew(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
@staticmethod
def _smooth(targets: 'torch.Tensor', n_labels: 'int', smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = targets * (1.0 - smoothing) + 0.5 * smoothing
return targets
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
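def _example_smooth_targets():
    # Illustrative check of the label-smoothing rule (not part of the
    # original module): t' = t * (1 - s) + 0.5 * s, so with s = 0.1 a
    # hard 1 becomes 0.95 and a hard 0 becomes 0.05. Runs on CPU.
    t = torch.tensor([0.0, 1.0])
    smoothed = SmoothBCEwLogitsNew._smooth(t, n_labels=2, smoothing=0.1)
    assert torch.allclose(smoothed, torch.tensor([0.05, 0.95]))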
|
yota-p/kaggle_titanic
|
SmoothBCEwLogits
| false
| 13,143
|
[
"MIT"
] | 0
|
36d2c53711482195f519d9280abadf0d6afa9a15
|
https://github.com/yota-p/kaggle_titanic/tree/36d2c53711482195f519d9280abadf0d6afa9a15
|
LayerNorm
|
import torch
import torch.autograd
import torch.nn as nn
class LayerNorm(nn.Module):
def __init__(self, input_dim, cond_dim=0, center=True, scale=True,
epsilon=None, conditional=False, hidden_units=None,
        hidden_activation='linear', hidden_initializer='xavier', **kwargs):
super(LayerNorm, self).__init__()
"""
input_dim: inputs.shape[-1]
cond_dim: cond.shape[-1]
"""
self.center = center
self.scale = scale
self.conditional = conditional
self.hidden_units = hidden_units
self.hidden_initializer = hidden_initializer
self.epsilon = epsilon or 1e-12
self.input_dim = input_dim
self.cond_dim = cond_dim
if self.center:
self.beta = nn.Parameter(torch.zeros(input_dim))
if self.scale:
self.gamma = nn.Parameter(torch.ones(input_dim))
if self.conditional:
if self.hidden_units is not None:
self.hidden_dense = nn.Linear(in_features=self.cond_dim,
out_features=self.hidden_units, bias=False)
if self.center:
self.beta_dense = nn.Linear(in_features=self.cond_dim,
out_features=input_dim, bias=False)
if self.scale:
self.gamma_dense = nn.Linear(in_features=self.cond_dim,
out_features=input_dim, bias=False)
self.initialize_weights()
def initialize_weights(self):
if self.conditional:
if self.hidden_units is not None:
if self.hidden_initializer == 'normal':
                    torch.nn.init.normal_(self.hidden_dense.weight)
elif self.hidden_initializer == 'xavier':
torch.nn.init.xavier_uniform_(self.hidden_dense.weight)
if self.center:
torch.nn.init.constant_(self.beta_dense.weight, 0)
if self.scale:
torch.nn.init.constant_(self.gamma_dense.weight, 0)
def forward(self, inputs, cond=None):
if self.conditional:
if self.hidden_units is not None:
cond = self.hidden_dense(cond)
for _ in range(len(inputs.shape) - len(cond.shape)):
cond = cond.unsqueeze(1)
if self.center:
beta = self.beta_dense(cond) + self.beta
if self.scale:
gamma = self.gamma_dense(cond) + self.gamma
else:
if self.center:
beta = self.beta
if self.scale:
gamma = self.gamma
outputs = inputs
if self.center:
mean = torch.mean(outputs, dim=-1).unsqueeze(-1)
outputs = outputs - mean
if self.scale:
variance = torch.mean(outputs ** 2, dim=-1).unsqueeze(-1)
std = (variance + self.epsilon) ** 0.5
outputs = outputs / std
outputs = outputs * gamma
if self.center:
outputs = outputs + beta
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.autograd
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_pow_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-12
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp0 / tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(256)](primals_3, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_pow_1[grid(256)](buf0, primals_2,
primals_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_1
del primals_2
return buf1, primals_3
class LayerNormNew(nn.Module):
def __init__(self, input_dim, cond_dim=0, center=True, scale=True,
epsilon=None, conditional=False, hidden_units=None,
        hidden_activation='linear', hidden_initializer='xavier', **kwargs):
super(LayerNormNew, self).__init__()
"""
input_dim: inputs.shape[-1]
cond_dim: cond.shape[-1]
"""
self.center = center
self.scale = scale
self.conditional = conditional
self.hidden_units = hidden_units
self.hidden_initializer = hidden_initializer
self.epsilon = epsilon or 1e-12
self.input_dim = input_dim
self.cond_dim = cond_dim
if self.center:
self.beta = nn.Parameter(torch.zeros(input_dim))
if self.scale:
self.gamma = nn.Parameter(torch.ones(input_dim))
if self.conditional:
if self.hidden_units is not None:
self.hidden_dense = nn.Linear(in_features=self.cond_dim,
out_features=self.hidden_units, bias=False)
if self.center:
self.beta_dense = nn.Linear(in_features=self.cond_dim,
out_features=input_dim, bias=False)
if self.scale:
self.gamma_dense = nn.Linear(in_features=self.cond_dim,
out_features=input_dim, bias=False)
self.initialize_weights()
def initialize_weights(self):
if self.conditional:
if self.hidden_units is not None:
if self.hidden_initializer == 'normal':
                    torch.nn.init.normal_(self.hidden_dense.weight)
elif self.hidden_initializer == 'xavier':
torch.nn.init.xavier_uniform_(self.hidden_dense.weight)
if self.center:
torch.nn.init.constant_(self.beta_dense.weight, 0)
if self.scale:
torch.nn.init.constant_(self.gamma_dense.weight, 0)
def forward(self, input_0):
primals_1 = self.beta
primals_2 = self.gamma
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
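def _example_layernorm_check():
    # Illustrative check (not part of the original module): with center
    # and scale enabled, the fused kernels compute a standard layer norm
    # over the last axis with biased variance and eps = 1e-12, so at the
    # default parameters (gamma = 1, beta = 0) the output should match
    # nn.LayerNorm(4, eps=1e-12). Assumes a CUDA device.
    m = LayerNormNew(input_dim=4).cuda()
    ref = nn.LayerNorm(4, eps=1e-12).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(m(x), ref(x), atol=1e-5)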
|
yifding/W2NER
|
LayerNorm
| false
| 13,144
|
[
"MIT"
] | 0
|
d13128e45f3930a8b8faa794318939dc90a75974
|
https://github.com/yifding/W2NER/tree/d13128e45f3930a8b8faa794318939dc90a75974
|
CriticDownAction
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class CriticDownAction(nn.Module):
"""a top-down module used in bothway message passing that passes messages to children and outputs q-values"""
def __init__(self, self_input_dim, action_dim, msg_dim, max_children):
super(CriticDownAction, self).__init__()
self.baseQ1 = MLPBase(self_input_dim + action_dim + msg_dim, 1)
self.baseQ2 = MLPBase(self_input_dim + action_dim + msg_dim, 1)
self.msg_base = MLPBase(self_input_dim + msg_dim, msg_dim *
max_children)
def forward(self, x, u, m):
xum = torch.cat([x, u, m], dim=-1)
x1 = self.baseQ1(xum)
x2 = self.baseQ2(xum)
xm = torch.cat([x, m], dim=-1)
xm = torch.tanh(xm)
msg_down = self.msg_base(xm)
msg_down = F.normalize(msg_down, dim=-1)
return x1, x2, msg_down
def Q1(self, x, u, m):
xum = torch.cat([x, u, m], dim=-1)
x1 = self.baseQ1(xum)
xm = torch.cat([x, m], dim=-1)
xm = torch.tanh(xm)
msg_down = self.msg_base(xm)
msg_down = F.normalize(msg_down, dim=-1)
return x1, msg_down
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'self_input_dim': 4, 'action_dim': 4, 'msg_dim': 4,
'max_children': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp12 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = libdevice.tanh(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = 1e-12
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp9, xmask)
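# Note: this reduction fuses F.normalize(msg_down, dim=-1): it takes the
# L2 norm over the trailing 16-wide axis and divides by max(norm, 1e-12),
# matching normalize()'s default eps clamp.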
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (400, 12), (12, 1))
assert_size_stride(primals_5, (400,), (1,))
assert_size_stride(primals_6, (300, 400), (400, 1))
assert_size_stride(primals_7, (300,), (1,))
assert_size_stride(primals_8, (1, 300), (300, 1))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (400, 12), (12, 1))
assert_size_stride(primals_11, (400,), (1,))
assert_size_stride(primals_12, (300, 400), (400, 1))
assert_size_stride(primals_13, (300,), (1,))
assert_size_stride(primals_14, (1, 300), (300, 1))
assert_size_stride(primals_15, (1,), (1,))
assert_size_stride(primals_16, (400, 8), (8, 1))
assert_size_stride(primals_17, (400,), (1,))
assert_size_stride(primals_18, (300, 400), (400, 1))
assert_size_stride(primals_19, (300,), (1,))
assert_size_stride(primals_20, (16, 300), (300, 1))
assert_size_stride(primals_21, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(768)](primals_1, primals_2, primals_3,
buf0, 768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 12), (12, 1), 0),
reinterpret_tensor(primals_4, (12, 400), (1, 12), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf1
buf30 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf2,
primals_5, buf30, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_6, (400, 300), (1, 400), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf29 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf3,
primals_7, buf4, buf29, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_7
buf5 = buf3
del buf3
triton_poi_fused_relu_view_3[grid(19200)](buf4, buf5, 19200, XBLOCK
=256, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8,
(300, 1), (1, 300), 0), alpha=1, beta=1, out=buf7)
del primals_9
buf8 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 12), (12, 1), 0),
reinterpret_tensor(primals_10, (12, 400), (1, 12), 0), out=buf8)
del primals_10
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf8
buf28 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf9,
primals_11, buf28, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_12, (400, 300), (1, 400), 0), out=buf10)
buf11 = buf4
del buf4
buf27 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf10,
primals_13, buf11, buf27, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_13
buf12 = buf10
del buf10
triton_poi_fused_relu_view_3[grid(19200)](buf11, buf12, 19200,
XBLOCK=256, num_warps=4, num_stages=1)
buf14 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_15, buf12, reinterpret_tensor(
primals_14, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf14)
del primals_15
buf15 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32
)
triton_poi_fused_cat_4[grid(512)](primals_1, primals_3, buf15, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_3
buf16 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf15, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_16, (8, 400), (1, 8), 0), out=buf16)
del primals_16
buf17 = reinterpret_tensor(buf16, (4, 4, 4, 400), (6400, 1600, 400,
1), 0)
del buf16
buf26 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf17,
primals_17, buf26, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf18 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_18, (400, 300), (1, 400), 0), out=buf18)
buf19 = buf11
del buf11
buf25 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf18,
primals_19, buf19, buf25, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_19
buf20 = buf18
del buf18
triton_poi_fused_relu_view_3[grid(19200)](buf19, buf20, 19200,
XBLOCK=256, num_warps=4, num_stages=1)
del buf19
buf21 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_21, buf20, reinterpret_tensor(
primals_20, (300, 16), (1, 300), 0), alpha=1, beta=1, out=buf21)
del primals_21
buf22 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf23 = reinterpret_tensor(buf22, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf22
buf24 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.
float32)
triton_per_fused_div_linalg_vector_norm_5[grid(64)](buf23, buf21,
buf24, 64, 16, XBLOCK=32, num_warps=4, num_stages=1)
return (reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0),
reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf24,
reinterpret_tensor(buf0, (64, 12), (12, 1), 0), reinterpret_tensor(
buf2, (64, 400), (400, 1), 0), buf5, reinterpret_tensor(buf9, (64,
400), (400, 1), 0), buf12, reinterpret_tensor(buf15, (64, 8), (8, 1
), 0), reinterpret_tensor(buf17, (64, 400), (400, 1), 0), buf20,
buf21, buf23, primals_20, buf25, primals_18, buf26, primals_14,
buf27, primals_12, buf28, primals_8, buf29, primals_6, buf30)
class MLPBase(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(MLPBase, self).__init__()
self.l1 = nn.Linear(num_inputs, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, num_outputs)
def forward(self, inputs):
x = F.relu(self.l1(inputs))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
class CriticDownActionNew(nn.Module):
"""a top-down module used in bothway message passing that passes messages to children and outputs q-values"""
def __init__(self, self_input_dim, action_dim, msg_dim, max_children):
super(CriticDownActionNew, self).__init__()
self.baseQ1 = MLPBase(self_input_dim + action_dim + msg_dim, 1)
self.baseQ2 = MLPBase(self_input_dim + action_dim + msg_dim, 1)
self.msg_base = MLPBase(self_input_dim + msg_dim, msg_dim *
max_children)
def Q1(self, x, u, m):
xum = torch.cat([x, u, m], dim=-1)
x1 = self.baseQ1(xum)
xm = torch.cat([x, m], dim=-1)
xm = torch.tanh(xm)
msg_down = self.msg_base(xm)
msg_down = F.normalize(msg_down, dim=-1)
return x1, msg_down
def forward(self, input_0, input_1, input_2):
primals_4 = self.baseQ1.l1.weight
primals_5 = self.baseQ1.l1.bias
primals_6 = self.baseQ1.l2.weight
primals_7 = self.baseQ1.l2.bias
primals_8 = self.baseQ1.l3.weight
primals_9 = self.baseQ1.l3.bias
primals_10 = self.baseQ2.l1.weight
primals_11 = self.baseQ2.l1.bias
primals_12 = self.baseQ2.l2.weight
primals_13 = self.baseQ2.l2.bias
primals_14 = self.baseQ2.l3.weight
primals_15 = self.baseQ2.l3.bias
primals_16 = self.msg_base.l1.weight
primals_17 = self.msg_base.l1.bias
primals_18 = self.msg_base.l2.weight
primals_19 = self.msg_base.l2.bias
primals_20 = self.msg_base.l3.weight
primals_21 = self.msg_base.l3.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0], output[1], output[2]
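def _example_critic_down_smoke_test():
    # A minimal, illustrative smoke test (not part of the original
    # module). Assumes a CUDA device, since call() pins cuda:0, and the
    # (4, 4, 4, 4) input shapes the kernels above were compiled for.
    critic = CriticDownActionNew(self_input_dim=4, action_dim=4,
        msg_dim=4, max_children=4).cuda()
    x, u, m = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    q1, q2, msg_down = critic(x, u, m)
    assert q1.shape == q2.shape == (4, 4, 4, 1)
    assert msg_down.shape == (4, 4, 4, 16)  # msg_dim * max_children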
|
yangfanthu/modular-rl
|
CriticDownAction
| false
| 13,145
|
[
"BSD-2-Clause"
] | 0
|
25c599bab641a7e732dbaf116cd240fa2358f113
|
https://github.com/yangfanthu/modular-rl/tree/25c599bab641a7e732dbaf116cd240fa2358f113
|
Attention
|
import math
import torch
import torch.nn as nn
import torch as t
class Linear(nn.Module):
"""
Linear Module
"""
def __init__(self, in_dim, out_dim, bias=True, w_init='linear'):
"""
:param in_dim: dimension of input
:param out_dim: dimension of output
:param bias: boolean. if True, bias is included.
        :param w_init: str. gain key passed to nn.init.calculate_gain for Xavier weight initialization.
"""
super(Linear, self).__init__()
self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias)
nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
return self.linear_layer(x)
class MultiheadAttention(nn.Module):
"""
Multihead attention mechanism (dot attention)
"""
def __init__(self, num_hidden_k):
"""
:param num_hidden_k: dimension of hidden
"""
super(MultiheadAttention, self).__init__()
self.num_hidden_k = num_hidden_k
self.attn_dropout = nn.Dropout(p=0.1)
def forward(self, key, value, query, mask=None, query_mask=None):
attn = t.bmm(query, key.transpose(1, 2))
attn = attn / math.sqrt(self.num_hidden_k)
if mask is not None:
attn = attn.masked_fill(mask, -2 ** 32 + 1)
attn = t.softmax(attn, dim=-1)
else:
attn = t.softmax(attn, dim=-1)
if query_mask is not None:
attn = attn * query_mask
attn = self.attn_dropout(attn)
result = t.bmm(attn, value)
return result, attn
class Attention(nn.Module):
"""
Attention Network
"""
def __init__(self, num_hidden, h=4):
"""
:param num_hidden: dimension of hidden
:param h: num of heads
"""
super(Attention, self).__init__()
self.num_hidden = num_hidden
self.num_hidden_per_attn = num_hidden // h
self.h = h
self.key = Linear(num_hidden, num_hidden, bias=False)
self.value = Linear(num_hidden, num_hidden, bias=False)
self.query = Linear(num_hidden, num_hidden, bias=False)
self.multihead = MultiheadAttention(self.num_hidden_per_attn)
self.residual_dropout = nn.Dropout(p=0.1)
self.final_linear = Linear(num_hidden * 2, num_hidden)
self.layer_norm_1 = nn.LayerNorm(num_hidden)
def forward(self, memory, decoder_input, mask=None, query_mask=None):
batch_size = memory.size(0)
seq_k = memory.size(1)
seq_q = decoder_input.size(1)
if query_mask is not None:
query_mask = query_mask.unsqueeze(-1).repeat(1, 1, seq_k)
query_mask = query_mask.repeat(self.h, 1, 1)
if mask is not None:
mask = mask.repeat(self.h, 1, 1)
key = self.key(memory).view(batch_size, seq_k, self.h, self.
num_hidden_per_attn)
value = self.value(memory).view(batch_size, seq_k, self.h, self.
num_hidden_per_attn)
query = self.query(decoder_input).view(batch_size, seq_q, self.h,
self.num_hidden_per_attn)
key = key.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self.
num_hidden_per_attn)
value = value.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self
.num_hidden_per_attn)
query = query.permute(2, 0, 1, 3).contiguous().view(-1, seq_q, self
.num_hidden_per_attn)
result, attns = self.multihead(key, value, query, mask=mask,
query_mask=query_mask)
result = result.view(self.h, batch_size, seq_q, self.
num_hidden_per_attn)
result = result.permute(1, 2, 0, 3).contiguous().view(batch_size,
seq_q, -1)
result = t.cat([decoder_input, result], dim=-1)
result = self.final_linear(result)
result = self.residual_dropout(result)
result = result + decoder_input
result = self.layer_norm_1(result)
return result, attns
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_hidden': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch as t
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
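# Note: kernels 1 and 2 together implement a numerically stable row
# softmax: kernel 1 subtracts the per-row maximum before exponentiating
# (the multiplications by 1.0 are the folded-in 1/sqrt(d) scale, since
# head_dim = 1 here), and kernel 2 divides by the row sum.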
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x1 + 16 * (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(4, 16)](buf2, buf3, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf2
triton_poi_fused_clone_0[grid(4, 16)](buf0, buf4, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(4, 16)](buf1, buf8, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0)
del buf1
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1,
0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_3[grid(128)](primals_2, buf9, buf10, 128,
XBLOCK=128, num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (16, 8),
(8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf11)
del primals_7
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(16)](buf11, primals_2,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(64)](buf11, primals_2,
buf12, buf13, primals_8, primals_9, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf12
del buf13
del primals_9
return buf14, buf7, primals_2, primals_8, reinterpret_tensor(primals_1,
(16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0
), buf11, primals_6, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0)
class Linear(nn.Module):
"""
Linear Module
"""
def __init__(self, in_dim, out_dim, bias=True, w_init='linear'):
"""
:param in_dim: dimension of input
:param out_dim: dimension of output
:param bias: boolean. if True, bias is included.
        :param w_init: str. gain key passed to nn.init.calculate_gain for Xavier weight initialization.
"""
super(Linear, self).__init__()
self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias)
nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
return self.linear_layer(x)
class MultiheadAttention(nn.Module):
"""
Multihead attention mechanism (dot attention)
"""
def __init__(self, num_hidden_k):
"""
:param num_hidden_k: dimension of hidden
"""
super(MultiheadAttention, self).__init__()
self.num_hidden_k = num_hidden_k
self.attn_dropout = nn.Dropout(p=0.1)
def forward(self, key, value, query, mask=None, query_mask=None):
attn = t.bmm(query, key.transpose(1, 2))
attn = attn / math.sqrt(self.num_hidden_k)
if mask is not None:
attn = attn.masked_fill(mask, -2 ** 32 + 1)
attn = t.softmax(attn, dim=-1)
else:
attn = t.softmax(attn, dim=-1)
if query_mask is not None:
attn = attn * query_mask
attn = self.attn_dropout(attn)
result = t.bmm(attn, value)
return result, attn
class AttentionNew(nn.Module):
"""
Attention Network
"""
def __init__(self, num_hidden, h=4):
"""
:param num_hidden: dimension of hidden
:param h: num of heads
"""
super(AttentionNew, self).__init__()
self.num_hidden = num_hidden
self.num_hidden_per_attn = num_hidden // h
self.h = h
self.key = Linear(num_hidden, num_hidden, bias=False)
self.value = Linear(num_hidden, num_hidden, bias=False)
self.query = Linear(num_hidden, num_hidden, bias=False)
self.multihead = MultiheadAttention(self.num_hidden_per_attn)
self.residual_dropout = nn.Dropout(p=0.1)
self.final_linear = Linear(num_hidden * 2, num_hidden)
self.layer_norm_1 = nn.LayerNorm(num_hidden)
def forward(self, input_0, input_1):
primals_3 = self.key.linear_layer.weight
primals_4 = self.value.linear_layer.weight
primals_5 = self.query.linear_layer.weight
primals_6 = self.final_linear.linear_layer.weight
primals_7 = self.final_linear.linear_layer.bias
primals_8 = self.layer_norm_1.weight
primals_9 = self.layer_norm_1.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
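def _example_attention_smoke_test():
    # A minimal, illustrative smoke test (not part of the original
    # module). Assumes a CUDA device, since call() pins cuda:0, and the
    # (4, 4, 4) shapes compiled above. Dropout is folded away, so this
    # corresponds to the eager module in eval mode.
    attn = AttentionNew(num_hidden=4).cuda()
    memory = torch.rand(4, 4, 4, device='cuda')
    decoder_input = torch.rand(4, 4, 4, device='cuda')
    result, attns = attn(memory, decoder_input)
    assert result.shape == (4, 4, 4)
    assert attns.shape == (16, 4, 4)  # h heads stacked along dim 0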
|
yhgon/Transformer-TTS
|
Attention
| false
| 13,146
|
[
"MIT"
] | 0
|
5f34945cb5500d484275700c4e393ed125d5e753
|
https://github.com/yhgon/Transformer-TTS/tree/5f34945cb5500d484275700c4e393ed125d5e753
|
SelfAttentionLayer
|
import torch
from torch import nn
from torch.nn import functional as F
class SelfAttentionLayer(nn.Module):
def __init__(self, dim, da, alpha=0.2, dropout=0.5):
super(SelfAttentionLayer, self).__init__()
self.dim = dim
self.da = da
self.alpha = alpha
self.dropout = dropout
self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da)))
self.b = nn.Parameter(torch.zeros(size=(self.da, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
nn.init.xavier_uniform_(self.b.data, gain=1.414)
def forward(self, h):
e = torch.matmul(torch.tanh(torch.matmul(h, self.a)), self.b).squeeze(
dim=1)
        attention = F.softmax(e, dim=-1)
return torch.matmul(attention, h)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'da': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp5 / tmp8
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf1, primals_3, out=buf2)
buf5 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused__softmax_1[grid(1)](buf2, buf5, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (1, 4), (0, 1), 0),
primals_1, out=buf6)
del buf5
return reinterpret_tensor(buf6, (4,), (1,), 0
), buf1, buf2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0)
class SelfAttentionLayerNew(nn.Module):
def __init__(self, dim, da, alpha=0.2, dropout=0.5):
super(SelfAttentionLayerNew, self).__init__()
self.dim = dim
self.da = da
self.alpha = alpha
self.dropout = dropout
self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da)))
self.b = nn.Parameter(torch.zeros(size=(self.da, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
nn.init.xavier_uniform_(self.b.data, gain=1.414)
def forward(self, input_0):
primals_1 = self.a
primals_3 = self.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
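def _example_self_attention_smoke_test():
    # A minimal, illustrative smoke test (not part of the original
    # module): pooled = softmax(tanh(h @ a) @ b) @ h. Assumes a CUDA
    # device, since call() pins cuda:0, and the fixed (4, 4) input shape.
    layer = SelfAttentionLayerNew(dim=4, da=4).cuda()
    h = torch.rand(4, 4, device='cuda')
    pooled = layer(h)
    assert pooled.shape == (4,)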
|
yuka1369/KBRD
|
SelfAttentionLayer
| false
| 13,147
|
[
"MIT"
] | 0
|
fc0f723c448299f00eef6daabff675640a930c26
|
https://github.com/yuka1369/KBRD/tree/fc0f723c448299f00eef6daabff675640a930c26
|
CRF
|
import torch
import torch.nn as nn
class CRF(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
if len(feats.shape) != 3:
raise ValueError('feats must be 3-d got {}-d'.format(feats.shape))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
Computes negative log likelihood between features and tags.
        Essentially the difference between the individual sequence scores
        and the sum of all possible sequence scores (the partition function).
Parameters:
feats: Input features [batch size, sequence length, number of tags]
            tags: Target tag indices [batch size, sequence length]. Each index should lie in [0, num_tags).
Returns:
Negative log likelihood [a scalar]
"""
if len(feats.shape) != 3:
raise ValueError('feats must be 3-d got {}-d'.format(feats.shape))
if len(tags.shape) != 2:
raise ValueError('tags must be 2-d but got {}-d'.format(tags.shape)
)
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
            tags: Target tag indices [batch size, sequence length]. Each index should lie in [0, num_tags).
Returns: Sequence score of shape [batch size]
"""
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1
)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
        Computes the partition function for CRF using the forward algorithm.
Basically calculate scores for all possible tag sequences for
the given feature vector sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
        Uses the Viterbi algorithm to predict the best tag sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
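# Hedged sanity sketch (added, not from the source): for tiny shapes the
# forward recurrence in _partition_function should agree with brute-force
# enumeration of every tag path. The helper below is hypothetical.
def _check_partition_against_enumeration():
    import itertools
    import torch
    crf = CRF(2)
    feats = torch.randn(1, 3, 2)
    scores = []
    for path in itertools.product(range(2), repeat=3):
        s = crf.start_transitions[path[0]] + crf.stop_transitions[path[-1]]
        for t, tag in enumerate(path):
            s = s + feats[0, t, tag]
        for a, b in zip(path, path[1:]):
            s = s + crf.transitions[a, b]
        scores.append(s)
    brute = torch.stack(scores).logsumexp(0)
    assert torch.allclose(crf._partition_function(feats)[0], brute, atol=1e-5)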
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_tags': 4}]
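# Companion sketch to the partition check above (added): Viterbi decoding
# should pick the same path as brute-force enumeration on tiny inputs.
def _check_viterbi_against_enumeration():
    import itertools
    import torch
    crf = CRF(2)
    feats = torch.randn(1, 3, 2)
    def path_score(path):
        s = crf.start_transitions[path[0]] + crf.stop_transitions[path[-1]]
        for t, tag in enumerate(path):
            s = s + feats[0, t, tag]
        for a, b in zip(path, path[1:]):
            s = s + crf.transitions[a, b]
        return s.item()
    best = max(itertools.product(range(2), repeat=3), key=path_score)
    assert crf(feats)[0].tolist() == list(best)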
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + 2)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr1 + 3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = triton_helpers.maximum(tmp5, tmp11)
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp12, tmp18)
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = triton_helpers.maximum(tmp19, tmp25)
tmp27 = tmp5 > tmp11
tmp28 = tmp5 == tmp11
tmp29 = tmp5 != tmp5
tmp30 = tmp11 != tmp11
tmp31 = tmp29 > tmp30
tmp32 = tmp27 | tmp31
tmp33 = tmp29 & tmp30
tmp34 = tmp28 | tmp33
tmp35 = tl.full([1], 0, tl.int64)
tmp36 = tl.full([1], 1, tl.int64)
tmp37 = tmp35 < tmp36
tmp38 = tmp34 & tmp37
tmp39 = tmp32 | tmp38
tmp40 = tl.where(tmp39, tmp5, tmp11)
tmp41 = tl.where(tmp39, tmp35, tmp36)
tmp42 = tmp40 > tmp18
tmp43 = tmp40 == tmp18
tmp44 = tmp40 != tmp40
tmp45 = tmp18 != tmp18
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp41 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp40, tmp18)
tmp55 = tl.where(tmp53, tmp41, tmp50)
tmp56 = tmp54 > tmp25
tmp57 = tmp54 == tmp25
tmp58 = tmp54 != tmp54
tmp59 = tmp25 != tmp25
tmp60 = tmp58 > tmp59
tmp61 = tmp56 | tmp60
tmp62 = tmp58 & tmp59
tmp63 = tmp57 | tmp62
tmp64 = tl.full([1], 3, tl.int64)
tmp65 = tmp55 < tmp64
tmp66 = tmp63 & tmp65
tmp67 = tmp61 | tmp66
tl.where(tmp67, tmp54, tmp25)
tmp69 = tl.where(tmp67, tmp55, tmp64)
tl.store(out_ptr0 + x2, tmp26, xmask)
tl.store(out_ptr1 + x2, tmp69, xmask)
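# Hedged reading of the branchless reduction above (an interpretation of the
# generated code, not documentation): each pairwise step keeps the larger
# value and its index, with NaN sorting above numbers (x != x tests for NaN)
# and ties resolved toward the smaller index -- the same contract as torch.max.
def _pairwise_max_with_index(val_a, idx_a, val_b, idx_b):
    import torch
    nan_a = val_a != val_a
    nan_b = val_b != val_b
    gt = (val_a > val_b) | (nan_a & ~nan_b)   # NaN beats any number
    eq = (val_a == val_b) | (nan_a & nan_b)
    take_a = gt | (eq & (idx_a < idx_b))      # tie-break: smaller index wins
    return torch.where(take_a, val_a, val_b), torch.where(take_a, idx_a, idx_b)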
@triton.jit
def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (5 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (6 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (7 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 > tmp9
tmp24 = tmp4 == tmp9
tmp25 = tmp4 != tmp4
tmp26 = tmp9 != tmp9
tmp27 = tmp25 > tmp26
tmp28 = tmp23 | tmp27
tmp29 = tmp25 & tmp26
tmp30 = tmp24 | tmp29
tmp31 = tl.full([1], 0, tl.int64)
tmp32 = tl.full([1], 1, tl.int64)
tmp33 = tmp31 < tmp32
tmp34 = tmp30 & tmp33
tmp35 = tmp28 | tmp34
tmp36 = tl.where(tmp35, tmp4, tmp9)
tmp37 = tl.where(tmp35, tmp31, tmp32)
tmp38 = tmp36 > tmp15
tmp39 = tmp36 == tmp15
tmp40 = tmp36 != tmp36
tmp41 = tmp15 != tmp15
tmp42 = tmp40 > tmp41
tmp43 = tmp38 | tmp42
tmp44 = tmp40 & tmp41
tmp45 = tmp39 | tmp44
tmp46 = tl.full([1], 2, tl.int64)
tmp47 = tmp37 < tmp46
tmp48 = tmp45 & tmp47
tmp49 = tmp43 | tmp48
tmp50 = tl.where(tmp49, tmp36, tmp15)
tmp51 = tl.where(tmp49, tmp37, tmp46)
tmp52 = tmp50 > tmp21
tmp53 = tmp50 == tmp21
tmp54 = tmp50 != tmp50
tmp55 = tmp21 != tmp21
tmp56 = tmp54 > tmp55
tmp57 = tmp52 | tmp56
tmp58 = tmp54 & tmp55
tmp59 = tmp53 | tmp58
tmp60 = tl.full([1], 3, tl.int64)
tmp61 = tmp51 < tmp60
tmp62 = tmp59 & tmp61
tmp63 = tmp57 | tmp62
tl.where(tmp63, tmp50, tmp21)
tmp65 = tl.where(tmp63, tmp51, tmp60)
tl.store(out_ptr0 + x2, tmp22, xmask)
tl.store(out_ptr1 + x2, tmp65, xmask)
@triton.jit
def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (9 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (10 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (11 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 > tmp9
tmp24 = tmp4 == tmp9
tmp25 = tmp4 != tmp4
tmp26 = tmp9 != tmp9
tmp27 = tmp25 > tmp26
tmp28 = tmp23 | tmp27
tmp29 = tmp25 & tmp26
tmp30 = tmp24 | tmp29
tmp31 = tl.full([1], 0, tl.int64)
tmp32 = tl.full([1], 1, tl.int64)
tmp33 = tmp31 < tmp32
tmp34 = tmp30 & tmp33
tmp35 = tmp28 | tmp34
tmp36 = tl.where(tmp35, tmp4, tmp9)
tmp37 = tl.where(tmp35, tmp31, tmp32)
tmp38 = tmp36 > tmp15
tmp39 = tmp36 == tmp15
tmp40 = tmp36 != tmp36
tmp41 = tmp15 != tmp15
tmp42 = tmp40 > tmp41
tmp43 = tmp38 | tmp42
tmp44 = tmp40 & tmp41
tmp45 = tmp39 | tmp44
tmp46 = tl.full([1], 2, tl.int64)
tmp47 = tmp37 < tmp46
tmp48 = tmp45 & tmp47
tmp49 = tmp43 | tmp48
tmp50 = tl.where(tmp49, tmp36, tmp15)
tmp51 = tl.where(tmp49, tmp37, tmp46)
tmp52 = tmp50 > tmp21
tmp53 = tmp50 == tmp21
tmp54 = tmp50 != tmp50
tmp55 = tmp21 != tmp21
tmp56 = tmp54 > tmp55
tmp57 = tmp52 | tmp56
tmp58 = tmp54 & tmp55
tmp59 = tmp53 | tmp58
tmp60 = tl.full([1], 3, tl.int64)
tmp61 = tmp51 < tmp60
tmp62 = tmp59 & tmp61
tmp63 = tmp57 | tmp62
tl.where(tmp63, tmp50, tmp21)
tmp65 = tl.where(tmp63, tmp51, tmp60)
tl.store(out_ptr0 + x2, tmp22, xmask)
tl.store(out_ptr1 + x2, tmp65, xmask)
@triton.jit
def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr2 + 1)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp27 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp28 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr2 + 2)
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp47 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp48 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp50 = tl.load(in_ptr2 + 3)
tmp51 = tl.broadcast_to(tmp50, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 + tmp4
tmp8 = tmp6 + tmp7
tmp11 = tmp8 + tmp10
tmp12 = tmp5 > tmp11
tmp13 = tmp5 == tmp11
tmp14 = tmp5 != tmp5
tmp15 = tmp11 != tmp11
tmp16 = tmp14 > tmp15
tmp17 = tmp12 | tmp16
tmp18 = tmp14 & tmp15
tmp19 = tmp13 | tmp18
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.full([1], 1, tl.int64)
tmp22 = tmp20 < tmp21
tmp23 = tmp19 & tmp22
tmp24 = tmp17 | tmp23
tmp25 = tl.where(tmp24, tmp5, tmp11)
tmp26 = tl.where(tmp24, tmp20, tmp21)
tmp29 = tmp27 + tmp28
tmp32 = tmp29 + tmp31
tmp33 = tmp25 > tmp32
tmp34 = tmp25 == tmp32
tmp35 = tmp25 != tmp25
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 2, tl.int64)
tmp42 = tmp26 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp25, tmp32)
tmp46 = tl.where(tmp44, tmp26, tmp41)
tmp49 = tmp47 + tmp48
tmp52 = tmp49 + tmp51
tmp53 = tmp45 > tmp52
tmp54 = tmp45 == tmp52
tmp55 = tmp45 != tmp45
tmp56 = tmp52 != tmp52
tmp57 = tmp55 > tmp56
tmp58 = tmp53 | tmp57
tmp59 = tmp55 & tmp56
tmp60 = tmp54 | tmp59
tmp61 = tl.full([1], 3, tl.int64)
tmp62 = tmp46 < tmp61
tmp63 = tmp60 & tmp62
tmp64 = tmp58 | tmp63
tl.where(tmp64, tmp45, tmp52)
tmp66 = tl.where(tmp64, tmp46, tmp61)
tmp67 = tl.full([XBLOCK], 4, tl.int32)
tmp68 = tmp66 + tmp67
tmp69 = tmp66 < 0
tmp70 = tl.where(tmp69, tmp68, tmp66)
tl.device_assert((0 <= tmp70) & (tmp70 < 4) | ~xmask,
'index out of bounds: 0 <= tmp70 < 4')
tmp72 = tl.load(in_ptr3 + (tmp70 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp73 = tmp72 + tmp67
tmp74 = tmp72 < 0
tmp75 = tl.where(tmp74, tmp73, tmp72)
tl.device_assert((0 <= tmp75) & (tmp75 < 4) | ~xmask,
'index out of bounds: 0 <= tmp75 < 4')
tmp77 = tl.load(in_ptr4 + (tmp75 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp78 = tmp77 + tmp67
tmp79 = tmp77 < 0
tmp80 = tl.where(tmp79, tmp78, tmp77)
tl.device_assert((0 <= tmp80) & (tmp80 < 4) | ~xmask,
'index out of bounds: 0 <= tmp80 < 4')
tmp82 = tl.load(in_ptr5 + (tmp80 + 4 * x0), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + 4 * x0, tmp66, xmask)
tl.store(out_ptr1 + 4 * x0, tmp82, xmask)
tl.store(out_ptr2 + 4 * x0, tmp77, xmask)
tl.store(out_ptr3 + 4 * x0, tmp72, xmask)
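# Hedged illustration (added): the gather chain above walks Viterbi
# backpointers for a fixed sequence length of 4. In eager PyTorch the same
# backward pass reads, for backpointer tensors `paths` (each [batch, num_tags])
# and the final best tag `tag` ([batch, 1]):
def _backtrack(paths, tag):
    import torch
    tags = [tag]
    for idx in reversed(paths):
        tag = idx.gather(1, tag)  # follow the pointer chosen at this step
        tags.append(tag)
    tags.reverse()
    return torch.cat(tags, 1)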
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4,), (1,))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_add_max_0[grid(16)](arg0_1, arg1_1, arg2_1, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_1[grid(16)](buf0, arg0_1, arg2_1, buf2,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = buf0
del buf0
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_2[grid(16)](buf2, arg0_1, arg2_1, buf4,
buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg2_1
del buf2
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf6 = reinterpret_tensor(buf10, (4, 1), (4, 1), 3)
buf7 = reinterpret_tensor(buf10, (4, 1), (4, 1), 0)
buf8 = reinterpret_tensor(buf10, (4, 1), (4, 1), 1)
buf9 = reinterpret_tensor(buf10, (4, 1), (4, 1), 2)
triton_poi_fused_add_gather_max_3[grid(4)](buf4, arg0_1, arg3_1,
buf5, buf3, buf1, buf6, buf7, buf8, buf9, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del arg0_1
del arg3_1
del buf1
del buf3
del buf4
del buf5
return buf10,
class CRFNew(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRFNew, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def loss(self, feats, tags):
"""
        Computes the negative log likelihood of the tags given the features:
        essentially the difference between the individual sequence scores and
        the log-sum over all possible sequence scores (the partition function).
        Parameters:
            feats: Input features [batch size, sequence length, number of tags]
            tags: Target tag indices [batch size, sequence length]. Each index
                should be in the range [0, num_tags)
Returns:
Negative log likelihood [a scalar]
"""
        if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(
                feats.shape)))
        if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d but got {}-d'.format(len(
                tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
            tags: Target tag indices [batch size, sequence length]. Each index
                should be in the range [0, num_tags)
Returns: Sequence score of shape [batch size]
"""
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1
)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
        Computes the partition function for the CRF using the forward
        algorithm, i.e. it sums (in log space) the scores of all possible tag
        sequences for the given feature sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
        Uses the Viterbi algorithm to predict the best tag sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
def forward(self, input_0):
arg2_1 = self.transitions
arg1_1 = self.start_transitions
arg3_1 = self.stop_transitions
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
yezhengli-Mr9/torchnlp
|
CRF
| false
| 13,148
|
[
"Apache-2.0"
] | 0
|
0f2ad6d149a413da9f03c6f6694c429746de6551
|
https://github.com/yezhengli-Mr9/torchnlp/tree/0f2ad6d149a413da9f03c6f6694c429746de6551
|
ScaledDotAttention
|
import torch
import torch.nn as nn
from torch.nn import LayerNorm
def scaled_dot_attention(q, k, v, mask=None, noise=0, dropout=lambda x: x):
"""
:param q: queries, (batch, time1, channels1)
:param k: keys, (batch, time2, channels1)
:param v: values, (batch, time2, channels2)
:param mask: boolean mask, (batch, time1, time2)
    :param noise: scale of the Gaussian noise added to the attention logits (0 disables it)
    :param dropout: a dropout callable; passing it in keeps dropout defined as a module, for correct train/eval switching
:return: (batch, time1, channels2), (batch, time1, time2)
"""
weights = torch.matmul(q, k.transpose(2, 1))
if mask is not None:
weights = weights.masked_fill(~mask, float('-inf'))
if noise:
        weights = weights + noise * torch.randn_like(weights)  # randn_like keeps device/dtype
weights = torch.softmax(weights, dim=-1)
weights = dropout(weights)
result = torch.matmul(weights, v)
return result, weights
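# Minimal usage sketch (added for illustration): attend over a padded batch of
# keys/values, zeroing out attention to positions past each true length.
def _demo_scaled_dot_attention():
    import torch
    q = torch.rand(2, 3, 8)   # (batch, time1, channels1)
    k = torch.rand(2, 5, 8)   # (batch, time2, channels1)
    v = torch.rand(2, 5, 6)   # (batch, time2, channels2)
    m = torch.ones(2, 3, 5, dtype=torch.bool)
    m[1, :, 4:] = False       # batch item 1 has only 4 valid key positions
    out, w = scaled_dot_attention(q, k, v, mask=m)
    assert out.shape == (2, 3, 6) and w.shape == (2, 3, 5)
    assert torch.all(w[1, :, 4:] == 0)  # masked keys receive zero weight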
def mask(x, lengths, dim=-1):
assert dim != 0, 'Masking not available for batch dimension'
assert len(lengths) == x.shape[0
], 'Lengths must contain as many elements as there are items in the batch'
lengths = torch.as_tensor(lengths)
to_expand = [1] * (x.ndim - 1) + [-1]
mask = torch.arange(x.shape[dim]).expand(to_expand).transpose(dim, -1
).expand(x.shape)
mask = mask < lengths.expand(to_expand).transpose(0, -1)
return mask
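# Illustrative check (added): mask() yields True for valid positions along
# `dim` given per-item lengths, matching the boolean mask expected above.
def _demo_mask():
    import torch
    x = torch.zeros(2, 5)
    m = mask(x, lengths=[3, 5], dim=-1)
    assert m.tolist() == [[True, True, True, False, False],
        [True, True, True, True, True]]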
class Conv1d(nn.Conv1d):
"""A wrapper around nn.Conv1d, that works on (batch, time, channels)"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
dilation=1, groups=1, bias=True, padding=0):
super(Conv1d, self).__init__(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, dilation=
dilation, groups=groups, bias=bias, padding=padding)
def forward(self, x):
return super().forward(x.transpose(2, 1)).transpose(2, 1)
class ScaledDotAttention(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, noise=0,
normalize=False, dropout=False):
super(ScaledDotAttention, self).__init__()
self.noise = noise
self.dropout = torch.nn.Dropout(p=dropout)
self.normalize = normalize
self.fc_query = Conv1d(in_channels, hidden_channels)
self.fc_keys = Conv1d(in_channels, hidden_channels)
if normalize:
self.qnorm = LayerNorm(in_channels)
self.knorm = LayerNorm(in_channels)
self.fc_keys.weight = torch.nn.Parameter(self.fc_query.weight.clone())
self.fc_keys.bias = torch.nn.Parameter(self.fc_query.bias.clone())
self.fc_values = Conv1d(in_channels, hidden_channels)
self.fc_out = Conv1d(hidden_channels, out_channels)
def forward(self, q, k, v, mask=None):
"""
:param q: queries, (batch, time1, channels1)
:param k: keys, (batch, time2, channels1)
:param v: values, (batch, time2, channels2)
:param mask: boolean mask, (batch, time1, time2)
:return: (batch, time1, channels2), (batch, time1, time2)
"""
noise = self.noise if self.training else 0
if self.normalize:
q = self.qnorm(q)
k = self.knorm(k)
alignment, weights = scaled_dot_attention(self.fc_query(q), self.
fc_keys(k), self.fc_values(v), mask, noise=noise, dropout=self.
dropout)
alignment = self.fc_out(alignment)
return alignment, weights
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'in_channels': 4, 'hidden_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn import LayerNorm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
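# Hedged note on the two kernels above (an interpretation of the generated
# code): softmax is split into a shift-and-exponentiate pass and a normalize
# pass, implementing the numerically stable identity
#   softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)).
def _two_pass_softmax(x):
    import torch
    shifted = (x - x.max(dim=-1, keepdim=True).values).exp()  # pass 1
    return shifted / shifted.sum(dim=-1, keepdim=True)        # pass 2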
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf0
del buf0
triton_poi_fused_convolution_0[grid(16, 4)](primals_4, buf2, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
buf4 = buf2
del buf2
triton_poi_fused_convolution_0[grid(16, 4)](primals_7, buf4, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_8, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4), (16, 4, 1))
buf6 = buf1
del buf1
triton_poi_fused_convolution_1[grid(64)](buf6, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf7 = buf3
del buf3
triton_poi_fused_convolution_1[grid(64)](buf7, primals_6, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_6
buf8 = buf4
del buf4
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (16, 1, 4),
0), buf7, out=buf8)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = buf8
del buf8
triton_poi_fused__softmax_3[grid(64)](buf9, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf11 = buf5
del buf5
triton_poi_fused_convolution_1[grid(64)](buf11, primals_9, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf12 = buf9
del buf9
extern_kernels.bmm(buf10, reinterpret_tensor(buf11, (4, 4, 4), (16,
1, 4), 0), out=buf12)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_convolution_0[grid(16, 4)](buf12, buf13, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 4), (16, 4, 1))
del buf13
buf15 = buf14
del buf14
triton_poi_fused_convolution_1[grid(64)](buf15, primals_11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
return reinterpret_tensor(buf15, (4, 4, 4), (16, 1, 4), 0
), buf10, primals_2, primals_5, primals_8, primals_10, reinterpret_tensor(
primals_1, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_4,
(4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_7, (4, 4, 4),
(16, 1, 4), 0), buf10, reinterpret_tensor(buf12, (4, 4, 4), (16, 1,
4), 0), buf11, buf6, reinterpret_tensor(buf7, (4, 4, 4), (16, 1, 4), 0)
def scaled_dot_attention(q, k, v, mask=None, noise=0, dropout=lambda x: x):
"""
:param q: queries, (batch, time1, channels1)
:param k: keys, (batch, time2, channels1)
:param v: values, (batch, time2, channels2)
:param mask: boolean mask, (batch, time1, time2)
    :param noise: scale of the Gaussian noise added to the attention logits (0 disables it)
    :param dropout: a dropout callable; passing it in keeps dropout defined as a module, for correct train/eval switching
:return: (batch, time1, channels2), (batch, time1, time2)
"""
weights = torch.matmul(q, k.transpose(2, 1))
if mask is not None:
weights = weights.masked_fill(~mask, float('-inf'))
if noise:
        weights = weights + noise * torch.randn_like(weights)  # randn_like keeps device/dtype
weights = torch.softmax(weights, dim=-1)
weights = dropout(weights)
result = torch.matmul(weights, v)
return result, weights
def mask(x, lengths, dim=-1):
assert dim != 0, 'Masking not available for batch dimension'
assert len(lengths) == x.shape[0
], 'Lengths must contain as many elements as there are items in the batch'
lengths = torch.as_tensor(lengths)
to_expand = [1] * (x.ndim - 1) + [-1]
mask = torch.arange(x.shape[dim]).expand(to_expand).transpose(dim, -1
).expand(x.shape)
mask = mask < lengths.expand(to_expand).transpose(0, -1)
return mask
class Conv1d(nn.Conv1d):
"""A wrapper around nn.Conv1d, that works on (batch, time, channels)"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
dilation=1, groups=1, bias=True, padding=0):
super(Conv1d, self).__init__(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, dilation=
dilation, groups=groups, bias=bias, padding=padding)
def forward(self, x):
return super().forward(x.transpose(2, 1)).transpose(2, 1)
class ScaledDotAttentionNew(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, noise=0,
normalize=False, dropout=False):
super(ScaledDotAttentionNew, self).__init__()
self.noise = noise
self.dropout = torch.nn.Dropout(p=dropout)
self.normalize = normalize
self.fc_query = Conv1d(in_channels, hidden_channels)
self.fc_keys = Conv1d(in_channels, hidden_channels)
if normalize:
self.qnorm = LayerNorm(in_channels)
self.knorm = LayerNorm(in_channels)
self.fc_keys.weight = torch.nn.Parameter(self.fc_query.weight.clone())
self.fc_keys.bias = torch.nn.Parameter(self.fc_query.bias.clone())
self.fc_values = Conv1d(in_channels, hidden_channels)
self.fc_out = Conv1d(hidden_channels, out_channels)
def forward(self, input_0, input_1, input_2):
primals_2 = self.fc_query.weight
primals_3 = self.fc_query.bias
primals_5 = self.fc_keys.weight
primals_6 = self.fc_keys.bias
primals_8 = self.fc_values.weight
primals_9 = self.fc_values.bias
primals_10 = self.fc_out.weight
primals_11 = self.fc_out.bias
primals_1 = input_0
primals_4 = input_1
primals_7 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
|
yhgon/speedyspeech
|
ScaledDotAttention
| false
| 13,149
|
[
"BSD-3-Clause"
] | 0
|
574c6a94091431f313e2aae8e154b8c80e6908ce
|
https://github.com/yhgon/speedyspeech/tree/574c6a94091431f313e2aae8e154b8c80e6908ce
|
CoPredictor
|
import torch
import torch.autograd
import torch.nn as nn
class Biaffine(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super(Biaffine, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
weight = torch.zeros((n_out, n_in + int(bias_x), n_in + int(bias_y)))
nn.init.xavier_normal_(weight)
self.weight = nn.Parameter(weight, requires_grad=True)
def extra_repr(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return s
def forward(self, x, y):
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
s = s.permute(0, 2, 3, 1)
return s
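# Shape walkthrough (added for illustration): with bias columns appended, x is
# (b, x_len, n_in + 1) and y is (b, y_len, n_in + 1); the einsum
# 'bxi,oij,byj->boxy' contracts both feature axes against the
# (n_out, n_in + 1, n_in + 1) weight, and the permute yields
# (b, x_len, y_len, n_out) scores.
def _demo_biaffine_shapes():
    import torch
    layer = Biaffine(n_in=4, n_out=3)
    s = layer(torch.rand(2, 5, 4), torch.rand(2, 7, 4))
    assert s.shape == (2, 5, 7, 3)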
class MLP(nn.Module):
def __init__(self, n_in, n_out, dropout=0):
super().__init__()
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.GELU()
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.dropout(x)
x = self.linear(x)
x = self.activation(x)
return x
class CoPredictor(nn.Module):
def __init__(self, cls_num, hid_size, biaffine_size, channels,
ffnn_hid_size, dropout=0):
super().__init__()
self.mlp1 = MLP(n_in=hid_size, n_out=biaffine_size, dropout=dropout)
self.mlp2 = MLP(n_in=hid_size, n_out=biaffine_size, dropout=dropout)
self.biaffine = Biaffine(n_in=biaffine_size, n_out=cls_num, bias_x=
True, bias_y=True)
self.mlp_rel = MLP(channels, ffnn_hid_size, dropout=dropout)
self.linear = nn.Linear(ffnn_hid_size, cls_num)
self.dropout = nn.Dropout(dropout)
def forward(self, x, y, z):
ent_sub = self.dropout(self.mlp1(x))
ent_obj = self.dropout(self.mlp2(y))
o1 = self.biaffine(ent_sub, ent_obj)
z = self.dropout(self.mlp_rel(z))
o2 = self.linear(z)
return o1 + o2
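# Minimal shape check (added): with the sizes from get_init_inputs below, the
# biaffine branch yields (batch, L, L, cls_num) scores and the MLP + linear
# branch broadcasts against them in the final sum.
def _demo_copredictor():
    import torch
    m = CoPredictor(cls_num=4, hid_size=4, biaffine_size=4, channels=4,
        ffnn_hid_size=4)
    out = m(torch.rand(4, 4, 4), torch.rand(4, 4, 4), torch.rand(4, 4, 4))
    assert out.shape == (4, 4, 4, 4)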
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'cls_num': 4, 'hid_size': 4, 'biaffine_size': 4,
'channels': 4, 'ffnn_hid_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.autograd
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = 0.5
tmp7 = tmp5 * tmp6
tmp8 = 0.7071067811865476
tmp9 = tmp5 * tmp8
tmp10 = libdevice.erf(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp7 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp19 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp20 = tl.where(tmp16, tmp11, tmp19)
tmp21 = tl.where(tmp4, tmp15, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 100
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5 % 4
x2 = xindex // 20
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 5 * x2 + 25 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 20
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = xindex // 4
y0 = yindex % 5
y1 = yindex // 5
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 5 * x3 + 20 * x2 + 80 * y1), xmask &
ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
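# Hedged reading of the kernel above: it evaluates the exact, erf-based GELU,
#   gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))),
# which matches torch.nn.functional.gelu in its default (non-tanh) mode.
def _check_erf_gelu():
    import torch
    x = torch.randn(64)
    manual = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))
    assert torch.allclose(manual, torch.nn.functional.gelu(x), atol=1e-6)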
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x5 = xindex // 4 % 16
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (x5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x4, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 5, 5), (25, 5, 1))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(primals_4, (16,
4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_5
del primals_6
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](buf0, buf2, 80, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((5, 4, 1, 5, 1, 1), (20, 5, 5, 1, 1, 1),
torch.float32)
triton_poi_fused_clone_1[grid(100)](primals_7, buf3, 100, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
buf4 = empty_strided_cuda((1, 16, 20), (320, 20, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 5), (0, 5, 1),
0), reinterpret_tensor(buf3, (1, 5, 20), (0, 20, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(80)](buf1, buf5, 80, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 5, 4, 4, 1, 1), (80, 16, 4, 1, 1, 1),
torch.float32)
triton_poi_fused_clone_2[grid(20, 16)](buf4, buf6, 20, 16, XBLOCK=
16, YBLOCK=32, num_warps=4, num_stages=1)
del buf4
buf7 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(buf5, reinterpret_tensor(buf6, (4, 5, 16), (80,
16, 1), 0), out=buf7)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_10, reinterpret_tensor(primals_8, (16,
4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf8)
del primals_10
del primals_9
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_3[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf10)
buf11 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 1, 16, 4), 0)
del buf7
triton_poi_fused_add_4[grid(256)](buf11, buf10, primals_12, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf10
del primals_12
return buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0
), buf1, reinterpret_tensor(primals_8, (16, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf9, (16, 4), (4, 1), 0
), primals_11, reinterpret_tensor(buf5, (4, 5, 4), (20, 1, 5), 0
), reinterpret_tensor(buf6, (4, 16, 5), (80, 1, 16), 0
), reinterpret_tensor(buf2, (1, 5, 16), (80, 1, 5), 0
), reinterpret_tensor(buf3, (1, 20, 5), (100, 1, 20), 0)
class Biaffine(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super(Biaffine, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
weight = torch.zeros((n_out, n_in + int(bias_x), n_in + int(bias_y)))
nn.init.xavier_normal_(weight)
self.weight = nn.Parameter(weight, requires_grad=True)
def extra_repr(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return s
def forward(self, x, y):
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
s = s.permute(0, 2, 3, 1)
return s
class MLP(nn.Module):
def __init__(self, n_in, n_out, dropout=0):
super().__init__()
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.GELU()
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.dropout(x)
x = self.linear(x)
x = self.activation(x)
return x
class CoPredictorNew(nn.Module):
def __init__(self, cls_num, hid_size, biaffine_size, channels,
ffnn_hid_size, dropout=0):
super().__init__()
self.mlp1 = MLP(n_in=hid_size, n_out=biaffine_size, dropout=dropout)
self.mlp2 = MLP(n_in=hid_size, n_out=biaffine_size, dropout=dropout)
self.biaffine = Biaffine(n_in=biaffine_size, n_out=cls_num, bias_x=
True, bias_y=True)
self.mlp_rel = MLP(channels, ffnn_hid_size, dropout=dropout)
self.linear = nn.Linear(ffnn_hid_size, cls_num)
self.dropout = nn.Dropout(dropout)
def forward(self, input_0, input_1, input_2):
primals_2 = self.mlp1.linear.weight
primals_3 = self.mlp1.linear.bias
primals_5 = self.mlp2.linear.weight
primals_6 = self.mlp2.linear.bias
primals_7 = self.biaffine.weight
primals_9 = self.mlp_rel.linear.weight
primals_10 = self.mlp_rel.linear.bias
primals_11 = self.linear.weight
primals_12 = self.linear.bias
primals_1 = input_0
primals_4 = input_1
primals_8 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
|
yifding/W2NER
|
CoPredictor
| false
| 13,150
|
[
"MIT"
] | 0
|
d13128e45f3930a8b8faa794318939dc90a75974
|
https://github.com/yifding/W2NER/tree/d13128e45f3930a8b8faa794318939dc90a75974
|
CRFOutputLayer
|
import torch
import torch.nn as nn
class CRF(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(
                feats.shape)))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
        Computes the negative log likelihood of the tags given the features:
        essentially the difference between the individual sequence scores and
        the log-sum over all possible sequence scores (the partition function).
        Parameters:
            feats: Input features [batch size, sequence length, number of tags]
            tags: Target tag indices [batch size, sequence length]. Each index
                should be in the range [0, num_tags)
Returns:
Negative log likelihood [a scalar]
"""
        if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(
                feats.shape)))
        if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d but got {}-d'.format(len(
                tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
            tags: Target tag indices [batch size, sequence length]. Each index
                should be in the range [0, num_tags)
Returns: Sequence score of shape [batch size]
"""
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1
)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
        Computes the partition function for the CRF using the forward
        algorithm, i.e. it sums (in log space) the scores of all possible tag
        sequences for the given feature sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
        Uses the Viterbi algorithm to predict the best tag sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
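# Illustrative check (added): the max-shift in _log_sum_exp keeps exp() from
# overflowing on large logits while matching torch.logsumexp exactly.
def _check_log_sum_exp():
    import torch
    crf = CRF(4)
    logits = torch.tensor([[1000.0, 1001.0, 999.0, 998.0]])
    out = crf._log_sum_exp(logits, dim=1)
    assert torch.isfinite(out).all()
    assert torch.allclose(out, torch.logsumexp(logits, dim=1))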
class OutputLayer(nn.Module):
"""
Abstract base class for output layer.
Handles projection to output labels
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
raise NotImplementedError('Must implement {}.loss'.format(self.
__class__.__name__))
class CRFOutputLayer(OutputLayer):
"""
Implements a CRF based output layer
"""
def __init__(self, hidden_size, output_size):
super(CRFOutputLayer, self).__init__(hidden_size, output_size)
self.crf = CRF(output_size)
def forward(self, hidden):
feats = self.output_projection(hidden)
return self.crf(feats)
def loss(self, hidden, labels):
feats = self.output_projection(hidden)
return self.crf.loss(feats, labels)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'output_size': 4}]
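# Minimal usage sketch (added for illustration): train with .loss() on gold
# tags; calling the layer directly runs Viterbi decoding.
def _demo_crf_output_layer():
    import torch
    layer = CRFOutputLayer(hidden_size=4, output_size=4)
    hidden = torch.rand(2, 6, 4)          # (batch, seq, hidden)
    gold = torch.randint(0, 4, (2, 6))    # gold tag indices in [0, 4)
    nll = layer.loss(hidden, gold)        # scalar negative log likelihood
    best = layer(hidden)                  # (batch, seq) best tag sequence
    assert nll.dim() == 0 and best.shape == (2, 6)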
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr1 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr2 + 1)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp16 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + 2)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + 2)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp26 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr1 + 3)
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp33 = tl.load(in_ptr2 + 3)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK])
tmp36 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp12 = tmp9 + tmp11
tmp15 = tmp12 + tmp14
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp22 = tmp19 + tmp21
tmp25 = tmp22 + tmp24
tmp27 = tmp25 + tmp26
tmp28 = triton_helpers.maximum(tmp18, tmp27)
tmp32 = tmp29 + tmp31
tmp35 = tmp32 + tmp34
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp28, tmp37)
tmp39 = tmp8 > tmp17
tmp40 = tmp8 == tmp17
tmp41 = tmp8 != tmp8
tmp42 = tmp17 != tmp17
tmp43 = tmp41 > tmp42
tmp44 = tmp39 | tmp43
tmp45 = tmp41 & tmp42
tmp46 = tmp40 | tmp45
tmp47 = tl.full([1], 0, tl.int64)
tmp48 = tl.full([1], 1, tl.int64)
tmp49 = tmp47 < tmp48
tmp50 = tmp46 & tmp49
tmp51 = tmp44 | tmp50
tmp52 = tl.where(tmp51, tmp8, tmp17)
tmp53 = tl.where(tmp51, tmp47, tmp48)
tmp54 = tmp52 > tmp27
tmp55 = tmp52 == tmp27
tmp56 = tmp52 != tmp52
tmp57 = tmp27 != tmp27
tmp58 = tmp56 > tmp57
tmp59 = tmp54 | tmp58
tmp60 = tmp56 & tmp57
tmp61 = tmp55 | tmp60
tmp62 = tl.full([1], 2, tl.int64)
tmp63 = tmp53 < tmp62
tmp64 = tmp61 & tmp63
tmp65 = tmp59 | tmp64
tmp66 = tl.where(tmp65, tmp52, tmp27)
tmp67 = tl.where(tmp65, tmp53, tmp62)
tmp68 = tmp66 > tmp37
tmp69 = tmp66 == tmp37
tmp70 = tmp66 != tmp66
tmp71 = tmp37 != tmp37
tmp72 = tmp70 > tmp71
tmp73 = tmp68 | tmp72
tmp74 = tmp70 & tmp71
tmp75 = tmp69 | tmp74
tmp76 = tl.full([1], 3, tl.int64)
tmp77 = tmp67 < tmp76
tmp78 = tmp75 & tmp77
tmp79 = tmp73 | tmp78
tl.where(tmp79, tmp66, tmp37)
tmp81 = tl.where(tmp79, tmp67, tmp76)
tl.store(out_ptr0 + x2, tmp38, xmask)
tl.store(out_ptr1 + x2, tmp81, xmask)
@triton.jit
def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (5 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr2 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (6 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr2 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (7 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr2 + 3)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + x2, tmp34, xmask)
tl.store(out_ptr1 + x2, tmp77, xmask)
@triton.jit
def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (9 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr2 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (10 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr2 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (11 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr2 + 3)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + x2, tmp34, xmask)
tl.store(out_ptr1 + x2, tmp77, xmask)
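# --- illustration (not compiler output) ---
# The boolean chains above (tmp35..tmp77) hand-roll torch.max's fused
# value-and-index reduction: `x != x` tests for NaN, a NaN operand wins the
# comparison so NaN propagates, and on ties the smaller index is kept --
# the same semantics as eager torch.max(dim). A minimal sketch of that
# behavior, assuming plain PyTorch:
def _demo_nan_aware_max():
    import torch
    scores = torch.tensor([1.0, 3.0, 3.0, float('nan')])
    vals, idxs = scores.max(dim=0)
    # vals is nan (NaN propagates, mirroring the tmp37/tmp38 checks) and
    # idxs is 3; without the NaN, the tie at 3.0 would resolve to index 1.
    return vals, idxs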
@triton.jit
def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr2 + 1)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr3 + 1)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp33 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr2 + 2)
tmp36 = tl.broadcast_to(tmp35, [XBLOCK])
tmp39 = tl.load(in_ptr3 + 2)
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp56 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp57 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp58 = tl.load(in_ptr2 + 3)
tmp59 = tl.broadcast_to(tmp58, [XBLOCK])
tmp62 = tl.load(in_ptr3 + 3)
tmp63 = tl.broadcast_to(tmp62, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp8 = tmp5 + tmp7
tmp13 = tmp10 + tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp14 + tmp16
tmp18 = tmp8 > tmp17
tmp19 = tmp8 == tmp17
tmp20 = tmp8 != tmp8
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 0, tl.int64)
tmp27 = tl.full([1], 1, tl.int64)
tmp28 = tmp26 < tmp27
tmp29 = tmp25 & tmp28
tmp30 = tmp23 | tmp29
tmp31 = tl.where(tmp30, tmp8, tmp17)
tmp32 = tl.where(tmp30, tmp26, tmp27)
tmp37 = tmp34 + tmp36
tmp38 = tmp33 + tmp37
tmp41 = tmp38 + tmp40
tmp42 = tmp31 > tmp41
tmp43 = tmp31 == tmp41
tmp44 = tmp31 != tmp31
tmp45 = tmp41 != tmp41
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp32 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp31, tmp41)
tmp55 = tl.where(tmp53, tmp32, tmp50)
tmp60 = tmp57 + tmp59
tmp61 = tmp56 + tmp60
tmp64 = tmp61 + tmp63
tmp65 = tmp54 > tmp64
tmp66 = tmp54 == tmp64
tmp67 = tmp54 != tmp54
tmp68 = tmp64 != tmp64
tmp69 = tmp67 > tmp68
tmp70 = tmp65 | tmp69
tmp71 = tmp67 & tmp68
tmp72 = tmp66 | tmp71
tmp73 = tl.full([1], 3, tl.int64)
tmp74 = tmp55 < tmp73
tmp75 = tmp72 & tmp74
tmp76 = tmp70 | tmp75
tl.where(tmp76, tmp54, tmp64)
tmp78 = tl.where(tmp76, tmp55, tmp73)
tmp79 = tl.full([XBLOCK], 4, tl.int32)
tmp80 = tmp78 + tmp79
tmp81 = tmp78 < 0
tmp82 = tl.where(tmp81, tmp80, tmp78)
tl.device_assert((0 <= tmp82) & (tmp82 < 4) | ~xmask,
'index out of bounds: 0 <= tmp82 < 4')
tmp84 = tl.load(in_ptr4 + (tmp82 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp85 = tmp84 + tmp79
tmp86 = tmp84 < 0
tmp87 = tl.where(tmp86, tmp85, tmp84)
tl.device_assert((0 <= tmp87) & (tmp87 < 4) | ~xmask,
'index out of bounds: 0 <= tmp87 < 4')
tmp89 = tl.load(in_ptr5 + (tmp87 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp90 = tmp89 + tmp79
tmp91 = tmp89 < 0
tmp92 = tl.where(tmp91, tmp90, tmp89)
tl.device_assert((0 <= tmp92) & (tmp92 < 4) | ~xmask,
'index out of bounds: 0 <= tmp92 < 4')
tmp94 = tl.load(in_ptr6 + (tmp92 + 4 * x0), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + 4 * x0, tmp78, xmask)
tl.store(out_ptr1 + 4 * x0, tmp94, xmask)
tl.store(out_ptr2 + 4 * x0, tmp89, xmask)
tl.store(out_ptr3 + 4 * x0, tmp84, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4,), (1,))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg3_1, (4,), (1,))
assert_size_stride(arg4_1, (4, 4), (4, 1))
assert_size_stride(arg5_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg2_1, (16, 4), (4, 1), 0),
reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
del arg0_1
del arg2_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_add_max_0[grid(16)](buf0, arg1_1, arg3_1, arg4_1,
buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg3_1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_1[grid(16)](buf1, buf0, arg1_1, arg4_1,
buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = buf1
del buf1
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_2[grid(16)](buf3, buf0, arg1_1, arg4_1,
buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg4_1
del buf3
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf7 = reinterpret_tensor(buf11, (4, 1), (4, 1), 3)
buf8 = reinterpret_tensor(buf11, (4, 1), (4, 1), 0)
buf9 = reinterpret_tensor(buf11, (4, 1), (4, 1), 1)
buf10 = reinterpret_tensor(buf11, (4, 1), (4, 1), 2)
triton_poi_fused_add_gather_max_3[grid(4)](buf5, buf0, arg1_1,
arg5_1, buf6, buf4, buf2, buf7, buf8, buf9, buf10, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del arg1_1
del arg5_1
del buf0
del buf2
del buf4
del buf5
del buf6
return buf11,
class CRF(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
        Computes the negative log likelihood of the tags given the features.
        Essentially the difference between the individual sequence score and
        the log-sum-exp over all possible sequence scores (the log partition function).
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns:
Negative log likelihood [a scalar]
"""
        if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
        if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d but got {}-d'.format(len(tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns: Sequence score of shape [batch size]
"""
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1
)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
        Computes the (log) partition function for the CRF using the forward
        algorithm, i.e. the log-sum-exp of the scores of all possible tag
        sequences for the given feature vector sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
Uses Viterbi algorithm to predict the best sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
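# A minimal sketch (illustrative, not part of the original module) of why
# `_log_sum_exp` above is numerically stable: subtracting the row max keeps
# exp() from overflowing, and max + log(sum(exp(x - max))) equals
# log(sum(exp(x))) exactly.
def _demo_log_sum_exp_stability():
    import torch
    logits = torch.tensor([[1000.0, 999.0]])
    naive = logits.exp().sum(dim=1).log()  # inf: exp(1000.) overflows float32
    m, _ = logits.max(dim=1)
    stable = m + (logits - m.unsqueeze(1)).exp().sum(dim=1).log()
    return naive, stable  # (inf, ~1000.3133)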
class OutputLayer(nn.Module):
"""
Abstract base class for output layer.
Handles projection to output labels
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
raise NotImplementedError('Must implement {}.loss'.format(self.
__class__.__name__))
class CRFOutputLayerNew(OutputLayer):
"""
Implements a CRF based output layer
"""
def __init__(self, hidden_size, output_size):
super(CRFOutputLayerNew, self).__init__(hidden_size, output_size)
self.crf = CRF(output_size)
def loss(self, hidden, labels):
feats = self.output_projection(hidden)
return self.crf.loss(feats, labels)
def forward(self, input_0):
arg0_1 = self.output_projection.weight
arg1_1 = self.output_projection.bias
arg4_1 = self.crf.transitions
arg3_1 = self.crf.start_transitions
arg5_1 = self.crf.stop_transitions
arg2_1 = input_0
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
return output[0]
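# Usage sketch (illustrative only; shapes follow the get_inputs convention
# used throughout this file, and a CUDA device is assumed since `call`
# launches Triton kernels): the fused path implements Viterbi decoding, so
# forward returns the best tag sequence; training still goes through
# CRFOutputLayerNew.loss, which runs the eager CRF.
def _demo_crf_output_layer():
    import torch
    layer = CRFOutputLayerNew(hidden_size=4, output_size=4).cuda()
    hidden = torch.rand(4, 4, 4, device='cuda')
    return layer(hidden)  # int64 tensor of shape [4, 4]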
|
yezhengli-Mr9/torchnlp
|
CRFOutputLayer
| false
| 13,151
|
[
"Apache-2.0"
] | 0
|
0f2ad6d149a413da9f03c6f6694c429746de6551
|
https://github.com/yezhengli-Mr9/torchnlp/tree/0f2ad6d149a413da9f03c6f6694c429746de6551
|
CondConv2D
|
import functools
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
class _routing(nn.Module):
def __init__(self, in_channels, num_experts, dropout_rate):
super(_routing, self).__init__()
self.dropout = nn.Dropout(dropout_rate)
self.fc = nn.Linear(in_channels, num_experts)
def forward(self, x):
x = torch.flatten(x)
x = self.dropout(x)
x = self.fc(x)
        return torch.sigmoid(x)
class CondConv2D(_ConvNd):
"""Learn specialized convolutional kernels for each example.
As described in the paper
`CondConv: Conditionally Parameterized Convolutions for Efficient Inference`_ ,
    conditionally parameterized convolutions (CondConv)
    challenge the paradigm of static convolutional kernels
by computing convolutional kernels as a function of the input.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
padding_mode (string, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
num_experts (int): Number of experts per layer
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
.. math::
H_{out} = \\left\\lfloor\\frac{H_{in} + 2 \\times \\text{padding}[0] - \\text{dilation}[0]
\\times (\\text{kernel\\_size}[0] - 1) - 1}{\\text{stride}[0]} + 1\\right\\rfloor
.. math::
W_{out} = \\left\\lfloor\\frac{W_{in} + 2 \\times \\text{padding}[1] - \\text{dilation}[1]
\\times (\\text{kernel\\_size}[1] - 1) - 1}{\\text{stride}[1]} + 1\\right\\rfloor
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\\text{out\\_channels}, \\frac{\\text{in\\_channels}}{\\text{groups}},`
:math:`\\text{kernel\\_size[0]}, \\text{kernel\\_size[1]})`.
The values of these weights are sampled from
:math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
:math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
then the values of these weights are
sampled from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
:math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`
.. _CondConv: Conditionally Parameterized Convolutions for Efficient Inference:
https://arxiv.org/abs/1904.04971
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros',
num_experts=3, dropout_rate=0.2):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(CondConv2D, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, False, _pair(0), groups,
bias, padding_mode)
self._avg_pooling = functools.partial(F.adaptive_avg_pool2d,
output_size=(1, 1))
self._routing_fn = _routing(in_channels, num_experts, dropout_rate)
self.weight = Parameter(torch.Tensor(num_experts, out_channels,
in_channels // groups, *kernel_size))
self.reset_parameters()
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._padding_repeated_twice, mode
=self.padding_mode), weight, self.bias, self.stride, _pair(
0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
def forward(self, input):
pooled_inputs = self._avg_pooling(input)
routing_weights = self._routing_fn(pooled_inputs)
kernels = torch.sum(routing_weights[:, None, None, None, None] *
self.weight, 0)
return self._conv_forward(input, kernels)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
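# A minimal sketch (illustrative only; mirrors the forward above) of the
# CondConv idea: per-example routing weights mix `num_experts` kernel banks
# into a single ordinary conv kernel before the convolution runs.
def _demo_condconv_kernel_mixing():
    import torch
    num_experts, out_c, in_c, k = 3, 4, 4, 4
    weight = torch.randn(num_experts, out_c, in_c, k, k)
    routing = torch.sigmoid(torch.randn(num_experts))  # one weight per expert
    kernels = torch.sum(routing[:, None, None, None, None] * weight, 0)
    return kernels.shape  # torch.Size([4, 4, 4, 4]): a standard conv weight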
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import functools
from torch import nn
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr0 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr1 + (256 + x0), xmask)
tmp11 = tl.load(in_ptr0 + 2)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp14 = tl.load(in_ptr1 + (512 + x0), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp7 = tl.sigmoid(tmp6)
tmp9 = tmp7 * tmp8
tmp10 = tmp4 + tmp9
tmp13 = tl.sigmoid(tmp12)
tmp15 = tmp13 * tmp14
tmp16 = tmp10 + tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (3, 4), (4, 1))
assert_size_stride(primals_3, (3,), (1,))
assert_size_stride(primals_4, (3, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(4)](buf1, primals_1, 4, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((1, 3), (3, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (1, 4), (0,
1), 0), reinterpret_tensor(primals_2, (4, 3), (1, 4), 0), alpha
=1, beta=1, out=buf2)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_1[grid(256)](buf2, primals_4, buf3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (64, 16, 4, 1), 0), buf3, stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf4, (1, 4, 1, 1), (4, 1, 1, 1))
buf5 = reinterpret_tensor(buf4, (1, 4, 1, 1), (4, 1, 4, 4), 0)
del buf4
triton_poi_fused_convolution_2[grid(4)](buf5, primals_5, 4, XBLOCK=
4, num_warps=1, num_stages=1)
del primals_5
return reinterpret_tensor(buf5, (4, 1, 1), (1, 1, 1), 0
), primals_4, reinterpret_tensor(buf1, (1, 4), (4, 1), 0
), buf2, buf3, reinterpret_tensor(primals_1, (1, 4, 4, 4), (64, 16,
4, 1), 0)
class _routing(nn.Module):
def __init__(self, in_channels, num_experts, dropout_rate):
super(_routing, self).__init__()
self.dropout = nn.Dropout(dropout_rate)
self.fc = nn.Linear(in_channels, num_experts)
def forward(self, x):
x = torch.flatten(x)
x = self.dropout(x)
x = self.fc(x)
        return torch.sigmoid(x)
class CondConv2DNew(_ConvNd):
"""Learn specialized convolutional kernels for each example.
As described in the paper
`CondConv: Conditionally Parameterized Convolutions for Efficient Inference`_ ,
    conditionally parameterized convolutions (CondConv)
    challenge the paradigm of static convolutional kernels
by computing convolutional kernels as a function of the input.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
padding_mode (string, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
num_experts (int): Number of experts per layer
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
.. math::
H_{out} = \\left\\lfloor\\frac{H_{in} + 2 \\times \\text{padding}[0] - \\text{dilation}[0]
\\times (\\text{kernel\\_size}[0] - 1) - 1}{\\text{stride}[0]} + 1\\right\\rfloor
.. math::
W_{out} = \\left\\lfloor\\frac{W_{in} + 2 \\times \\text{padding}[1] - \\text{dilation}[1]
\\times (\\text{kernel\\_size}[1] - 1) - 1}{\\text{stride}[1]} + 1\\right\\rfloor
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\\text{out\\_channels}, \\frac{\\text{in\\_channels}}{\\text{groups}},`
:math:`\\text{kernel\\_size[0]}, \\text{kernel\\_size[1]})`.
The values of these weights are sampled from
:math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
:math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
then the values of these weights are
sampled from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
:math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`
.. _CondConv: Conditionally Parameterized Convolutions for Efficient Inference:
https://arxiv.org/abs/1904.04971
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros',
num_experts=3, dropout_rate=0.2):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(CondConv2DNew, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, False, _pair(0), groups,
bias, padding_mode)
self._avg_pooling = functools.partial(F.adaptive_avg_pool2d,
output_size=(1, 1))
self._routing_fn = _routing(in_channels, num_experts, dropout_rate)
self.weight = Parameter(torch.Tensor(num_experts, out_channels,
in_channels // groups, *kernel_size))
self.reset_parameters()
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._padding_repeated_twice, mode
=self.padding_mode), weight, self.bias, self.stride, _pair(
0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
def forward(self, input_0):
primals_4 = self.weight
primals_5 = self.bias
primals_2 = self._routing_fn.fc.weight
primals_3 = self._routing_fn.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
yifanpu001/CondConv-pytorch
|
CondConv2D
| false
| 13,152
|
[
"MIT"
] | 0
|
d5198f1c53de97304f8a23f4ca287cf5b4d33561
|
https://github.com/yifanpu001/CondConv-pytorch/tree/d5198f1c53de97304f8a23f4ca287cf5b4d33561
|
Router
|
import torch
import warnings
import torch.nn as nn
class Router(nn.Module):
"""Convolution + Relu + Global Average Pooling + Sigmoid"""
def __init__(self, input_nc, input_width, input_height, kernel_size=28,
soft_decision=True, stochastic=False, **kwargs):
super(Router, self).__init__()
self.soft_decision = soft_decision
self.stochastic = stochastic
if max(input_width, input_height) < kernel_size:
warnings.warn('Router kernel too large, shrink it')
kernel_size = max(input_width, input_height)
self.conv1 = nn.Conv2d(input_nc, 1, kernel_size=kernel_size)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.conv1(x)
x = x.mean(dim=-1).mean(dim=-1).squeeze()
x = self.output_controller(x)
return x
def output_controller(self, x):
if self.soft_decision:
return self.sigmoid(x)
if self.stochastic:
x = self.sigmoid(x)
return ops.ST_StochasticIndicator()(x)
else:
x = self.sigmoid(x)
return ops.ST_Indicator()(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'input_width': 4, 'input_height': 4}]
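# A minimal sketch (illustrative only) of the soft-decision path above:
# conv + global average reduce each example to a single logit, and sigmoid
# squashes it into (0, 1), giving one routing probability per example.
# Note that the stochastic/deterministic branches reference a project-local
# `ops` module that is not imported in this file.
def _demo_router_soft_decision():
    import torch
    router = Router(input_nc=4, input_width=4, input_height=4)
    x = torch.rand(4, 4, 4, 4)
    return router(x)  # shape (4,), values in (0, 1)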
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import warnings
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = 1.0
tmp5 = tmp3 / tmp4
tmp6 = tmp5 / tmp4
tmp7 = tl.sigmoid(tmp6)
tl.store(in_out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 1, 1), (1, 1, 1, 1))
buf1 = reinterpret_tensor(buf0, (4,), (1,), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(4)](buf1, primals_2, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf1
class RouterNew(nn.Module):
"""Convolution + Relu + Global Average Pooling + Sigmoid"""
def __init__(self, input_nc, input_width, input_height, kernel_size=28,
soft_decision=True, stochastic=False, **kwargs):
super(RouterNew, self).__init__()
self.soft_decision = soft_decision
self.stochastic = stochastic
if max(input_width, input_height) < kernel_size:
warnings.warn('Router kernel too large, shrink it')
kernel_size = max(input_width, input_height)
self.conv1 = nn.Conv2d(input_nc, 1, kernel_size=kernel_size)
self.sigmoid = nn.Sigmoid()
def output_controller(self, x):
if self.soft_decision:
return self.sigmoid(x)
if self.stochastic:
x = self.sigmoid(x)
return ops.ST_StochasticIndicator()(x)
else:
x = self.sigmoid(x)
return ops.ST_Indicator()(x)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
yulinfeng000/AdaptiveNeuralTrees
|
Router
| false
| 13,153
|
[
"MIT"
] | 0
|
bbcb381b9cb0c91ae1af33ce43b43f352055041c
|
https://github.com/yulinfeng000/AdaptiveNeuralTrees/tree/bbcb381b9cb0c91ae1af33ce43b43f352055041c
|
ScaledDotProductAttention
|
import torch
import numpy as np
import torch.nn as nn
class ScaledDotProductAttention(nn.Module):
"""Scaled dot-product attention mechanism."""
def __init__(self, attention_dropout=0.0):
"""Init.
Args:
attention_dropout: A scalar, dropout rate.
"""
super(ScaledDotProductAttention, self).__init__()
self.dropout = nn.Dropout(attention_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v, scale=None, attn_mask=None):
"""Forward pass.
Args:
q: Queries tensor, with shape of [B, L_q, D_q]
k: Keys tensor, with shape of [B, L_k, D_k]
v: Values tensor, with shape of [B, L_v, D_v]
scale: A scalar, scale factor.
attn_mask: A binary masking tensor, with shape of [B, L_q, L_k]
Returns:
Context and attention tensor.
"""
attention = torch.bmm(q, k.transpose(1, 2))
if scale:
attention = attention * scale
        if attn_mask is not None:
attention = attention.masked_fill_(attn_mask, -np.inf)
attention = self.softmax(attention)
attention = self.dropout(attention)
context = torch.bmm(attention, v)
return context, attention
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {}]
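# A minimal sketch (illustrative only) of the computation above: the
# attention matrix is softmax(Q @ K^T [* scale]) normalized over keys,
# and the context is that distribution applied to V.
def _demo_scaled_dot_product_attention():
    import torch
    q = k = v = torch.rand(2, 3, 5)
    attn = torch.softmax(torch.bmm(q, k.transpose(1, 2)), dim=2)
    context = torch.bmm(attn, v)
    return context.shape, attn.sum(-1)  # (2, 3, 5); attention rows sum to 1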
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return buf3, buf2
class ScaledDotProductAttentionNew(nn.Module):
"""Scaled dot-product attention mechanism."""
def __init__(self, attention_dropout=0.0):
"""Init.
Args:
attention_dropout: A scalar, dropout rate.
"""
super(ScaledDotProductAttentionNew, self).__init__()
self.dropout = nn.Dropout(attention_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
yumoh/pinyin2hanzi
|
ScaledDotProductAttention
| false
| 13,154
|
[
"MIT"
] | 0
|
1cbb650d3dd3ec0a0f51be5822556634860ad612
|
https://github.com/yumoh/pinyin2hanzi/tree/1cbb650d3dd3ec0a0f51be5822556634860ad612
|
LR
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class LR(nn.Module):
""" Logistinc regression
"""
def __init__(self, input_nc, input_width, input_height, no_classes=10,
**kwargs):
super(LR, self).__init__()
self.fc = nn.Linear(input_nc * input_width * input_height, no_classes)
def forward(self, x):
x = x.view(x.size(0), -1)
        return F.log_softmax(self.fc(x), dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'input_width': 4, 'input_height': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__log_softmax_0(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
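# --- illustration (not compiler output) ---
# The kernel above is the standard numerically stable log-softmax: subtract
# the row max (tmp4), exponentiate, sum, and use
# log_softmax(x) = (x - max) - log(sum(exp(x - max))). A minimal eager
# check, assuming plain PyTorch:
def _demo_stable_log_softmax():
    import torch
    x = torch.randn(4, 10)
    shifted = x - x.max(dim=1, keepdim=True).values
    manual = shifted - shifted.exp().sum(dim=1, keepdim=True).log()
    return torch.allclose(manual, torch.log_softmax(x, dim=1), atol=1e-6)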
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (10, 64), (64, 1))
assert_size_stride(primals_3, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (4,
64), (64, 1), 0), reinterpret_tensor(primals_2, (64, 10), (1,
64), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__log_softmax_0[grid(4)](buf0, buf3, 4, 10, XBLOCK=
1, num_warps=2, num_stages=1)
del buf0
return buf3, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf3
class LRNew(nn.Module):
""" Logistinc regression
"""
def __init__(self, input_nc, input_width, input_height, no_classes=10,
**kwargs):
super(LRNew, self).__init__()
self.fc = nn.Linear(input_nc * input_width * input_height, no_classes)
def forward(self, input_0):
primals_2 = self.fc.weight
primals_3 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
yulinfeng000/AdaptiveNeuralTrees
|
LR
| false
| 13,155
|
[
"MIT"
] | 0
|
bbcb381b9cb0c91ae1af33ce43b43f352055041c
|
https://github.com/yulinfeng000/AdaptiveNeuralTrees/tree/bbcb381b9cb0c91ae1af33ce43b43f352055041c
|
MySigmoidFocalLoss
|
import torch
import torch.utils.data
from torch import nn
class MySigmoidFocalLoss(nn.Module):
def __init__(self, gamma, alpha):
super().__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, confids, targets):
bias = 1e-07
num_classes = confids.shape[1]
dtype = targets.dtype
device = targets.device
gamma = self.gamma
alpha = self.alpha
class_range = torch.arange(1, num_classes + 1, dtype=dtype, device=
device).unsqueeze(0)
t = targets.unsqueeze(1)
p = confids
term1 = (1 - p) ** gamma * torch.log(p + bias)
term2 = p ** gamma * torch.log(1 - p + bias)
loss = -(t == class_range).float() * term1 * alpha - ((t !=
class_range) * (t >= 0)).float() * term2 * (1 - alpha)
return loss.sum() / confids.shape[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'gamma': 4, 'alpha': 4}]
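# A minimal numeric sketch (illustrative only) of the two focal terms above,
# for a single confidence p with gamma=2: the (1 - p)**gamma factor damps
# the positive-class log-likelihood as p -> 1, while p**gamma damps the
# negative-class term as p -> 0.
def _demo_focal_terms():
    import torch
    p = torch.tensor(0.9)
    gamma, bias = 2.0, 1e-07
    term1 = (1 - p) ** gamma * torch.log(p + bias)  # ~ -0.00105: easy positive, tiny loss
    term2 = p ** gamma * torch.log(1 - p + bias)    # ~ -1.865: confident wrong negative, large penalty
    return term1, term2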
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_add_div_eq_ge_log_mul_ne_neg_pow_rsub_sub_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex // 256
r5 = rindex % 64
r0 = rindex % 4
r7 = rindex % 256
tmp0 = tl.load(in_ptr0 + (r5 + 64 * r3), None, eviction_policy='evict_last'
)
tmp6 = tl.load(in_ptr1 + r7, None, eviction_policy='evict_last')
tmp1 = 1 + r0
tmp2 = tmp1.to(tl.float32)
tmp3 = tmp0 == tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = -tmp4
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = 1e-07
tmp12 = tmp6 + tmp11
tmp13 = tl_math.log(tmp12)
tmp14 = tmp10 * tmp13
tmp15 = tmp5 * tmp14
tmp16 = 4.0
tmp17 = tmp15 * tmp16
tmp18 = tmp0 != tmp2
tmp19 = 0.0
tmp20 = tmp0 >= tmp19
tmp21 = tmp18 & tmp20
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp6 * tmp6
tmp24 = tmp23 * tmp23
tmp25 = tmp8 + tmp11
tmp26 = tl_math.log(tmp25)
tmp27 = tmp24 * tmp26
tmp28 = tmp22 * tmp27
tmp29 = -3.0
tmp30 = tmp28 * tmp29
tmp31 = tmp17 - tmp30
tmp32 = tl.broadcast_to(tmp31, [RBLOCK])
tmp34 = triton_helpers.promote_to_tensor(tl.sum(tmp32, 0))
tmp35 = 0.25
tmp36 = tmp34 * tmp35
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp36, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused__to_copy_add_div_eq_ge_log_mul_ne_neg_pow_rsub_sub_sum_0[
grid(1)](buf2, arg1_1, arg0_1, 1, 1024, num_warps=8, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class MySigmoidFocalLossNew(nn.Module):
def __init__(self, gamma, alpha):
super().__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
yuruiqi/FCOS
|
MySigmoidFocalLoss
| false
| 13,156
|
[
"BSD-2-Clause"
] | 0
|
f03f984a03f4e23a0c1c8b470e401d4319e56c3f
|
https://github.com/yuruiqi/FCOS/tree/f03f984a03f4e23a0c1c8b470e401d4319e56c3f
|
SeqAttnMatch
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SeqAttnMatch(nn.Module):
"""Given sequences X and Y, match sequence Y to each element in X.
* o_i = sum(alpha_j * y_j) for i in X
* alpha_j = softmax(y_j * x_i)
"""
def __init__(self, input_size, identity=False):
super(SeqAttnMatch, self).__init__()
if not identity:
self.linear = nn.Linear(input_size, input_size)
else:
self.linear = None
def forward(self, x, y, y_mask):
"""
Args:
x: batch * len1 * hdim
y: batch * len2 * hdim
y_mask: batch * len2 (1 for padding, 0 for true)
Output:
matched_seq: batch * len1 * hdim
"""
if self.linear:
x_proj = self.linear(x.view(-1, x.size(2))).view(x.size())
x_proj = F.relu(x_proj)
y_proj = self.linear(y.view(-1, y.size(2))).view(y.size())
y_proj = F.relu(y_proj)
else:
x_proj = x
y_proj = y
scores = x_proj.bmm(y_proj.transpose(2, 1))
y_mask = y_mask.unsqueeze(1).expand(scores.size())
scores.data.masked_fill_(y_mask.data > 0, -float('inf'))
alpha_flat = F.softmax(scores.view(-1, y.size(1)), dim=-1)
alpha = alpha_flat.view(-1, x.size(1), y.size(1))
matched_seq = alpha.bmm(y)
return matched_seq
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
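# A minimal sketch (illustrative only) of the masking step above: positions
# where y_mask > 0 are padding, so their scores become -inf before softmax
# and receive exactly zero attention weight.
def _demo_masked_softmax():
    import torch
    scores = torch.tensor([[1.0, 2.0, 3.0]])
    y_mask = torch.tensor([[0, 0, 1]])  # last position is padding
    scores = scores.masked_fill(y_mask > 0, -float('inf'))
    return torch.softmax(scores, dim=-1)  # [[0.2689, 0.7311, 0.0000]]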
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_out_ptr1,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = 0.0
tmp9 = tmp7 <= tmp8
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(in_out_ptr1 + x2, tmp7, xmask)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * (x0 // 4), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 > tmp1
tmp9 = tl.where(tmp7, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp12 = tmp11 > tmp1
tmp14 = tl.where(tmp12, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp17 = tmp16 > tmp1
tmp19 = tl.where(tmp17, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x0, tmp20, xmask)
tl.store(out_ptr1 + x0, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 // 4)), xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf2)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
del buf2
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, buf3,
primals_3, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf1, reinterpret_tensor(buf3, (4, 4, 4), (16, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
buf6 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
triton_poi_fused__softmax_1[grid(16)](primals_5, buf4, buf5, buf6,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(64)](buf7, primals_5, buf5, buf6,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf5
del buf6
del primals_5
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1),
0), primals_4, out=buf8)
return buf8, primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf1, buf7, buf3, buf9
class SeqAttnMatchNew(nn.Module):
"""Given sequences X and Y, match sequence Y to each element in X.
* o_i = sum(alpha_j * y_j) for i in X
* alpha_j = softmax(y_j * x_i)
"""
def __init__(self, input_size, identity=False):
super(SeqAttnMatchNew, self).__init__()
if not identity:
self.linear = nn.Linear(input_size, input_size)
else:
self.linear = None
def forward(self, input_0, input_1, input_2):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
primals_4 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
ys7yoo/DrQAKor
|
SeqAttnMatch
| false
| 13,157
|
[
"BSD-3-Clause"
] | 0
|
ed9a69dd2a95f8ccb81bd5d6db0fbd59aae0be50
|
https://github.com/ys7yoo/DrQAKor/tree/ed9a69dd2a95f8ccb81bd5d6db0fbd59aae0be50
|
FCN_mse
|
import torch
import torch.nn as nn
class FCN_mse(nn.Module):
"""
Predict whether pixels are part of the object or the background.
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, padding=2)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, padding=2)
self.classifier = nn.Conv2d(32, 1, kernel_size=1)
def forward(self, x):
c1 = torch.tanh(self.conv1(x))
c2 = torch.tanh(self.conv2(c1))
score = self.classifier(c2)
return score
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
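# Shape sketch (illustrative only): both 5x5 convs use padding=2, so the
# spatial size is preserved end to end, and the 1x1 classifier emits one
# score map per image.
def _demo_fcn_shapes():
    import torch
    net = FCN_mse()
    x = torch.rand(4, 3, 64, 64)
    return net(x).shape  # torch.Size([4, 1, 64, 64])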
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_convolution_tanh_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (16, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 16, 5, 5), (400, 25, 5, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (1, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_tanh_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_tanh_1[grid(524288)](buf3, primals_5,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(16384)](buf5, primals_7, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3
class FCN_mseNew(nn.Module):
"""
Predict whether pixels are part of the object or the background.
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, padding=2)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, padding=2)
self.classifier = nn.Conv2d(32, 1, kernel_size=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.classifier.weight
primals_7 = self.classifier.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
yuishihara/chainer-causal-info-gan
|
FCN_mse
| false
| 13,158
|
[
"MIT"
] | 0
|
67ff8e66fb1f8762e6c7830be80730395d2eb22c
|
https://github.com/yuishihara/chainer-causal-info-gan/tree/67ff8e66fb1f8762e6c7830be80730395d2eb22c
|
Solver_GAP_OneFClayers
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Solver_GAP_OneFClayers(nn.Module):
""" GAP + fc1 """
def __init__(self, input_nc, input_width, input_height, dropout_prob=
0.0, reduction_rate=2, **kwargs):
super(Solver_GAP_OneFClayers, self).__init__()
self.dropout_prob = dropout_prob
self.reduction_rate = reduction_rate
self.fc1 = nn.Linear(input_nc, 10)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = x.mean(dim=-1).mean(dim=-1).squeeze()
x = F.dropout(x, training=self.training, p=self.dropout_prob)
x = self.fc1(x)
return F.log_softmax(x, dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'input_width': 4, 'input_height': 4}]
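# A minimal sketch (illustrative only): the mean(dim=-1).mean(dim=-1) pair
# above is global average pooling; it matches F.adaptive_avg_pool2d(x, 1)
# flattened to [batch, channels].
def _demo_gap_equivalence():
    import torch
    import torch.nn.functional as F
    x = torch.rand(4, 4, 4, 4)
    gap_by_mean = x.mean(dim=-1).mean(dim=-1)
    gap_by_pool = F.adaptive_avg_pool2d(x, 1).flatten(1)
    return torch.allclose(gap_by_mean, gap_by_pool)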
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + x0, tmp36, xmask)
@triton.jit
def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (10, 4), (4, 1))
assert_size_stride(primals_3, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_3, buf0, reinterpret_tensor(primals_2,
(4, 10), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_1[grid(4)](buf1, buf4, 4, 10, XBLOCK=
1, num_warps=2, num_stages=1)
del buf1
return buf4, buf0, buf4
class Solver_GAP_OneFClayersNew(nn.Module):
""" GAP + fc1 """
def __init__(self, input_nc, input_width, input_height, dropout_prob=
0.0, reduction_rate=2, **kwargs):
super(Solver_GAP_OneFClayersNew, self).__init__()
self.dropout_prob = dropout_prob
self.reduction_rate = reduction_rate
self.fc1 = nn.Linear(input_nc, 10)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
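# Minimal smoke test (not part of the original source): call() allocates CUDA
# buffers, so this assumes a CUDA device; the input shape matches the
# (4, 4, 4, 4) guard asserted in call().
model = Solver_GAP_OneFClayersNew(input_nc=4, input_width=4, input_height=4).cuda()
log_probs = model(torch.rand([4, 4, 4, 4], device='cuda'))
print(log_probs.shape)  # torch.Size([4, 10])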
|
yulinfeng000/AdaptiveNeuralTrees
|
Solver_GAP_OneFClayers
| false
| 13,159
|
[
"MIT"
] | 0
|
bbcb381b9cb0c91ae1af33ce43b43f352055041c
|
https://github.com/yulinfeng000/AdaptiveNeuralTrees/tree/bbcb381b9cb0c91ae1af33ce43b43f352055041c
|
Conv_ReLU_Block
|
import torch
import torch.nn as nn
class Conv_ReLU_Block(nn.Module):
def __init__(self, channel_in):
super(Conv_ReLU_Block, self).__init__()
self.conv_0 = nn.Conv2d(in_channels=channel_in, out_channels=128,
kernel_size=1, stride=1, padding=0, bias=False)
self.conv_1 = nn.Conv2d(in_channels=channel_in, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_2 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_3 = nn.Conv2d(in_channels=64, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_4 = nn.Conv2d(in_channels=96, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_5 = nn.Conv2d(in_channels=128, out_channels=64,
kernel_size=1, stride=1, padding=0, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
residual = x
out_0 = self.relu(self.conv_0(residual))
out_1 = self.relu(self.conv_1(x))
out_2 = self.relu(self.conv_2(out_1))
cat_1 = torch.cat((out_1, out_2), 1)
out_3 = self.relu(self.conv_3(cat_1))
cat_2 = torch.cat((cat_1, out_3), 1)
out_4 = self.relu(self.conv_4(cat_2))
cat_3 = torch.cat((cat_2, out_4), 1)
out = torch.add(out_0, cat_3)
out = self.relu(self.conv_5(out))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel_in': 4}]
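# Minimal usage sketch (not part of the original source): build the block from
# get_init_inputs() and run it on a get_inputs() tensor; runs on CPU in eager mode.
block = Conv_ReLU_Block(channel_in=4)
out = block(torch.rand([4, 4, 4, 4]))
print(out.shape)  # torch.Size([4, 64, 4, 4]) -- conv_5 maps the 128 fused channels to 64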
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 64
x0 = xindex % 16
x2 = xindex // 1024
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 512 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-32 + x1) + 512 * x2), tmp6, other=0.0
)
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 96
x0 = xindex % 16
x2 = xindex // 1536
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 1024 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 96, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-64 + x1) + 512 * x2), tmp6, other=0.0
)
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_add_cat_relu_threshold_backward_3(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 128
x0 = xindex % 16
x2 = xindex // 2048
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = x1
tl.full([1], 0, tl.int64)
tmp6 = tl.full([1], 96, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tl.load(in_ptr1 + (x0 + 16 * x1 + 1536 * x2), tmp7, other=0.0)
tmp9 = tmp3 >= tmp6
tl.full([1], 128, tl.int64)
tmp12 = tl.load(in_ptr2 + (x0 + 16 * (-96 + x1) + 512 * x2), tmp9,
other=0.0)
tmp13 = triton_helpers.maximum(tmp1, tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp7, tmp8, tmp15)
tmp17 = tmp2 + tmp16
tmp18 = 0.0
tmp19 = tmp2 <= tmp18
tl.store(out_ptr0 + x3, tmp17, None)
tl.store(out_ptr1 + x3, tmp19, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, None)
tl.store(out_ptr0 + x0, tmp4, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(out_ptr0 + x0, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_6, (32, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_7, (64, 128, 1, 1), (128, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 128, 4, 4), (2048, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 4, 4), (512, 16, 4, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_relu_0[grid(2048)](buf2, 2048, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
buf4 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_1[grid(4096)](buf2, buf3, buf4, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 4, 4), (512, 16, 4, 1))
buf6 = empty_strided_cuda((4, 96, 4, 4), (1536, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_2[grid(6144)](buf4, buf5, buf6, 6144, XBLOCK=
128, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 32, 4, 4), (512, 16, 4, 1))
buf8 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
buf15 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.bool
)
triton_poi_fused_add_cat_relu_threshold_backward_3[grid(8192)](buf0,
buf6, buf7, buf8, buf15, 8192, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
buf9 = extern_kernels.convolution(buf8, primals_7, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 4, 4), (1024, 16, 4, 1))
buf10 = buf9
del buf9
buf11 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_4[grid(4096)](buf10, buf11,
4096, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(2048)](buf7, buf12,
2048, XBLOCK=128, num_warps=4, num_stages=1)
del buf7
buf13 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(2048)](buf5, buf13,
2048, XBLOCK=128, num_warps=4, num_stages=1)
del buf5
buf14 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(2048)](buf3, buf14,
2048, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
return (buf10, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, primals_7, buf2, buf4, buf6, buf8, buf11, buf12, buf13,
buf14, buf15)
class Conv_ReLU_BlockNew(nn.Module):
def __init__(self, channel_in):
super(Conv_ReLU_BlockNew, self).__init__()
self.conv_0 = nn.Conv2d(in_channels=channel_in, out_channels=128,
kernel_size=1, stride=1, padding=0, bias=False)
self.conv_1 = nn.Conv2d(in_channels=channel_in, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_2 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_3 = nn.Conv2d(in_channels=64, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_4 = nn.Conv2d(in_channels=96, out_channels=32,
kernel_size=3, stride=1, padding=1, bias=False)
self.conv_5 = nn.Conv2d(in_channels=128, out_channels=64,
kernel_size=1, stride=1, padding=0, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_2 = self.conv_0.weight
primals_3 = self.conv_1.weight
primals_4 = self.conv_2.weight
primals_5 = self.conv_3.weight
primals_6 = self.conv_4.weight
primals_7 = self.conv_5.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
ypf780732/multi-staged-fusion-sr
|
Conv_ReLU_Block
| false
| 13,160
|
[
"MIT"
] | 0
|
83d82c4310cc9314544793dc0b299a34956044e0
|
https://github.com/ypf780732/multi-staged-fusion-sr/tree/83d82c4310cc9314544793dc0b299a34956044e0
|
RouterGAPwithDoubleConv
|
import torch
import warnings
import torch.nn as nn
class RouterGAPwithDoubleConv(nn.Module):
""" 2 x (Convolution + Relu) + Global Average Pooling + FC + Sigmoid """
def __init__(self, input_nc, input_width, input_height, ngf=32,
kernel_size=3, soft_decision=True, stochastic=False, **kwargs):
super(RouterGAPwithDoubleConv, self).__init__()
self.ngf = ngf
self.soft_decision = soft_decision
self.stochastic = stochastic
if max(input_width, input_height) < kernel_size:
warnings.warn('Router kernel too large, shrink it')
kernel_size = max(input_width, input_height)
if max(input_width, input_height) % 2 == 0:
kernel_size += 1
padding = (kernel_size - 1) // 2
self.conv1 = nn.Conv2d(input_nc, ngf, kernel_size=kernel_size,
padding=padding)
self.conv2 = nn.Conv2d(ngf, ngf, kernel_size=kernel_size, padding=
padding)
self.relu = nn.ReLU(inplace=True)
self.linear1 = nn.Linear(ngf, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.relu(self.conv1(x))
out = self.relu(self.conv2(out))
out = out.mean(dim=-1).mean(dim=-1).squeeze()
out = self.linear1(out).squeeze()
out = self.output_controller(out)
return out
def output_controller(self, x):
if self.soft_decision:
return self.sigmoid(x)
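# NOTE: 'ops' below is a project-local module from the source repository
# (providing ST_StochasticIndicator / ST_Indicator); it is not imported in
# this snippet, so only the soft_decision path is runnable as-is.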
if self.stochastic:
x = self.sigmoid(x)
return ops.ST_StochasticIndicator()(x)
else:
x = self.sigmoid(x)
return ops.ST_Indicator()(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'input_width': 4, 'input_height': 4}]
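# Minimal usage sketch (not part of the original source), using the default
# soft_decision=True path so the 'ops' dependency noted above is not needed.
router = RouterGAPwithDoubleConv(input_nc=4, input_width=4, input_height=4)
probs = router(torch.rand([4, 4, 4, 4]))
print(probs.shape)  # torch.Size([4]) -- one routing probability per sample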
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import warnings
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_mean_relu_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + 16 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (2 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (3 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (4 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr0 + (5 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (6 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (7 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp36 = tl.load(in_ptr0 + (8 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr0 + (9 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp43 = tl.load(in_ptr0 + (10 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp47 = tl.load(in_ptr0 + (11 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp53 = tl.load(in_ptr0 + (12 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp56 = tl.load(in_ptr0 + (13 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp60 = tl.load(in_ptr0 + (14 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp64 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = tmp4 + tmp7
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp3, tmp10)
tmp12 = tmp8 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp16 = tmp12 + tmp15
tmp17 = 4.0
tmp18 = tmp16 / tmp17
tmp20 = tmp19 + tmp1
tmp21 = triton_helpers.maximum(tmp3, tmp20)
tmp23 = tmp22 + tmp1
tmp24 = triton_helpers.maximum(tmp3, tmp23)
tmp25 = tmp21 + tmp24
tmp27 = tmp26 + tmp1
tmp28 = triton_helpers.maximum(tmp3, tmp27)
tmp29 = tmp25 + tmp28
tmp31 = tmp30 + tmp1
tmp32 = triton_helpers.maximum(tmp3, tmp31)
tmp33 = tmp29 + tmp32
tmp34 = tmp33 / tmp17
tmp35 = tmp18 + tmp34
tmp37 = tmp36 + tmp1
tmp38 = triton_helpers.maximum(tmp3, tmp37)
tmp40 = tmp39 + tmp1
tmp41 = triton_helpers.maximum(tmp3, tmp40)
tmp42 = tmp38 + tmp41
tmp44 = tmp43 + tmp1
tmp45 = triton_helpers.maximum(tmp3, tmp44)
tmp46 = tmp42 + tmp45
tmp48 = tmp47 + tmp1
tmp49 = triton_helpers.maximum(tmp3, tmp48)
tmp50 = tmp46 + tmp49
tmp51 = tmp50 / tmp17
tmp52 = tmp35 + tmp51
tmp54 = tmp53 + tmp1
tmp55 = triton_helpers.maximum(tmp3, tmp54)
tmp57 = tmp56 + tmp1
tmp58 = triton_helpers.maximum(tmp3, tmp57)
tmp59 = tmp55 + tmp58
tmp61 = tmp60 + tmp1
tmp62 = triton_helpers.maximum(tmp3, tmp61)
tmp63 = tmp59 + tmp62
tmp65 = tmp64 + tmp1
tmp66 = triton_helpers.maximum(tmp3, tmp65)
tmp67 = tmp63 + tmp66
tmp68 = tmp67 / tmp17
tmp69 = tmp52 + tmp68
tmp70 = tmp69 / tmp17
tl.store(out_ptr0 + x2, tmp70, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (1, 32), (32, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(2048)](buf1, primals_2,
2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 4, 4), (512, 16, 4, 1))
buf3 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
triton_poi_fused_convolution_mean_relu_1[grid(128)](buf2, primals_5,
buf3, 128, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (32, 1), (1,
32), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4,), (1,), 0)
del buf4
triton_poi_fused_sigmoid_2[grid(4)](buf5, primals_7, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(2048)](buf2
, primals_5, buf6, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del buf2
del primals_5
return (buf5, primals_1, primals_3, primals_4, buf1, buf3, buf5,
primals_6, buf6)
class RouterGAPwithDoubleConvNew(nn.Module):
""" 2 x (Convolution + Relu) + Global Average Pooling + FC + Sigmoid """
def __init__(self, input_nc, input_width, input_height, ngf=32,
kernel_size=3, soft_decision=True, stochastic=False, **kwargs):
super(RouterGAPwithDoubleConvNew, self).__init__()
self.ngf = ngf
self.soft_decision = soft_decision
self.stochastic = stochastic
if max(input_width, input_height) < kernel_size:
warnings.warn('Router kernel too large, shrink it')
kernel_size = max(input_width, input_height)
if max(input_width, input_height) % 2 == 0:
kernel_size += 1
padding = (kernel_size - 1) // 2
self.conv1 = nn.Conv2d(input_nc, ngf, kernel_size=kernel_size,
padding=padding)
self.conv2 = nn.Conv2d(ngf, ngf, kernel_size=kernel_size, padding=
padding)
self.relu = nn.ReLU(inplace=True)
self.linear1 = nn.Linear(ngf, 1)
self.sigmoid = nn.Sigmoid()
def output_controller(self, x):
if self.soft_decision:
return self.sigmoid(x)
if self.stochastic:
x = self.sigmoid(x)
return ops.ST_StochasticIndicator()(x)
else:
x = self.sigmoid(x)
return ops.ST_Indicator()(x)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.linear1.weight
primals_7 = self.linear1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
yulinfeng000/AdaptiveNeuralTrees
|
RouterGAPwithDoubleConv
| false
| 13,161
|
[
"MIT"
] | 0
|
bbcb381b9cb0c91ae1af33ce43b43f352055041c
|
https://github.com/yulinfeng000/AdaptiveNeuralTrees/tree/bbcb381b9cb0c91ae1af33ce43b43f352055041c
|
Attention
|
import torch
class Attention(torch.nn.Module):
def __init__(self):
super(Attention, self).__init__()
def forward(self, hl, hr):
hl = hl / hl.norm(dim=-1, keepdim=True)
hr = hr / hr.norm(dim=-1, keepdim=True)
a = (hl[:, None, :] * hr[None, :, :]).sum(dim=-1)
mu_lr = hr - a.softmax(dim=1).transpose(1, 0) @ hl
mu_rl = hl - a.softmax(dim=0) @ hr
return mu_lr, mu_rl
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
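# Minimal usage sketch (not part of the original source): both outputs keep
# the input shape; the module is parameter-free.
attn = Attention()
mu_lr, mu_rl = attn(torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]))
print(mu_lr.shape, mu_rl.shape)  # torch.Size([4, 4, 4, 4]) twice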
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0 + 64 * x2), xmask, eviction_policy
='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0 + 64 * x2), xmask, eviction_policy
='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex % 64
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp17 = tmp0 - tmp16
tmp18 = tl_math.exp(tmp17)
tl.store(out_ptr0 + x4, tmp9, xmask)
tl.store(out_ptr1 + x4, tmp18, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16 % 4
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_sub_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tmp0 - tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_sub_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 - tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_linalg_vector_norm_0[grid(256)](arg1_1, buf1,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_1[grid(256)](buf0, buf1, buf2, 256, XBLOCK
=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, buf7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused_clone_3[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_sub_4[grid(256)](buf6, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf4
del buf4
triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0)
del buf7
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf9)
del buf1
del buf8
buf10 = buf0
del buf0
triton_poi_fused_sub_6[grid(256)](buf10, buf9, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf9
return buf6, buf10
class AttentionNew(torch.nn.Module):
def __init__(self):
super(AttentionNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
|
yuanqing-wang/graca
|
Attention
| false
| 13,162
|
[
"MIT"
] | 0
|
6934e3cfe219a7f866b1f9e4ebcc107d76b47585
|
https://github.com/yuanqing-wang/graca/tree/6934e3cfe219a7f866b1f9e4ebcc107d76b47585
|
MLP_AlexNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP_AlexNet(nn.Module):
""" The last fully connected part of LeNet MNIST:
https://github.com/BVLC/caffe/blob/master/examples/mnist/lenet.prototxt
"""
def __init__(self, input_nc, input_width, input_height, dropout_prob=
0.0, **kwargs):
super(MLP_AlexNet, self).__init__()
self.dropout_prob = dropout_prob
ngf = input_nc * input_width * input_height
self.fc1 = nn.Linear(ngf, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training, p=self.dropout_prob)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)  # explicit dim avoids the implicit-dim deprecation warning; same result for this 2-D input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'input_width': 4, 'input_height': 4}]
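# Minimal usage sketch (not part of the original source): the (4, 4, 4, 4)
# input is flattened to (4, 64), so fc1 sees ngf = 4 * 4 * 4 = 64 features.
mlp = MLP_AlexNet(input_nc=4, input_width=4, input_height=4)
log_probs = mlp(torch.rand([4, 4, 4, 4]))
print(log_probs.shape)  # torch.Size([4, 10])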
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128, 64), (64, 1))
assert_size_stride(primals_3, (128,), (1,))
assert_size_stride(primals_4, (10, 128), (128, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), reinterpret_tensor(primals_2, (64, 128), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(512)](buf1, primals_3, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(128, 10), (1, 128), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_1[grid(4)](buf2, buf5, 4, 10, XBLOCK=
1, num_warps=2, num_stages=1)
del buf2
return buf5, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), buf1, buf5, primals_4
class MLP_AlexNetNew(nn.Module):
""" The last fully connected part of LeNet MNIST:
https://github.com/BVLC/caffe/blob/master/examples/mnist/lenet.prototxt
"""
def __init__(self, input_nc, input_width, input_height, dropout_prob=
0.0, **kwargs):
super(MLP_AlexNetNew, self).__init__()
self.dropout_prob = dropout_prob
ngf = input_nc * input_width * input_height
self.fc1 = nn.Linear(ngf, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
yulinfeng000/AdaptiveNeuralTrees
|
MLP_AlexNet
| false
| 13,163
|
[
"MIT"
] | 0
|
bbcb381b9cb0c91ae1af33ce43b43f352055041c
|
https://github.com/yulinfeng000/AdaptiveNeuralTrees/tree/bbcb381b9cb0c91ae1af33ce43b43f352055041c
|
MLP_LeNetMNIST
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP_LeNetMNIST(nn.Module):
""" The last fully connected part of LeNet MNIST:
https://github.com/BVLC/caffe/blob/master/examples/mnist/lenet.prototxt
"""
def __init__(self, input_nc, input_width, input_height, dropout_prob=
0.0, **kwargs):
super(MLP_LeNetMNIST, self).__init__()
self.dropout_prob = dropout_prob
ngf = input_nc * input_width * input_height
self.fc1 = nn.Linear(ngf, int(round(ngf / 1.6)))
self.fc2 = nn.Linear(int(round(ngf / 1.6)), 10)
def forward(self, x):
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training, p=self.dropout_prob)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)  # explicit dim avoids the implicit-dim deprecation warning; same result for this 2-D input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'input_width': 4, 'input_height': 4}]
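# Minimal usage sketch (not part of the original source): with ngf = 64 the
# hidden width is int(round(64 / 1.6)) = 40, matching the (40, 64) weight
# asserted in call() below.
mlp = MLP_LeNetMNIST(input_nc=4, input_width=4, input_height=4)
log_probs = mlp(torch.rand([4, 4, 4, 4]))
print(log_probs.shape)  # torch.Size([4, 10])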
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 160
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 40
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (40, 64), (64, 1))
assert_size_stride(primals_3, (40,), (1,))
assert_size_stride(primals_4, (10, 40), (40, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 40), (40, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), reinterpret_tensor(primals_2, (64, 40), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(160)](buf1, primals_3, 160, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(40, 10), (1, 40), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_1[grid(4)](buf2, buf5, 4, 10, XBLOCK=
1, num_warps=2, num_stages=1)
del buf2
return buf5, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), buf1, buf5, primals_4
class MLP_LeNetMNISTNew(nn.Module):
""" The last fully connected part of LeNet MNIST:
https://github.com/BVLC/caffe/blob/master/examples/mnist/lenet.prototxt
"""
def __init__(self, input_nc, input_width, input_height, dropout_prob=
0.0, **kwargs):
super(MLP_LeNetMNISTNew, self).__init__()
self.dropout_prob = dropout_prob
ngf = input_nc * input_width * input_height
self.fc1 = nn.Linear(ngf, int(round(ngf / 1.6)))
self.fc2 = nn.Linear(int(round(ngf / 1.6)), 10)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
yulinfeng000/AdaptiveNeuralTrees
|
MLP_LeNetMNIST
| false
| 13,164
|
[
"MIT"
] | 0
|
bbcb381b9cb0c91ae1af33ce43b43f352055041c
|
https://github.com/yulinfeng000/AdaptiveNeuralTrees/tree/bbcb381b9cb0c91ae1af33ce43b43f352055041c
|
LRN
|
import torch
import torch.nn as nn
class LRN(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=
False):
super(LRN, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if self.ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, x):
if self.ACROSS_CHANNELS:
div = x.pow(2).unsqueeze(1)
div = self.average(div).squeeze(1)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
else:
div = x.pow(2)
div = self.average(div)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
x = x.div(div)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
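# Minimal usage sketch (not part of the original source): with the default
# local_size=1 the average pool is an identity, so the forward pass reduces to
# x / (alpha * x^2 + 1) ** beta elementwise and preserves the input shape,
# exactly what the fused Triton kernel below computes.
lrn = LRN()
y = lrn(torch.rand([4, 4, 4, 4]))
print(y.shape)  # torch.Size([4, 4, 4, 4])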
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_avg_pool2d_div_mul_pow_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 + tmp2
tmp6 = 0.75
tmp7 = libdevice.pow(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_avg_pool2d_div_mul_pow_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class LRNNew(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=
False):
super(LRNNew, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if self.ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
zenghui9977/AFL
|
LRN
| false
| 13,165
|
[
"MIT"
] | 0
|
769d78be94ce8f80d376aceb2de9dc5a9d20a807
|
https://github.com/zenghui9977/AFL/tree/769d78be94ce8f80d376aceb2de9dc5a9d20a807
|
SimpleMLPGen_with_meta_feature
|
import torch
import torch.optim
import torch.jit
import torch.nn as nn
class SimpleMLPGen_with_meta_feature(nn.Module):
def __init__(self, num_in_features, num_out_features, neurons_per_layer):
super(SimpleMLPGen_with_meta_feature, self).__init__()
self.l_in = nn.Linear(in_features=num_in_features, out_features=
neurons_per_layer)
self.l_out = nn.Linear(in_features=neurons_per_layer, out_features=
num_out_features)
self.act = nn.ELU()
def forward(self, x):
x = self.act(self.l_in(x))
x = self.l_out(x)
return x
def set_parameters(self, meta_in_features, simple_mlp_gen_obj):
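# NOTE: this method assumes the passed generator object exposes
# num_in_features / neurons_per_layer / num_out_features as attributes;
# this class's __init__ does not store them, so simple_mlp_gen_obj must
# come from a class that does.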
x = simple_mlp_gen_obj.act(simple_mlp_gen_obj.l_in(meta_in_features))
x = simple_mlp_gen_obj.l_out(x)
_base = (simple_mlp_gen_obj.num_in_features * simple_mlp_gen_obj.
neurons_per_layer)
l_in_weight = x[:_base].reshape((simple_mlp_gen_obj.num_in_features,
simple_mlp_gen_obj.neurons_per_layer)).t()
l_in_bias = x[_base:_base + simple_mlp_gen_obj.neurons_per_layer]
_base += simple_mlp_gen_obj.neurons_per_layer
_base_add = (simple_mlp_gen_obj.neurons_per_layer *
simple_mlp_gen_obj.num_out_features)
l_out_weight = x[_base:_base + _base_add].reshape((
simple_mlp_gen_obj.neurons_per_layer, simple_mlp_gen_obj.
num_out_features)).t()
_base += _base_add
l_out_bias = x[_base:]
self.l_in.weight = torch.nn.Parameter(l_in_weight)
self.l_out.weight = torch.nn.Parameter(l_out_weight)
self.l_in.bias = torch.nn.Parameter(l_in_bias)
self.l_out.bias = torch.nn.Parameter(l_out_bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_in_features': 4, 'num_out_features': 4,
'neurons_per_layer': 1}]
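# Minimal usage sketch of the forward pass (not part of the original source):
gen = SimpleMLPGen_with_meta_feature(num_in_features=4, num_out_features=4, neurons_per_layer=1)
y = gen(torch.rand([4, 4, 4, 4]))
print(y.shape)  # torch.Size([4, 4, 4, 4]) -- Linear(4, 1) -> ELU -> Linear(1, 4)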
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.optim
import torch.jit
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 1), (
1, 0), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0),
alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 1), (1, 1), 0), primals_4
class SimpleMLPGen_with_meta_featureNew(nn.Module):
def __init__(self, num_in_features, num_out_features, neurons_per_layer):
super(SimpleMLPGen_with_meta_featureNew, self).__init__()
self.l_in = nn.Linear(in_features=num_in_features, out_features=
neurons_per_layer)
self.l_out = nn.Linear(in_features=neurons_per_layer, out_features=
num_out_features)
self.act = nn.ELU()
def set_parameters(self, meta_in_features, simple_mlp_gen_obj):
x = simple_mlp_gen_obj.act(simple_mlp_gen_obj.l_in(meta_in_features))
x = simple_mlp_gen_obj.l_out(x)
_base = (simple_mlp_gen_obj.num_in_features * simple_mlp_gen_obj.
neurons_per_layer)
l_in_weight = x[:_base].reshape((simple_mlp_gen_obj.num_in_features,
simple_mlp_gen_obj.neurons_per_layer)).t()
l_in_bias = x[_base:_base + simple_mlp_gen_obj.neurons_per_layer]
_base += simple_mlp_gen_obj.neurons_per_layer
_base_add = (simple_mlp_gen_obj.neurons_per_layer *
simple_mlp_gen_obj.num_out_features)
l_out_weight = x[_base:_base + _base_add].reshape((
simple_mlp_gen_obj.neurons_per_layer, simple_mlp_gen_obj.
num_out_features)).t()
_base += _base_add
l_out_bias = x[_base:]
self.l_in.weight = torch.nn.Parameter(l_in_weight)
self.l_out.weight = torch.nn.Parameter(l_out_weight)
self.l_in.bias = torch.nn.Parameter(l_in_bias)
self.l_out.bias = torch.nn.Parameter(l_out_bias)
def forward(self, input_0):
primals_1 = self.l_in.weight
primals_2 = self.l_in.bias
primals_4 = self.l_out.weight
primals_5 = self.l_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
zhaofeng-shu33/deep_euler_tests
|
SimpleMLPGen_with_meta_feature
| false
| 13,166
|
[
"MIT"
] | 0
|
a3d0961af679d490b0c58873ee0726234122bc7a
|
https://github.com/zhaofeng-shu33/deep_euler_tests/tree/a3d0961af679d490b0c58873ee0726234122bc7a
|
BertLMHead
|
from _paritybench_helpers import _mock_config
from torch.nn import Module
import torch
from torch.nn import LayerNorm
from torch.nn import Linear
from torch.nn.functional import gelu
class BertLMHead(Module):
def __init__(self, config):
super(BertLMHead, self).__init__()
hidden_size = config['hidden_size']
self.mlp = Linear(hidden_size, hidden_size, bias=True)
self.unembedding = Linear(hidden_size, config['vocab_size'], bias=True)
self.layer_norm = LayerNorm((hidden_size,))
def forward(self, activations):
return self.unembedding(self.layer_norm(gelu(self.mlp(activations))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, vocab_size=4)}]
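# Minimal usage sketch (not part of the original source): the config is only
# indexed by key, so a plain dict stands in for _mock_config here.
head = BertLMHead({'hidden_size': 4, 'vocab_size': 4})
logits = head(torch.rand([4, 4, 4, 4]))
print(logits.shape)  # torch.Size([4, 4, 4, 4])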
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
from torch.nn import LayerNorm
from torch.nn import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp9 * tmp3
tmp12 = libdevice.erf(tmp11)
tmp13 = tmp12 + tmp6
tmp14 = tmp10 * tmp13
tmp15 = tmp8 + tmp14
tmp17 = tmp16 * tmp1
tmp18 = tmp16 * tmp3
tmp19 = libdevice.erf(tmp18)
tmp20 = tmp19 + tmp6
tmp21 = tmp17 * tmp20
tmp22 = tmp15 + tmp21
tmp24 = tmp23 * tmp1
tmp25 = tmp23 * tmp3
tmp26 = libdevice.erf(tmp25)
tmp27 = tmp26 + tmp6
tmp28 = tmp24 * tmp27
tmp29 = tmp22 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = tmp8 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp14 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp21 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp28 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp30
tl.store(out_ptr0 + x0, tmp31, xmask)
tl.store(out_ptr1 + x0, tmp43, xmask)
@triton.jit
def triton_poi_fused_gelu_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp9 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp8 - tmp9
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp10 * tmp14
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_native_layer_norm_0[grid(64)](buf0, buf1,
buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_gelu_native_layer_norm_1[grid(256)](buf0, buf1,
buf2, primals_4, primals_5, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del buf2
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6
class BertLMHeadNew(Module):
def __init__(self, config):
super(BertLMHeadNew, self).__init__()
hidden_size = config['hidden_size']
self.mlp = Linear(hidden_size, hidden_size, bias=True)
self.unembedding = Linear(hidden_size, config['vocab_size'], bias=True)
self.layer_norm = LayerNorm((hidden_size,))
def forward(self, input_0):
primals_1 = self.mlp.weight
primals_2 = self.mlp.bias
primals_6 = self.unembedding.weight
primals_4 = self.unembedding.bias
primals_5 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
yulonglin/bert
|
BertLMHead
| false
| 13,167
|
[
"MIT"
] | 0
|
7f992e88f109e4267b0e84f8398cab0561a67f4f
|
https://github.com/yulonglin/bert/tree/7f992e88f109e4267b0e84f8398cab0561a67f4f
|
NormedResidualLayer
|
from torch.nn import Module
import torch
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Linear
from torch.nn.functional import gelu
class NormedResidualLayer(Module):
def __init__(self, size, intermediate_size, dropout):
super(NormedResidualLayer, self).__init__()
self.mlp1 = Linear(size, intermediate_size, bias=True)
self.mlp2 = Linear(intermediate_size, size, bias=True)
self.layer_norm = LayerNorm((size,))
self.dropout = Dropout(dropout)
def forward(self, input):
intermediate = gelu(self.mlp1(input))
output = self.dropout(self.mlp2(intermediate)) + input
output = self.layer_norm(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': 4, 'intermediate_size': 4, 'dropout': 0.5}]
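# Minimal usage sketch (not part of the original source): eval() disables
# dropout so the residual + layer-norm path is deterministic.
layer = NormedResidualLayer(size=4, intermediate_size=4, dropout=0.5)
layer.eval()
y = layer(torch.rand([4, 4, 4, 4]))
print(y.shape)  # torch.Size([4, 4, 4, 4])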
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
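# Editor's note on the kernel above: it applies the exact (erf-based) GELU
# elementwise, gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), where
# 0.7071067811865476 = 1/sqrt(2). This matches torch.nn.functional.gelu with
# its default approximate='none' formulation.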
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
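# Editor's note on the kernel above: for each of the 64 length-4 rows it sums
# mlp2's output (in_ptr0) with the residual input (in_ptr1), then stores the
# row mean in out_ptr0 and the biased variance (sum of squared deviations
# divided by 4) in out_ptr1 -- the two statistics LayerNorm needs.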
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
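# Editor's note on the kernel above: it recomputes the residual sum, then
# normalizes with the per-row mean (in_ptr2) and variance (in_ptr3) via
# rsqrt(var + 1e-05), and applies the LayerNorm affine transform (weight
# in_ptr4, bias in_ptr5), i.e. y = (x - mean) * rsqrt(var + eps) * w + b.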
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(64)](buf2, primals_3,
buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(256)](buf2, primals_3,
buf3, buf4, primals_6, primals_7, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
del buf4
del primals_7
return buf5, primals_3, primals_6, buf0, reinterpret_tensor(buf1, (64,
4), (4, 1), 0), buf2, primals_4
class NormedResidualLayerNew(Module):
def __init__(self, size, intermediate_size, dropout):
super(NormedResidualLayerNew, self).__init__()
self.mlp1 = Linear(size, intermediate_size, bias=True)
self.mlp2 = Linear(intermediate_size, size, bias=True)
self.layer_norm = LayerNorm((size,))
self.dropout = Dropout(dropout)
def forward(self, input_0):
primals_1 = self.mlp1.weight
primals_2 = self.mlp1.bias
primals_4 = self.mlp2.weight
primals_5 = self.mlp2.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
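# Editor's note (hedged): the generated call() applies no dropout mask, so it
# corresponds to eval-mode semantics; compare against the eager module only
# after .eval(). A minimal check, assuming a CUDA device and the eager
# NormedResidualLayer from earlier in this record being in scope:
if __name__ == '__main__':
    ref = NormedResidualLayer(size=4, intermediate_size=4, dropout=0.5).cuda().eval()
    new = NormedResidualLayerNew(size=4, intermediate_size=4, dropout=0.5).cuda()
    new.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(new(x), ref(x), rtol=1e-4, atol=1e-4)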
| yulonglin/bert | NormedResidualLayer | false | 13,168 | ["MIT"] | 0 | 7f992e88f109e4267b0e84f8398cab0561a67f4f | https://github.com/yulonglin/bert/tree/7f992e88f109e4267b0e84f8398cab0561a67f4f |
GLU
|
import torch
from torch import nn
class GLU(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
out, gate = x.chunk(2, dim=self.dim)
return out * gate.sigmoid()
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
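# Editor's usage sketch (hedged): GLU halves the chosen dimension, gating the
# first half with a sigmoid of the second; with dim=-1 it agrees with
# torch.nn.functional.glu.
if __name__ == '__main__':
    glu = GLU(dim=-1)
    x = torch.rand(2, 8)          # the gated dimension must be even
    y = glu(x)                    # shape (2, 4)
    out, gate = x.chunk(2, dim=-1)
    torch.testing.assert_close(y, out * gate.sigmoid())
    torch.testing.assert_close(y, torch.nn.functional.glu(x, dim=-1))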
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
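# Editor's note on the kernel above: the input's last dimension (size 4) is
# split in two; x0 in {0, 1} indexes within each half and x1 walks the
# leading dimensions, so tmp0 reads the `out` half and tmp1 (offset +2) reads
# the `gate` half, storing out * sigmoid(gate) contiguously.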
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(512)](arg0_1, buf0, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GLUNew(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| zhengx18/conformer | GLU | false | 13,169 | ["MIT"] | 0 | a258c0b0cc70034f53d2b2040badf5d58aab95bc | https://github.com/zhengx18/conformer/tree/a258c0b0cc70034f53d2b2040badf5d58aab95bc |
TripletLogExpLoss
|
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
class TripletLogExpLoss(nn.Module):
"""Creates a criterion that measures the triplet loss given an input
tensors x1, x2, x3.
This is used for measuring a relative similarity between samples. A triplet
is composed by `a`, `p` and `n`: anchor, positive examples and negative
example respectively. The shape of all input variables should be
:math:`(N, D)`.
The distance is described in detail in the paper `Improving Pairwise Ranking for Multi-Label
Image Classification`_ by Y. Li et al.
.. math::
        L(a, p, n) = \\log \\left( 1 + \\exp(d(a_i, p_i) - d(a_i, n_i)) \\right)
Args:
anchor: anchor input tensor
positive: positive input tensor
negative: negative input tensor
Shape:
- Input: :math:`(N, D)` where `D = vector dimension`
- Output: :math:`(N, 1)`
>>> triplet_loss = nn.TripletLogExpLoss(p=2)
>>> input1 = autograd.Variable(torch.randn(100, 128))
>>> input2 = autograd.Variable(torch.randn(100, 128))
>>> input3 = autograd.Variable(torch.randn(100, 128))
>>> output = triplet_loss(input1, input2, input3)
>>> output.backward()
.. _Learning shallow convolutional feature descriptors with triplet losses:
http://www.iis.ee.ic.ac.uk/%7Evbalnt/shallow_descr/TFeat_paper.pdf
"""
def __init__(self, p=2, eps=1e-06, swap=False):
super(TripletLogExpLoss, self).__init__()
self.p = p
self.eps = eps
self.swap = swap
def forward(self, anchor, positive, negative):
assert anchor.size() == positive.size(
), 'Input sizes between positive and negative must be equal.'
assert anchor.size() == negative.size(
), 'Input sizes between anchor and negative must be equal.'
assert positive.size() == negative.size(
), 'Input sizes between positive and negative must be equal.'
assert anchor.dim() == 2, 'Input must be a 2D matrix.'
d_p = F.pairwise_distance(anchor, positive, self.p, self.eps)
d_n = F.pairwise_distance(anchor, negative, self.p, self.eps)
if self.swap:
d_s = F.pairwise_distance(positive, negative, self.p, self.eps)
d_n = torch.min(d_n, d_s)
dist = torch.log(1 + torch.exp(d_p - d_n))
loss = torch.mean(dist)
return loss
def eval_func(self, dp, dn):
return np.log(1 + np.exp(dp - dn))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
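# Editor's usage sketch (hedged): log(1 + exp(z)) is softplus(z), so with the
# default p=2 and eps=1e-06 the loss can be cross-checked against F.softplus
# applied to the distance gap.
if __name__ == '__main__':
    loss_fn = TripletLogExpLoss(p=2)
    anchor, pos, neg = (torch.randn(8, 16) for _ in range(3))
    loss = loss_fn(anchor, pos, neg)
    d_p = F.pairwise_distance(anchor, pos)
    d_n = F.pairwise_distance(anchor, neg)
    torch.testing.assert_close(loss, F.softplus(d_p - d_n).mean(),
                               rtol=1e-5, atol=1e-6)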
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_exp_log_mean_norm_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp26 = tmp0 - tmp25
tmp27 = tmp26 + tmp3
tmp28 = tmp27 * tmp27
tmp30 = tmp6 - tmp29
tmp31 = tmp30 + tmp3
tmp32 = tmp31 * tmp31
tmp33 = tmp28 + tmp32
tmp35 = tmp12 - tmp34
tmp36 = tmp35 + tmp3
tmp37 = tmp36 * tmp36
tmp38 = tmp33 + tmp37
tmp40 = tmp18 - tmp39
tmp41 = tmp40 + tmp3
tmp42 = tmp41 * tmp41
tmp43 = tmp38 + tmp42
tmp44 = libdevice.sqrt(tmp43)
tmp45 = tmp24 - tmp44
tmp46 = tl_math.exp(tmp45)
tmp47 = 1.0
tmp48 = tmp46 + tmp47
tmp49 = tl_math.log(tmp48)
tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK])
tmp52 = tl.sum(tmp50, 1)[:, None]
tmp53 = 4.0
tmp54 = tmp52 / tmp53
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp54, None)
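# Editor's note on the kernel above: a single program fuses the whole loss for
# the fixed 4x4 inputs -- tmp24 and tmp44 are the two p=2 pairwise distances
# (with eps=1e-06 added to each difference, as in F.pairwise_distance),
# tmp49 is log(1 + exp(d_p - d_n)), and tmp54 is the mean over the 4 rows.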
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_exp_log_mean_norm_sub_0[grid(1)](buf2, arg0_1,
arg1_1, arg2_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class TripletLogExpLossNew(nn.Module):
"""Creates a criterion that measures the triplet loss given an input
tensors x1, x2, x3.
This is used for measuring a relative similarity between samples. A triplet
is composed by `a`, `p` and `n`: anchor, positive examples and negative
example respectively. The shape of all input variables should be
:math:`(N, D)`.
The distance is described in detail in the paper `Improving Pairwise Ranking for Multi-Label
Image Classification`_ by Y. Li et al.
.. math::
        L(a, p, n) = \\log \\left( 1 + \\exp(d(a_i, p_i) - d(a_i, n_i)) \\right)
Args:
anchor: anchor input tensor
positive: positive input tensor
negative: negative input tensor
Shape:
- Input: :math:`(N, D)` where `D = vector dimension`
- Output: :math:`(N, 1)`
>>> triplet_loss = nn.TripletLogExpLoss(p=2)
>>> input1 = autograd.Variable(torch.randn(100, 128))
>>> input2 = autograd.Variable(torch.randn(100, 128))
>>> input3 = autograd.Variable(torch.randn(100, 128))
>>> output = triplet_loss(input1, input2, input3)
>>> output.backward()
.. _Learning shallow convolutional feature descriptors with triplet losses:
http://www.iis.ee.ic.ac.uk/%7Evbalnt/shallow_descr/TFeat_paper.pdf
"""
def __init__(self, p=2, eps=1e-06, swap=False):
super(TripletLogExpLossNew, self).__init__()
self.p = p
self.eps = eps
self.swap = swap
def eval_func(self, dp, dn):
return np.log(1 + np.exp(dp - dn))
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| zhangxue123/deep-image-retrieval | TripletLogExpLoss | false | 13,170 | ["BSD-3-Clause"] | 0 | ac188856fa5a034aed3f7ed3fb617d580da44462 | https://github.com/zhangxue123/deep-image-retrieval/tree/ac188856fa5a034aed3f7ed3fb617d580da44462 |