| entry_point (string, len 1–65) | original_triton_python_code (string, len 208–619k) | optimised_triton_code (string, len 1.15k–275k) | repo_name (string, len 7–115) | module_name (string, len 1–65) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (list, len 1–6) | stars (int64, 0–19.8k) | sha (string, len 40) | repo_link (string, len 72–180) |
|---|---|---|---|---|---|---|---|---|---|---|
IoULoss
|
import torch
import torch.nn as nn
class IoULoss(nn.Module):
def __init__(self, weight=None, size_average=True):
super(IoULoss, self).__init__()
def forward(self, inputs: 'torch.Tensor', targets: 'torch.Tensor',
smooth: 'int'=1):
inputs = torch.sigmoid(inputs)
inputs = inputs.view(-1)
targets = targets.view(-1)
intersection = (inputs * targets).sum()
total = (inputs + targets).sum()
union = total - intersection
IoU = (intersection + smooth) / (union + smooth)
return 1 - IoU
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp1 + tmp2
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 1.0
tmp12 = tmp6 + tmp11
tmp13 = tmp10 - tmp6
tmp14 = tmp13 + tmp11
tmp15 = tmp12 / tmp14
tmp16 = tmp11 - tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sub_sum_0[grid(1)](buf2, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class IoULossNew(nn.Module):
def __init__(self, weight=None, size_average=True):
super(IoULossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
| Latterlig96/DCUnet | IoULoss | false | 8,489 | ["MIT"] | 11 | 87d1c137a60177d6daf1dfff0483678d5580fda0 | https://github.com/Latterlig96/DCUnet/tree/87d1c137a60177d6daf1dfff0483678d5580fda0 |
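A quick way to sanity-check a row like this one is to run the eager module against its compiled counterpart on matching inputs. Below is a minimal sketch, assuming the two class definitions above are in scope and a CUDA device is available (the generated `call` allocates CUDA buffers); the variable names are illustrative, not from the dataset.

```python
import torch

# Sketch: compare eager IoULoss with the fused-kernel IoULossNew (CUDA assumed).
if torch.cuda.is_available():
    torch.manual_seed(0)
    x = torch.rand(4, 4, 4, 4, device="cuda")
    y = torch.rand(4, 4, 4, 4, device="cuda")
    ref = IoULoss()(x, y)     # eager path: sigmoid -> flatten -> IoU
    opt = IoULossNew()(x, y)  # single fused Triton reduction over 256 elements
    assert torch.allclose(ref, opt, atol=1e-5)
```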
AdaILN
|
import torch
import torch.utils.data
import torch.utils.data.distributed
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class AdaILN(nn.Module):
def __init__(self, num_features, eps=1e-05):
super(AdaILN, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.9)
def forward(self, x, gamma, beta):
in_mean, in_var = torch.mean(x, dim=[2, 3], keepdim=True), torch.var(x,
dim=[2, 3], keepdim=True)
out_in = (x - in_mean) / torch.sqrt(in_var + self.eps)
ln_mean, ln_var = torch.mean(x, dim=[1, 2, 3], keepdim=True
), torch.var(x, dim=[1, 2, 3], keepdim=True)
out_ln = (x - ln_mean) / torch.sqrt(ln_var + self.eps)
out = self.rho.expand(x.shape[0], -1, -1, -1) * out_in + (1 - self.
rho.expand(x.shape[0], -1, -1, -1)) * out_ln
out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2
).unsqueeze(3)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.utils.data.distributed
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = 15.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp27 = tmp0 - tmp20
tmp28 = tmp27 / tmp25
tmp29 = tmp26 * tmp28
tmp30 = 1.0
tmp31 = tmp30 - tmp26
tmp33 = tmp0 - tmp32
tmp35 = tmp33 / tmp34
tmp36 = tmp31 * tmp35
tmp37 = tmp29 + tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp37, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_rsub_sub_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex % 256
x0 = xindex % 16
x2 = xindex // 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), None, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + x4, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf6
buf11 = reinterpret_tensor(buf9, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf9
get_raw_stream(0)
triton_per_fused_add_mean_sqrt_var_0[grid(4)](buf7, buf11,
primals_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf3
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1[grid(16)](buf1,
buf5, primals_1, primals_2, buf7, buf11, buf12, 16, 16, XBLOCK=
1, num_warps=2, num_stages=1)
del primals_2
buf13 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16,
4, 1), torch.float32)
triton_poi_fused_add_div_mul_rsub_sub_2[grid(4096)](buf12,
primals_3, primals_4, buf13, 4096, XBLOCK=256, num_warps=4,
num_stages=1)
del buf12
del primals_4
return buf13, primals_1, primals_3, buf1, buf5, buf7, buf11
class AdaILNNew(nn.Module):
def __init__(self, num_features, eps=1e-05):
super(AdaILNNew, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.9)
def forward(self, input_0, input_1, input_2):
primals_2 = self.rho
primals_1 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
| Lornatang/UGATIT_PyTorch | AdaILN | false | 8,490 | ["Apache-2.0"] | 25 | 03519e4829b85ceee67c031a28d5a9318ac932b5 | https://github.com/Lornatang/UGATIT_PyTorch/tree/03519e4829b85ceee67c031a28d5a9318ac932b5 |
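The same kind of check works here once the learnable `rho` is shared between the two modules. A minimal sketch, again assuming CUDA (note that the kernels divide by 63 and 15, i.e. the unbiased variance, matching `torch.var`'s default):

```python
import torch

# Sketch: AdaILNNew should match AdaILN after copying rho (CUDA assumed).
if torch.cuda.is_available():
    torch.manual_seed(0)
    x, gamma, beta = (torch.rand(4, 4, 4, 4, device="cuda") for _ in range(3))
    eager = AdaILN(num_features=4).cuda()
    fused = AdaILNNew(num_features=4).cuda()
    fused.rho.data.copy_(eager.rho.data)
    assert torch.allclose(eager(x, gamma, beta), fused(x, gamma, beta), atol=1e-4)
```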
AttentionHead
|
import torch
from torch import Tensor
import torch.optim.lr_scheduler
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.onnx.operators
def scaled_dot_product_attention(query: 'Tensor', key: 'Tensor', value:
'Tensor') ->Tensor:
temp = query.bmm(key.transpose(1, 2))
scale = query.size(-1) ** 0.5
softmax = F.softmax(temp / scale, dim=-1)
return softmax.bmm(value)
class AttentionHead(nn.Module):
def __init__(self, dim_in: 'int', dim_k: 'int', dim_v: 'int'):
super().__init__()
self.q = nn.Linear(dim_in, dim_k)
self.k = nn.Linear(dim_in, dim_k)
self.v = nn.Linear(dim_in, dim_v)
def forward(self, query: 'Tensor', key: 'Tensor', value: 'Tensor'
) ->Tensor:
return scaled_dot_product_attention(self.q(query), self.k(key),
self.v(value))
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_k': 4, 'dim_v': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import Tensor
import torch.optim.lr_scheduler
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.onnx.operators
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (16,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(primals_9, (16,
4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_7
del primals_8
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 4,
1), 0), out=buf6)
return buf6, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
def scaled_dot_product_attention(query: 'Tensor', key: 'Tensor', value:
'Tensor') ->Tensor:
temp = query.bmm(key.transpose(1, 2))
scale = query.size(-1) ** 0.5
softmax = F.softmax(temp / scale, dim=-1)
return softmax.bmm(value)
class AttentionHeadNew(nn.Module):
def __init__(self, dim_in: 'int', dim_k: 'int', dim_v: 'int'):
super().__init__()
self.q = nn.Linear(dim_in, dim_k)
self.k = nn.Linear(dim_in, dim_k)
self.v = nn.Linear(dim_in, dim_v)
def forward(self, input_0, input_1, input_2):
primals_1 = self.q.weight
primals_2 = self.q.bias
primals_4 = self.k.weight
primals_5 = self.k.bias
primals_7 = self.v.weight
primals_8 = self.v.bias
primals_3 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
| LogIntelligence/LogADEmpirical | AttentionHead | false | 8,491 | ["MIT"] | 11 | 48458aee65c1c84466b04dd4092fae79a7f341fd | https://github.com/LogIntelligence/LogADEmpirical/tree/48458aee65c1c84466b04dd4092fae79a7f341fd |
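Because the compiled head pulls its projections from the same `nn.Linear` submodules, `load_state_dict` makes the two paths directly comparable. A minimal sketch assuming CUDA (the `scale` of `4 ** 0.5 = 2` appears in the kernel as the `0.5` multiplier applied after the max-subtraction):

```python
import torch

# Sketch: compare eager AttentionHead with AttentionHeadNew (CUDA assumed).
if torch.cuda.is_available():
    torch.manual_seed(0)
    q, k, v = (torch.rand(4, 4, 4, device="cuda") for _ in range(3))
    eager = AttentionHead(dim_in=4, dim_k=4, dim_v=4).cuda()
    fused = AttentionHeadNew(dim_in=4, dim_k=4, dim_v=4).cuda()
    fused.load_state_dict(eager.state_dict())
    assert torch.allclose(eager(q, k, v), fused(q, k, v), atol=1e-5)
```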
PatchToPatchEdgeConvolution
|
import math
import torch
import torch.nn as nn
import torch.sparse as sp
class PatchToPatchEdgeConvolution(nn.Module):
def __init__(self, in_features, out_features):
super(PatchToPatchEdgeConvolution, self).__init__()
self.weight = nn.parameter.Parameter(torch.FloatTensor(in_features,
out_features))
self.bias = nn.parameter.Parameter(torch.FloatTensor(out_features))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, edge_nodes, adj_matrix, inc_matrix, edge_feats):
"""
:param edge_nodes: Matrix indicating the nodes which each edge in the
batch connects. Shape [B, N]
:param adj_matrix: Sparse adjacency matrix of the graph of shape
[N, N]. Must contain only 1-entries (i.e. should not be normalised).
:param inc_matrix: Sparse incidence matrix of the graph of shape
[N, E].
:param edge_feats: Features of *all* edges in the graph. Shape [E, D].
:return: Hidden representation of shape [B, K].
"""
batch_edge_idcs = sp.mm(adj_matrix.transpose(1, 0), edge_nodes.
transpose(1, 0))
batch_edge_idcs = sp.mm(inc_matrix.transpose(1, 0), batch_edge_idcs
).transpose(1, 0)
batch_edge_idcs = (batch_edge_idcs == 2.0).float()
row_sum = torch.sum(batch_edge_idcs, dim=1)
inv = 1.0 / row_sum
inv[torch.isinf(inv)] = 0.0
batch_edge_idcs = batch_edge_idcs * inv.view(-1, 1)
h_edges = torch.mm(edge_feats, self.weight) + self.bias
h = torch.spmm(batch_edge_idcs, h_edges)
return h
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]),
torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_eq_index_put_lift_fresh_mul_reciprocal_sum_1(
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 2.0
tmp2 = tmp0 == tmp1
tmp3 = tmp2.to(tl.float32)
tmp5 = tmp4 == tmp1
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp3 + tmp6
tmp9 = tmp8 == tmp1
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp7 + tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp11 + tmp14
tmp16 = tl.full([1], 1, tl.int32)
tmp17 = tmp16 / tmp15
tmp18 = 1.0
tmp19 = tmp17 * tmp18
tmp20 = libdevice.isinf(tmp19).to(tl.int1)
tmp21 = 0.0
tmp22 = tl.where(tmp20, tmp21, tmp19)
tl.store(out_ptr0 + x0, tmp22, xmask)
@triton.jit
def triton_poi_fused__to_copy_eq_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = tmp0 == tmp1
tmp3 = tmp2.to(tl.float32)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + x2, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf1 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0,
(4, 4), (1, 4), 0), primals_2, primals_1, beta=0)
del primals_1
del primals_2
buf2 = buf1
del buf1
buf3 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0,
(4, 4), (1, 4), 0), buf2, primals_3, beta=0)
del primals_3
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused__to_copy_eq_index_put_lift_fresh_mul_reciprocal_sum_1[
grid(4)](buf4, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__to_copy_eq_mul_2[grid(16)](buf6, buf5, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del buf5
buf7 = buf2
del buf2
extern_kernels.addmm(primals_6, primals_5, primals_4, alpha=1, beta
=1, out=buf7)
del primals_4
del primals_6
buf8 = buf0
del buf0
extern_kernels.mm(buf6, buf7, out=buf8)
del buf7
return buf8, reinterpret_tensor(buf6, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0)
class PatchToPatchEdgeConvolutionNew(nn.Module):
def __init__(self, in_features, out_features):
super(PatchToPatchEdgeConvolutionNew, self).__init__()
self.weight = nn.parameter.Parameter(torch.FloatTensor(in_features,
out_features))
self.bias = nn.parameter.Parameter(torch.FloatTensor(out_features))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.weight
primals_6 = self.bias
primals_2 = input_0
primals_3 = input_1
primals_4 = input_2
primals_5 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
| Lujian-123321/gcn- | PatchToPatchEdgeConvolution | false | 8,492 | ["MIT"] | 12 | 8f3a0a1d979bc7f075352e194e1e39687f0b12ab | https://github.com/Lujian-123321/gcn-/tree/8f3a0a1d979bc7f075352e194e1e39687f0b12ab |
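The fused kernels above implement the `== 2.0` indicator and row-normalisation steps of the forward pass. As an illustration only (not the author's test), here is the dense-tensor equivalent with hypothetical `B = N = E = D = 4` inputs, using ordinary matmuls in place of the sparse ops:

```python
import torch

# Dense illustration of PatchToPatchEdgeConvolution.forward (hypothetical shapes).
torch.manual_seed(0)
edge_nodes, adj, inc, feats = (torch.rand(4, 4) for _ in range(4))
idcs = (inc.t() @ (adj.t() @ edge_nodes.t())).t()
idcs = (idcs == 2.0).float()   # an edge is selected iff both endpoints are hit
inv = 1.0 / idcs.sum(dim=1)
inv[torch.isinf(inv)] = 0.0    # guard empty rows against division by zero
idcs = idcs * inv.view(-1, 1)  # mean over the selected edges
# idcs @ (feats @ weight + bias) then yields the [B, K] hidden representation.
```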
PositionwiseFeedForward
|
import math
import torch
import torch.optim.lr_scheduler
import torch.nn as nn
import torch.optim
import torch.onnx.operators
class GELU(nn.Module):
"""
Paper Section 3.4, last paragraph; note that BERT uses GELU instead of ReLU
"""
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
class PositionwiseFeedForward(nn.Module):
"""Implements FFN equation."""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
self.activation = GELU()
def forward(self, x):
return self.w_2(self.dropout(self.activation(self.w_1(x))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.optim.lr_scheduler
import torch.nn as nn
import torch.optim
import torch.onnx.operators
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
class GELU(nn.Module):
"""
Paper Section 3.4, last paragraph; note that BERT uses GELU instead of ReLU
"""
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
class PositionwiseFeedForwardNew(nn.Module):
"""Implements FFN equation."""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForwardNew, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
self.activation = GELU()
def forward(self, input_0):
primals_1 = self.w_1.weight
primals_2 = self.w_1.bias
primals_4 = self.w_2.weight
primals_5 = self.w_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
| LogIntelligence/LogADEmpirical | PositionwiseFeedForward | false | 8,493 | ["MIT"] | 11 | 48458aee65c1c84466b04dd4092fae79a7f341fd | https://github.com/LogIntelligence/LogADEmpirical/tree/48458aee65c1c84466b04dd4092fae79a7f341fd |
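The constant `0.7978845608028654` baked into the fused kernel is `sqrt(2/pi)` from the tanh-GELU approximation. A minimal check, assuming PyTorch >= 1.12 for the `approximate="tanh"` argument:

```python
import math
import torch
import torch.nn.functional as F

# Sketch: the hand-rolled tanh-GELU matches PyTorch's built-in approximation.
x = torch.linspace(-3.0, 3.0, steps=101)
approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * x ** 3)))
assert torch.allclose(approx, F.gelu(x, approximate="tanh"), atol=1e-6)
```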
LeastSquaresGenerativeAdversarialLoss
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
class LeastSquaresGenerativeAdversarialLoss(nn.Module):
"""
Loss for `Least Squares Generative Adversarial Network (LSGAN) <https://arxiv.org/abs/1611.04076>`_
Args:
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Inputs:
- prediction (tensor): unnormalized discriminator predictions
- real (bool): whether the ground truth label is for real images (``True``) or fake images (``False``). Default: ``True``
.. warning::
Do not use sigmoid as the last layer of Discriminator.
"""
def __init__(self, reduction='mean'):
super(LeastSquaresGenerativeAdversarialLoss, self).__init__()
self.mse_loss = nn.MSELoss(reduction=reduction)
def forward(self, prediction, real=True):
if real:
label = torch.ones_like(prediction)
else:
label = torch.zeros_like(prediction)
return self.mse_loss(prediction, label)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_ones_like_0(in_out_ptr0, in_ptr0, xnumel, rnumel
):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_ones_like_0[grid(1)](buf1, arg0_1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
return buf1,
class LeastSquaresGenerativeAdversarialLossNew(nn.Module):
"""
Loss for `Least Squares Generative Adversarial Network (LSGAN) <https://arxiv.org/abs/1611.04076>`_
Args:
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Inputs:
- prediction (tensor): unnormalized discriminator predictions
- real (bool): whether the ground truth label is for real images (``True``) or fake images (``False``). Default: ``True``
.. warning::
Do not use sigmoid as the last layer of Discriminator.
"""
def __init__(self, reduction='mean'):
super(LeastSquaresGenerativeAdversarialLossNew, self).__init__()
self.mse_loss = nn.MSELoss(reduction=reduction)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
| Liuhong99/CST | LeastSquaresGenerativeAdversarialLoss | false | 8,494 | ["MIT"] | 20 | f6653a4ee7968fa3ba875a182670636f648be783 | https://github.com/Liuhong99/CST/tree/f6653a4ee7968fa3ba875a182670636f648be783 |
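Note that the compiled variant drops the `real` flag: the kernel hard-codes the label `1.0`, so it covers only the traced `real=True` branch. A minimal sketch, assuming CUDA:

```python
import torch

# Sketch: the fused kernel equals MSE against an all-ones label (CUDA assumed).
if torch.cuda.is_available():
    pred = torch.rand(4, 4, 4, 4, device="cuda")
    ref = LeastSquaresGenerativeAdversarialLoss()(pred, real=True)
    opt = LeastSquaresGenerativeAdversarialLossNew()(pred)
    assert torch.allclose(ref, opt, atol=1e-6)
```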
FastGuidedFilter
|
import torch
from torchvision.transforms import functional as F
from torch import nn
from torch.nn import functional as F
class BoxFilter(nn.Module):
def __init__(self, r):
super(BoxFilter, self).__init__()
self.r = r
def forward(self, x):
kernel_size = 2 * self.r + 1
kernel_x = torch.full((x.data.shape[1], 1, 1, kernel_size), 1 /
kernel_size, device=x.device, dtype=x.dtype)
kernel_y = torch.full((x.data.shape[1], 1, kernel_size, 1), 1 /
kernel_size, device=x.device, dtype=x.dtype)
x = F.conv2d(x, kernel_x, padding=(0, self.r), groups=x.data.shape[1])
x = F.conv2d(x, kernel_y, padding=(self.r, 0), groups=x.data.shape[1])
return x
class FastGuidedFilter(nn.Module):
def __init__(self, r: 'int', eps: 'float'=1e-05):
super().__init__()
self.r = r
self.eps = eps
self.boxfilter = BoxFilter(r)
def forward(self, lr_x, lr_y, hr_x):
mean_x = self.boxfilter(lr_x)
mean_y = self.boxfilter(lr_y)
cov_xy = self.boxfilter(lr_x * lr_y) - mean_x * mean_y
var_x = self.boxfilter(lr_x * lr_x) - mean_x * mean_x
A = cov_xy / (var_x + self.eps)
b = mean_y - A * mean_x
A = F.interpolate(A, hr_x.shape[2:], mode='bilinear', align_corners
=False)
b = F.interpolate(b, hr_x.shape[2:], mode='bilinear', align_corners
=False)
return A * hr_x + b
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'r': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torchvision.transforms import functional as F
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_full_mul_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (x2 + 16 * y3), xmask & ymask)
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp2, xmask & ymask)
tl.store(out_ptr1 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
tl.store(out_ptr2 + (y0 + 4 * x2 + 64 * y1), tmp3, xmask & ymask)
tl.store(out_ptr3 + (y0 + 4 * x2 + 64 * y1), tmp1, xmask & ymask)
@triton.jit
def triton_poi_fused_full_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.1111111111111111
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_div_mul_sub_2(
in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x5 = xindex
tmp97 = tl.load(in_ptr4 + x5, xmask)
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = x0
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 + tmp2
tmp17 = tmp16 * tmp4
tmp18 = tmp17 - tmp2
tmp19 = triton_helpers.maximum(tmp18, tmp7)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 + tmp10
tmp22 = triton_helpers.minimum(tmp21, tmp12)
tmp23 = tl.load(in_ptr0 + (x2 + 4 * tmp22 + 16 * tmp13 + 64 * x3),
xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + (x2 + 4 * tmp22 + 16 * tmp13 + 64 * x3),
xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (x2 + 4 * tmp22 + 16 * tmp13 + 64 * x3),
xmask, eviction_policy='evict_last')
tmp26 = tmp24 * tmp25
tmp27 = tmp23 - tmp26
tmp28 = tl.load(in_ptr3 + (x2 + 4 * tmp22 + 16 * tmp13 + 64 * x3),
xmask, eviction_policy='evict_last')
tmp29 = tmp24 * tmp24
tmp30 = tmp28 - tmp29
tmp31 = 1e-05
tmp32 = tmp30 + tmp31
tmp33 = tmp27 / tmp32
tmp34 = tl.load(in_ptr0 + (x2 + 4 * tmp20 + 16 * tmp13 + 64 * x3),
xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr1 + (x2 + 4 * tmp20 + 16 * tmp13 + 64 * x3),
xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr2 + (x2 + 4 * tmp20 + 16 * tmp13 + 64 * x3),
xmask, eviction_policy='evict_last')
tmp37 = tmp35 * tmp36
tmp38 = tmp34 - tmp37
tmp39 = tl.load(in_ptr3 + (x2 + 4 * tmp20 + 16 * tmp13 + 64 * x3),
xmask, eviction_policy='evict_last')
tmp40 = tmp35 * tmp35
tmp41 = tmp39 - tmp40
tmp42 = tmp41 + tmp31
tmp43 = tmp38 / tmp42
tmp44 = tl.load(in_ptr0 + (x2 + 4 * tmp22 + 16 * tmp9 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp45 = tl.load(in_ptr1 + (x2 + 4 * tmp22 + 16 * tmp9 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp46 = tl.load(in_ptr2 + (x2 + 4 * tmp22 + 16 * tmp9 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp47 = tmp45 * tmp46
tmp48 = tmp44 - tmp47
tmp49 = tl.load(in_ptr3 + (x2 + 4 * tmp22 + 16 * tmp9 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp50 = tmp45 * tmp45
tmp51 = tmp49 - tmp50
tmp52 = tmp51 + tmp31
tmp53 = tmp48 / tmp52
tmp54 = tl.load(in_ptr0 + (x2 + 4 * tmp20 + 16 * tmp9 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp55 = tl.load(in_ptr1 + (x2 + 4 * tmp20 + 16 * tmp9 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (x2 + 4 * tmp20 + 16 * tmp9 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp57 = tmp55 * tmp56
tmp58 = tmp54 - tmp57
tmp59 = tl.load(in_ptr3 + (x2 + 4 * tmp20 + 16 * tmp9 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp60 = tmp55 * tmp55
tmp61 = tmp59 - tmp60
tmp62 = tmp61 + tmp31
tmp63 = tmp58 / tmp62
tmp64 = tmp53 - tmp63
tmp65 = tmp20.to(tl.float32)
tmp66 = tmp19 - tmp65
tmp67 = triton_helpers.maximum(tmp66, tmp7)
tmp68 = triton_helpers.minimum(tmp67, tmp4)
tmp69 = tmp64 * tmp68
tmp70 = tmp63 + tmp69
tmp71 = tmp33 * tmp24
tmp72 = tmp25 - tmp71
tmp73 = tmp43 * tmp35
tmp74 = tmp36 - tmp73
tmp75 = tmp53 * tmp45
tmp76 = tmp46 - tmp75
tmp77 = tmp63 * tmp55
tmp78 = tmp56 - tmp77
tmp79 = tmp76 - tmp78
tmp80 = tmp79 * tmp68
tmp81 = tmp78 + tmp80
tmp82 = tmp33 - tmp43
tmp83 = tmp82 * tmp68
tmp84 = tmp43 + tmp83
tmp85 = tmp84 - tmp70
tmp86 = tmp9.to(tl.float32)
tmp87 = tmp8 - tmp86
tmp88 = triton_helpers.maximum(tmp87, tmp7)
tmp89 = triton_helpers.minimum(tmp88, tmp4)
tmp90 = tmp85 * tmp89
tmp91 = tmp72 - tmp74
tmp92 = tmp91 * tmp68
tmp93 = tmp74 + tmp92
tmp94 = tmp93 - tmp81
tmp95 = tmp94 * tmp89
tmp96 = tmp70 + tmp90
tmp98 = tmp96 * tmp97
tmp99 = tmp81 + tmp95
tmp100 = tmp98 + tmp99
tl.store(in_out_ptr2 + x5, tmp100, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_full_mul_0[grid(16, 16)](arg0_1,
arg1_1, buf0, buf6, buf15, buf11, 16, 16, XBLOCK=16, YBLOCK=16,
num_warps=4, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 1, 1, 9), (9, 9, 9, 1), torch.float32)
triton_poi_fused_full_1[grid(36)](buf1, 36, XBLOCK=64, num_warps=1,
num_stages=1)
buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(0, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 1, 16, 4))
del buf0
buf3 = reinterpret_tensor(buf1, (4, 1, 9, 1), (9, 9, 1, 1), 0)
del buf1
triton_poi_fused_full_1[grid(36)](buf3, 36, XBLOCK=64, num_warps=1,
num_stages=1)
buf4 = extern_kernels.convolution(buf2, buf3, stride=(1, 1),
padding=(4, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 1, 16, 4))
del buf2
buf5 = reinterpret_tensor(buf3, (4, 1, 1, 9), (9, 9, 9, 1), 0)
del buf3
triton_poi_fused_full_1[grid(36)](buf5, 36, XBLOCK=64, num_warps=1,
num_stages=1)
buf7 = extern_kernels.convolution(buf6, buf5, stride=(1, 1),
padding=(0, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 1, 16, 4))
del buf6
buf8 = reinterpret_tensor(buf5, (4, 1, 9, 1), (9, 9, 1, 1), 0)
del buf5
triton_poi_fused_full_1[grid(36)](buf8, 36, XBLOCK=64, num_warps=1,
num_stages=1)
buf9 = extern_kernels.convolution(buf7, buf8, stride=(1, 1),
padding=(4, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf9, (4, 4, 4, 4), (64, 1, 16, 4))
del buf7
buf10 = reinterpret_tensor(buf8, (4, 1, 1, 9), (9, 9, 9, 1), 0)
del buf8
triton_poi_fused_full_1[grid(36)](buf10, 36, XBLOCK=64, num_warps=1,
num_stages=1)
buf12 = extern_kernels.convolution(buf11, buf10, stride=(1, 1),
padding=(0, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 1, 16, 4))
del buf11
buf13 = reinterpret_tensor(buf10, (4, 1, 9, 1), (9, 9, 1, 1), 0)
del buf10
triton_poi_fused_full_1[grid(36)](buf13, 36, XBLOCK=64, num_warps=1,
num_stages=1)
buf14 = extern_kernels.convolution(buf12, buf13, stride=(1, 1),
padding=(4, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf14, (4, 4, 4, 4), (64, 1, 16, 4))
del buf12
buf16 = reinterpret_tensor(buf13, (4, 1, 1, 9), (9, 9, 9, 1), 0)
del buf13
triton_poi_fused_full_1[grid(36)](buf16, 36, XBLOCK=64, num_warps=1,
num_stages=1)
buf17 = extern_kernels.convolution(buf15, buf16, stride=(1, 1),
padding=(0, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf17, (4, 4, 4, 4), (64, 1, 16, 4))
del buf15
buf18 = reinterpret_tensor(buf16, (4, 1, 9, 1), (9, 9, 1, 1), 0)
del buf16
triton_poi_fused_full_1[grid(36)](buf18, 36, XBLOCK=64, num_warps=1,
num_stages=1)
buf19 = extern_kernels.convolution(buf17, buf18, stride=(1, 1),
padding=(4, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf19, (4, 4, 4, 4), (64, 1, 16, 4))
del buf18
buf20 = reinterpret_tensor(buf17, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf17
buf25 = buf20
del buf20
buf32 = buf25
del buf25
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_div_mul_sub_2[
grid(256)](buf32, buf4, buf9, buf14, buf19, arg2_1, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg2_1
del buf14
del buf19
del buf4
del buf9
return buf32,
class BoxFilter(nn.Module):
def __init__(self, r):
super(BoxFilter, self).__init__()
self.r = r
def forward(self, x):
kernel_size = 2 * self.r + 1
kernel_x = torch.full((x.data.shape[1], 1, 1, kernel_size), 1 /
kernel_size, device=x.device, dtype=x.dtype)
kernel_y = torch.full((x.data.shape[1], 1, kernel_size, 1), 1 /
kernel_size, device=x.device, dtype=x.dtype)
x = F.conv2d(x, kernel_x, padding=(0, self.r), groups=x.data.shape[1])
x = F.conv2d(x, kernel_y, padding=(self.r, 0), groups=x.data.shape[1])
return x
class FastGuidedFilterNew(nn.Module):
def __init__(self, r: 'int', eps: 'float'=1e-05):
super().__init__()
self.r = r
self.eps = eps
self.boxfilter = BoxFilter(r)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
| LightTwist/RobustVideoMatting | FastGuidedFilter | false | 8,495 | ["Apache-2.0"] | 11 | 79eb143fef3a4c58b4857c1a5a927a318f528093 | https://github.com/LightTwist/RobustVideoMatting/tree/79eb143fef3a4c58b4857c1a5a927a318f528093 |
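Two details worth flagging in this row: the source imports `functional as F` twice, so torchvision's `F` is shadowed by `torch.nn.functional` (preserved verbatim from the repo), and with `r=4` the box kernel has `2*r + 1 = 9` taps, matching the `0.1111...` constant in `triton_poi_fused_full_1`. A minimal comparison sketch, assuming CUDA:

```python
import torch

# Sketch: compare eager FastGuidedFilter with FastGuidedFilterNew (CUDA assumed).
if torch.cuda.is_available():
    torch.manual_seed(0)
    lr_x, lr_y, hr_x = (torch.rand(4, 4, 4, 4, device="cuda") for _ in range(3))
    ref = FastGuidedFilter(r=4)(lr_x, lr_y, hr_x)
    opt = FastGuidedFilterNew(r=4)(lr_x, lr_y, hr_x)
    assert torch.allclose(ref, opt, atol=1e-4)
```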
AdaptiveFeatureNorm
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
class AdaptiveFeatureNorm(nn.Module):
"""
The `Stepwise Adaptive Feature Norm loss (ICCV 2019) <https://arxiv.org/pdf/1811.07456v2.pdf>`_
Instead of using a restrictive scalar R to match the corresponding feature norms, the Stepwise Adaptive Feature Norm
is used to learn task-specific features with large norms in a progressive manner.
We denote parameters of backbone :math:`G` as :math:`\\theta_g`, parameters of bottleneck :math:`F_f` as :math:`\\theta_f`
, parameters of classifier head :math:`F_y` as :math:`\\theta_y`, and features extracted from sample :math:`x_i` as
:math:`h(x_i;\\theta)`. Full loss is calculated as follows
.. math::
L(\\theta_g,\\theta_f,\\theta_y)=\\frac{1}{n_s}\\sum_{(x_i,y_i)\\in D_s}L_y(x_i,y_i)+\\frac{\\lambda}{n_s+n_t}
\\sum_{x_i\\in D_s\\cup D_t}L_d(h(x_i;\\theta_0)+\\Delta_r,h(x_i;\\theta))\\\\
where :math:`L_y` denotes classification loss, :math:`L_d` denotes norm loss, :math:`\\theta_0` and :math:`\\theta`
represent the updated and updating model parameters in the last and current iterations respectively.
Args:
delta (float): positive residual scalar to control the feature norm enlargement.
Inputs:
- f (tensor): feature representations on source or target domain.
Shape:
- f: :math:`(N, F)` where F means the dimension of input features.
- Outputs: scalar.
Examples::
>>> adaptive_feature_norm = AdaptiveFeatureNorm(delta=1)
>>> f_s = torch.randn(32, 1000)
>>> f_t = torch.randn(32, 1000)
>>> norm_loss = adaptive_feature_norm(f_s) + adaptive_feature_norm(f_t)
"""
def __init__(self, delta):
super(AdaptiveFeatureNorm, self).__init__()
self.delta = delta
def forward(self, f: 'torch.Tensor') ->torch.Tensor:
radius = f.norm(p=2, dim=1).detach()
assert radius.requires_grad is False
radius = radius + self.delta
loss = ((f.norm(p=2, dim=1) - radius) ** 2).mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'delta': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0(in_out_ptr0,
in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 4.0
tmp13 = tmp11 + tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp18 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0[grid(1)](buf1,
arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class AdaptiveFeatureNormNew(nn.Module):
"""
The `Stepwise Adaptive Feature Norm loss (ICCV 2019) <https://arxiv.org/pdf/1811.07456v2.pdf>`_
Instead of using a restrictive scalar R to match the corresponding feature norms, the Stepwise Adaptive Feature Norm
is used to learn task-specific features with large norms in a progressive manner.
We denote parameters of backbone :math:`G` as :math:`\\theta_g`, parameters of bottleneck :math:`F_f` as :math:`\\theta_f`
, parameters of classifier head :math:`F_y` as :math:`\\theta_y`, and features extracted from sample :math:`x_i` as
:math:`h(x_i;\\theta)`. Full loss is calculated as follows
.. math::
L(\\theta_g,\\theta_f,\\theta_y)=\\frac{1}{n_s}\\sum_{(x_i,y_i)\\in D_s}L_y(x_i,y_i)+\\frac{\\lambda}{n_s+n_t}
\\sum_{x_i\\in D_s\\cup D_t}L_d(h(x_i;\\theta_0)+\\Delta_r,h(x_i;\\theta))\\\\
where :math:`L_y` denotes classification loss, :math:`L_d` denotes norm loss, :math:`\\theta_0` and :math:`\\theta`
represent the updated and updating model parameters in the last and current iterations respectively.
Args:
delta (float): positive residual scalar to control the feature norm enlargement.
Inputs:
- f (tensor): feature representations on source or target domain.
Shape:
- f: :math:`(N, F)` where F means the dimension of input features.
- Outputs: scalar.
Examples::
>>> adaptive_feature_norm = AdaptiveFeatureNorm(delta=1)
>>> f_s = torch.randn(32, 1000)
>>> f_t = torch.randn(32, 1000)
>>> norm_loss = adaptive_feature_norm(f_s) + adaptive_feature_norm(f_t)
"""
def __init__(self, delta):
super(AdaptiveFeatureNormNew, self).__init__()
self.delta = delta
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
| Liuhong99/CST | AdaptiveFeatureNorm | false | 8,496 | ["MIT"] | 20 | f6653a4ee7968fa3ba875a182670636f648be783 | https://github.com/Liuhong99/CST/tree/f6653a4ee7968fa3ba875a182670636f648be783 |
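Because the radius is the detached norm plus `delta`, the loss *value* is always `delta ** 2` (visible in the kernel, where `sqrt - (sqrt + 4.0)` is the constant `-4.0`); only the gradient through the undetached norm is informative. A minimal sketch, assuming CUDA:

```python
import torch

# Sketch: the loss value is delta**2 = 16 regardless of the input (CUDA assumed).
if torch.cuda.is_available():
    f = torch.rand(4, 4, 4, 4, device="cuda")
    loss = AdaptiveFeatureNormNew(delta=4)(f)
    assert torch.allclose(loss, torch.full_like(loss, 16.0))
```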
FeedForward
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=512, dropout=0.5):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (512, 4), (4, 1))
assert_size_stride(primals_2, (512,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 512), (512, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 512), (8192, 2048, 512, 1), 0
)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(32768)](buf1,
primals_2, buf3, 32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 512),
(512, 1), 0), reinterpret_tensor(primals_4, (512, 4), (1, 512),
0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 512), (512, 1), 0), primals_4, buf3
class FeedForwardNew(nn.Module):
def __init__(self, d_model, d_ff=512, dropout=0.5):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, input_0):
primals_1 = self.linear_1.weight
primals_2 = self.linear_1.bias
primals_4 = self.linear_2.weight
primals_5 = self.linear_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
| MadanMl/PyTorch-Transformer-for-RUL-Prediction | FeedForward | false | 8,497 | ["Apache-2.0"] | 25 | 5bf0a4739abdecbbc88118ea413393997bdc1e24 | https://github.com/MadanMl/PyTorch-Transformer-for-RUL-Prediction/tree/5bf0a4739abdecbbc88118ea413393997bdc1e24 |
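Since the default `dropout=0.5` is stochastic in training mode (and the compiled graph omits it entirely), a comparison only makes sense in `eval()` mode. A minimal sketch, assuming CUDA:

```python
import torch

# Sketch: compare FeedForward and FeedForwardNew in eval() mode (CUDA assumed).
if torch.cuda.is_available():
    torch.manual_seed(0)
    x = torch.rand(4, 4, 4, 4, device="cuda")
    eager = FeedForward(d_model=4).cuda().eval()
    fused = FeedForwardNew(d_model=4).cuda().eval()
    fused.load_state_dict(eager.state_dict())
    assert torch.allclose(eager(x), fused(x), atol=1e-5)
```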
UpsampleConvLayer
|
import torch
import torch.nn as nn
class UpsampleConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(UpsampleConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'stride': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 121 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 11, 11), (484, 121, 11, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(1936)](buf2, primals_3, 1936,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class UpsampleConvLayerNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(UpsampleConvLayerNew, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride)
def forward(self, input_0):
primals_1 = self.conv2d.weight
primals_3 = self.conv2d.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
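# A minimal usage sketch for UpsampleConvLayerNew, assuming a CUDA device is
# available (the Triton kernels above are CUDA-only). Shapes follow
# get_inputs()/get_init_inputs(); the expected output shape matches the
# assert_size_stride(buf1, ...) check inside call().
if __name__ == '__main__':
    torch.manual_seed(0)
    model = UpsampleConvLayerNew(in_channels=4, out_channels=4,
        kernel_size=4, stride=1).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(x)
    assert out.shape == (4, 4, 11, 11), out.shape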
|
MKFMIKU/PFFNet
|
UpsampleConvLayer
| false
| 8,498
|
[
"MIT"
] | 41
|
e506010a7cf00a32e77681845bdaf78ba88b027d
|
https://github.com/MKFMIKU/PFFNet/tree/e506010a7cf00a32e77681845bdaf78ba88b027d
|
MeanPoolConv
|
import torch
from torch import nn
class IWConv2d(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
stride=1, bias=True):
super(IWConv2d, self).__init__()
self.he_init = he_init
self.padding = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=self.padding, bias=bias)
def forward(self, input):
output = self.conv(input)
return output
class MeanPoolConv(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True):
super(MeanPoolConv, self).__init__()
self.he_init = he_init
self.conv = IWConv2d(input_dim, output_dim, kernel_size, he_init=
self.he_init)
def forward(self, input):
output = input
output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output
[:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4
output = self.conv(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class IWConv2d(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
stride=1, bias=True):
super(IWConv2d, self).__init__()
self.he_init = he_init
self.padding = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=self.padding, bias=bias)
def forward(self, input):
output = self.conv(input)
return output
class MeanPoolConvNew(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True):
super(MeanPoolConvNew, self).__init__()
self.he_init = he_init
self.conv = IWConv2d(input_dim, output_dim, kernel_size, he_init=
self.he_init)
def forward(self, input_0):
primals_1 = self.conv.conv.weight
primals_3 = self.conv.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
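# A minimal usage sketch for MeanPoolConvNew, assuming a CUDA device. The
# 4x4 input is mean-pooled to 2x2, then the k=4/pad=1 convolution reduces it
# to 1x1, matching the assert_size_stride(buf1, ...) check in call().
if __name__ == '__main__':
    torch.manual_seed(0)
    model = MeanPoolConvNew(input_dim=4, output_dim=4, kernel_size=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(x)
    assert out.shape == (4, 4, 1, 1), out.shape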
|
MIC-DKFZ/mood
|
MeanPoolConv
| false
| 8,499
|
[
"Apache-2.0"
] | 42
|
a01303adb4256653b133e2f7cd4741d366b681f7
|
https://github.com/MIC-DKFZ/mood/tree/a01303adb4256653b133e2f7cd4741d366b681f7
|
Sobelxy
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Sobelxy(nn.Module):
def __init__(self):
super(Sobelxy, self).__init__()
kernelx = [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]
kernely = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]
kernelx = torch.FloatTensor(kernelx).unsqueeze(0).unsqueeze(0)
kernely = torch.FloatTensor(kernely).unsqueeze(0).unsqueeze(0)
self.weightx = nn.Parameter(data=kernelx, requires_grad=False)
self.weighty = nn.Parameter(data=kernely, requires_grad=False)
def forward(self, x):
sobelx = F.conv2d(x, self.weightx, padding=1)
sobely = F.conv2d(x, self.weighty, padding=1)
return torch.abs(sobelx) + torch.abs(sobely)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_abs_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp2 = tl.load(in_ptr0 + x0, None)
tmp1 = tl_math.abs(tmp0)
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp1 + tmp3
tl.store(in_out_ptr0 + x0, tmp4, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(arg1_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(arg2_1, (1, 1, 3, 3), (9, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(arg1_1, arg0_1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1))
del arg0_1
buf1 = extern_kernels.convolution(arg1_1, arg2_1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 64, 64), (4096, 4096, 64, 1))
del arg1_1
del arg2_1
buf2 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_abs_add_0[grid(16384)](buf2, buf1, 16384, XBLOCK=
256, num_warps=4, num_stages=1)
del buf1
return buf2,
class SobelxyNew(nn.Module):
def __init__(self):
super(SobelxyNew, self).__init__()
kernelx = [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]
kernely = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]
kernelx = torch.FloatTensor(kernelx).unsqueeze(0).unsqueeze(0)
kernely = torch.FloatTensor(kernely).unsqueeze(0).unsqueeze(0)
self.weightx = nn.Parameter(data=kernelx, requires_grad=False)
self.weighty = nn.Parameter(data=kernely, requires_grad=False)
def forward(self, input_0):
arg0_1 = self.weightx
arg2_1 = self.weighty
arg1_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
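# A minimal usage sketch for SobelxyNew, assuming a CUDA device. The output
# is a sum of absolute Sobel responses, so it has the input's spatial shape
# and is everywhere non-negative.
if __name__ == '__main__':
    torch.manual_seed(0)
    model = SobelxyNew().cuda()
    x = torch.rand(4, 1, 64, 64, device='cuda')
    out = model(x)
    assert out.shape == (4, 1, 64, 64) and bool((out >= 0).all())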
|
Linfeng-Tang/SeAFusion
|
Sobelxy
| false
| 8,500
|
[
"MIT"
] | 18
|
54cf7ee116da3f726941560279bf12fedd0d434d
|
https://github.com/Linfeng-Tang/SeAFusion/tree/54cf7ee116da3f726941560279bf12fedd0d434d
|
ConvMeanPool
|
import torch
from torch import nn
class IWConv2d(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
stride=1, bias=True):
super(IWConv2d, self).__init__()
self.he_init = he_init
self.padding = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=self.padding, bias=bias)
def forward(self, input):
output = self.conv(input)
return output
class ConvMeanPool(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True):
super(ConvMeanPool, self).__init__()
self.he_init = he_init
self.conv = IWConv2d(input_dim, output_dim, kernel_size, he_init=
self.he_init)
def forward(self, input):
output = self.conv(input)
output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output
[:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2 % 2
x4 = xindex // 4
x2 = xindex // 4 % 4
x6 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 6 * x1 + 9 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (3 + 2 * x0 + 9 * x4), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 6 * x1 + 9 * x4), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (4 + 9 * x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = tmp2 + tmp4
tmp7 = tmp6 + tmp1
tmp8 = tmp5 + tmp7
tmp10 = tmp9 + tmp1
tmp11 = tmp8 + tmp10
tmp12 = 0.25
tmp13 = tmp11 * tmp12
tl.store(out_ptr0 + x6, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(64)](buf0, primals_2, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_2
return buf1, primals_1, primals_3
class IWConv2d(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
stride=1, bias=True):
super(IWConv2d, self).__init__()
self.he_init = he_init
self.padding = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=self.padding, bias=bias)
def forward(self, input):
output = self.conv(input)
return output
class ConvMeanPoolNew(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True):
super(ConvMeanPoolNew, self).__init__()
self.he_init = he_init
self.conv = IWConv2d(input_dim, output_dim, kernel_size, he_init=
self.he_init)
def forward(self, input_0):
primals_1 = self.conv.conv.weight
primals_2 = self.conv.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
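# A minimal usage sketch for ConvMeanPoolNew, assuming a CUDA device. The
# k=4/pad=1 convolution maps 4x4 to 3x3, and the strided-slice mean pool
# (with broadcasting) maps 3x3 to 2x2, matching buf1 in call().
if __name__ == '__main__':
    torch.manual_seed(0)
    model = ConvMeanPoolNew(input_dim=4, output_dim=4, kernel_size=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(x)
    assert out.shape == (4, 4, 2, 2), out.shape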
|
MIC-DKFZ/mood
|
ConvMeanPool
| false
| 8,501
|
[
"Apache-2.0"
] | 42
|
a01303adb4256653b133e2f7cd4741d366b681f7
|
https://github.com/MIC-DKFZ/mood/tree/a01303adb4256653b133e2f7cd4741d366b681f7
|
Theta
|
from torch.autograd import Function
import torch
import torch.nn as nn
from typing import Tuple
from typing import Optional
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
from typing import Any
class GradientReverseFunction(Function):
@staticmethod
def forward(ctx: 'Any', input: 'torch.Tensor', coeff: 'Optional[float]'=1.0
) ->torch.Tensor:
ctx.coeff = coeff
output = input * 1.0
return output
@staticmethod
def backward(ctx: 'Any', grad_output: 'torch.Tensor') ->Tuple[torch.
Tensor, Any]:
return grad_output.neg() * ctx.coeff, None
class GradientReverseLayer(nn.Module):
def __init__(self):
super(GradientReverseLayer, self).__init__()
def forward(self, *input):
return GradientReverseFunction.apply(*input)
class Theta(nn.Module):
"""
maximize loss respect to :math:` heta`
minimize loss respect to features
"""
def __init__(self, dim: 'int'):
super(Theta, self).__init__()
self.grl1 = GradientReverseLayer()
self.grl2 = GradientReverseLayer()
self.layer1 = nn.Linear(dim, dim)
nn.init.eye_(self.layer1.weight)
nn.init.zeros_(self.layer1.bias)
def forward(self, features: 'torch.Tensor') ->torch.Tensor:
features = self.grl1(features)
return self.grl2(self.layer1(features))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import torch.nn as nn
from typing import Tuple
from typing import Optional
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
from typing import Any
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_mul_1[grid(256)](buf2, primals_3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class GradientReverseFunction(Function):
@staticmethod
def forward(ctx: 'Any', input: 'torch.Tensor', coeff: 'Optional[float]'=1.0
) ->torch.Tensor:
ctx.coeff = coeff
output = input * 1.0
return output
@staticmethod
def backward(ctx: 'Any', grad_output: 'torch.Tensor') ->Tuple[torch.
Tensor, Any]:
return grad_output.neg() * ctx.coeff, None
class GradientReverseLayer(nn.Module):
def __init__(self):
super(GradientReverseLayer, self).__init__()
def forward(self, *input):
return GradientReverseFunction.apply(*input)
class ThetaNew(nn.Module):
"""
maximize loss respect to :math:` heta`
minimize loss respect to features
"""
def __init__(self, dim: 'int'):
super(ThetaNew, self).__init__()
self.grl1 = GradientReverseLayer()
self.grl2 = GradientReverseLayer()
self.layer1 = nn.Linear(dim, dim)
nn.init.eye_(self.layer1.weight)
nn.init.zeros_(self.layer1.bias)
def forward(self, input_0):
primals_2 = self.layer1.weight
primals_3 = self.layer1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
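# A minimal usage sketch for ThetaNew, assuming a CUDA device. Since layer1
# is initialized to the identity with a zero bias, and the gradient-reversal
# layers are identity maps in the forward pass, the module should return its
# input unchanged.
if __name__ == '__main__':
    torch.manual_seed(0)
    model = ThetaNew(dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(x)
    assert torch.allclose(out, x, atol=1e-6)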
|
Liuhong99/CST
|
Theta
| false
| 8,502
|
[
"MIT"
] | 20
|
f6653a4ee7968fa3ba875a182670636f648be783
|
https://github.com/Liuhong99/CST/tree/f6653a4ee7968fa3ba875a182670636f648be783
|
ResidualBlock
|
import torch
import torch.nn as nn
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = nn.ReflectionPad2d(reflection_padding)
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(torch.nn.Module):
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.relu = nn.PReLU()
def forward(self, x):
residual = x
out = self.relu(self.conv1(x))
out = self.conv2(out) * 0.1
out = torch.add(out, residual)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__prelu_kernel_reflection_pad2d_2(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_3(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused__prelu_kernel_reflection_pad2d_2[grid(576)](buf2,
primals_4, buf3, 576, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_mul_3[grid(256)](buf5, primals_6,
primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_6
return buf5, primals_2, primals_4, primals_5, buf0, buf2, buf3
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = nn.ReflectionPad2d(reflection_padding)
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlockNew(torch.nn.Module):
def __init__(self, channels):
super(ResidualBlockNew, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.relu = nn.PReLU()
def forward(self, input_0):
primals_2 = self.conv1.conv2d.weight
primals_3 = self.conv1.conv2d.bias
primals_5 = self.conv2.conv2d.weight
primals_6 = self.conv2.conv2d.bias
primals_4 = self.relu.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
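# A minimal usage sketch for ResidualBlockNew, assuming a CUDA device. The
# reflection-padded 3x3 convolutions preserve the spatial size, so the
# residual addition keeps the input shape.
if __name__ == '__main__':
    torch.manual_seed(0)
    model = ResidualBlockNew(channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(x)
    assert out.shape == (4, 4, 4, 4), out.shape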
|
MKFMIKU/PFFNet
|
ResidualBlock
| false
| 8,503
|
[
"MIT"
] | 41
|
e506010a7cf00a32e77681845bdaf78ba88b027d
|
https://github.com/MKFMIKU/PFFNet/tree/e506010a7cf00a32e77681845bdaf78ba88b027d
|
Spatial_Attention
|
import torch
import torch.nn as nn
class Spatial_Attention(nn.Module):
def __init__(self, input_dim):
super(Spatial_Attention, self).__init__()
self.att_conv1 = nn.Conv2d(input_dim, 1, kernel_size=(1, 1),
padding=0, stride=1, bias=False)
self.att_act2 = nn.Softplus(beta=1, threshold=20)
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, x):
att_score = self.att_act2(self.att_conv1(x))
return att_score
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_softplus_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf1, primals_1, primals_2, buf0
class Spatial_AttentionNew(nn.Module):
def __init__(self, input_dim):
super(Spatial_AttentionNew, self).__init__()
self.att_conv1 = nn.Conv2d(input_dim, 1, kernel_size=(1, 1),
padding=0, stride=1, bias=False)
self.att_act2 = nn.Softplus(beta=1, threshold=20)
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, input_0):
primals_1 = self.att_conv1.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
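# A minimal usage sketch for Spatial_AttentionNew, assuming a CUDA device.
# The 1x1 convolution collapses the channels to a single attention map, and
# the softplus keeps every score strictly positive.
if __name__ == '__main__':
    torch.manual_seed(0)
    model = Spatial_AttentionNew(input_dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(x)
    assert out.shape == (4, 1, 4, 4) and bool((out > 0).all())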
|
MCC-WH/Token
|
Spatial_Attention
| false
| 8,504
|
[
"MIT"
] | 30
|
eadc301f2df9e1851633be1b63c273659af0da49
|
https://github.com/MCC-WH/Token/tree/eadc301f2df9e1851633be1b63c273659af0da49
|
region_levelset
|
import torch
import torch.nn as nn
class region_levelset(nn.Module):
"""
the mian of leveset function
"""
def __init__(self):
super(region_levelset, self).__init__()
def forward(self, mask_score, norm_img, class_weight):
"""
mask_score: predcited mask scores tensor:(N,C,W,H)
norm_img: normalizated images tensor:(N,C,W,H)
class_weight: weight for different classes
"""
mask_score_shape = mask_score.shape
norm_img_shape = norm_img.shape
level_set_loss = 0.0
for i in range(norm_img_shape[1]):
norm_img_ = torch.unsqueeze(norm_img[:, i], 1)
norm_img_ = norm_img_.expand(norm_img_shape[0],
mask_score_shape[1], norm_img_shape[2], norm_img_shape[3])
ave_similarity = torch.sum(norm_img_ * mask_score, (2, 3)
) / torch.sum(mask_score, (2, 3))
ave_similarity = ave_similarity.view(norm_img_shape[0],
mask_score_shape[1], 1, 1)
region_level = norm_img_ - ave_similarity.expand(norm_img_shape
[0], mask_score_shape[1], norm_img_shape[2], norm_img_shape[3])
region_level_loss = (class_weight * region_level * region_level *
mask_score)
level_set_loss += torch.sum(region_level_loss)
return level_set_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x1 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0)
tmp11 = tl.load(in_ptr0 + (16 + r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tl.load(in_ptr0 + (32 + r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp23 = tl.load(in_ptr0 + (48 + r2 + 64 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp11 * tmp1
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp18 = tmp17 * tmp1
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.where(xmask, tmp19, 0)
tmp22 = tl.sum(tmp21, 1)[:, None]
tmp24 = tmp23 * tmp1
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.where(xmask, tmp25, 0)
tmp28 = tl.sum(tmp27, 1)[:, None]
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp10, xmask)
tl.store(out_ptr2 + x3, tmp16, xmask)
tl.store(out_ptr3 + x3, tmp10, xmask)
tl.store(out_ptr4 + x3, tmp22, xmask)
tl.store(out_ptr5 + x3, tmp10, xmask)
tl.store(out_ptr6 + x3, tmp28, xmask)
tl.store(out_ptr7 + x3, tmp10, xmask)
@triton.jit
def triton_per_fused_add_mul_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9,
in_ptr10, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
r4 = rindex // 16
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + r4, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr3 + r4, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + r3, None)
tmp13 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr5 + r4, None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + r4, None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr7 + r4, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr8 + r4, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp36 = tl.load(in_ptr9 + r4, None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr10 + r4, None, eviction_policy='evict_last')
tmp4 = tmp2 / tmp3
tmp5 = tmp1 - tmp4
tmp6 = tmp0 * tmp5
tmp7 = tmp6 * tmp5
tmp9 = tmp7 * tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp16 = tmp14 / tmp15
tmp17 = tmp13 - tmp16
tmp18 = tmp0 * tmp17
tmp19 = tmp18 * tmp17
tmp20 = tmp19 * tmp8
tmp21 = tl.broadcast_to(tmp20, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tmp27 = tmp25 / tmp26
tmp28 = tmp24 - tmp27
tmp29 = tmp0 * tmp28
tmp30 = tmp29 * tmp28
tmp31 = tmp30 * tmp8
tmp32 = tl.broadcast_to(tmp31, [RBLOCK])
tmp34 = triton_helpers.promote_to_tensor(tl.sum(tmp32, 0))
tmp38 = tmp36 / tmp37
tmp39 = tmp35 - tmp38
tmp40 = tmp0 * tmp39
tmp41 = tmp40 * tmp39
tmp42 = tmp41 * tmp8
tmp43 = tl.broadcast_to(tmp42, [RBLOCK])
tmp45 = triton_helpers.promote_to_tensor(tl.sum(tmp43, 0))
tmp46 = 0.0
tmp47 = tmp12 + tmp46
tmp48 = tmp47 + tmp23
tmp49 = tmp48 + tmp34
tmp50 = tmp49 + tmp45
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp50, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(16)](arg1_1, arg0_1, buf0, buf1,
buf3, buf4, buf6, buf7, buf9, buf10, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf11 = empty_strided_cuda((), (), torch.float32)
buf12 = buf11
del buf11
triton_per_fused_add_mul_sub_sum_1[grid(1)](buf12, arg2_1, arg1_1,
buf0, buf1, arg0_1, buf3, buf4, buf6, buf7, buf9, buf10, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del buf0
del buf1
del buf10
del buf3
del buf4
del buf6
del buf7
del buf9
return buf12,
class region_levelsetNew(nn.Module):
"""
the mian of leveset function
"""
def __init__(self):
super(region_levelsetNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
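# A minimal usage sketch for region_levelsetNew, assuming a CUDA device.
# With non-negative random inputs every term of the region loss is
# non-negative, so the scalar loss should be >= 0.
if __name__ == '__main__':
    torch.manual_seed(0)
    loss_fn = region_levelsetNew().cuda()
    mask_score = torch.rand(4, 4, 4, 4, device='cuda')
    norm_img = torch.rand(4, 4, 4, 4, device='cuda')
    class_weight = torch.rand(4, 4, 4, 4, device='cuda')
    loss = loss_fn(mask_score, norm_img, class_weight)
    assert loss.dim() == 0 and loss.item() >= 0.0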
|
LiWentomng/boxlevelset
|
region_levelset
| false
| 8,505
|
[
"Apache-2.0"
] | 25
|
8cc40bf6ae4a343c482c676c72259cc12c29d31c
|
https://github.com/LiWentomng/boxlevelset/tree/8cc40bf6ae4a343c482c676c72259cc12c29d31c
|
DenseBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvLeakyRelu2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1,
stride=1, dilation=1, groups=1):
super(ConvLeakyRelu2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=
kernel_size, padding=padding, stride=stride, dilation=dilation,
groups=groups)
def forward(self, x):
return F.leaky_relu(self.conv(x), negative_slope=0.2)
class DenseBlock(nn.Module):
def __init__(self, channels):
super(DenseBlock, self).__init__()
self.conv1 = ConvLeakyRelu2d(channels, channels)
self.conv2 = ConvLeakyRelu2d(2 * channels, channels)
def forward(self, x):
x = torch.cat((x, self.conv1(x)), dim=1)
x = torch.cat((x, self.conv2(x)), dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp11 = tl.load(in_ptr3 + (-4 + x1), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.2
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp6, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp5, tmp17)
tl.store(out_ptr0 + x3, tmp18, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 12
x0 = xindex % 16
x2 = xindex // 192
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 128 * x2), tmp4 & xmask, other=0.0
)
tmp6 = tmp0 >= tmp3
tl.full([1], 12, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp6 & xmask,
other=0.0).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp11 = tl.load(in_ptr3 + (-8 + x1), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.2
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp6, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp5, tmp17)
tl.store(out_ptr0 + x3, tmp18, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf0,
primals_2, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
triton_poi_fused_cat_1[grid(512)](primals_3, buf1, buf0, primals_2,
buf2, 512, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf3,
primals_5, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32
)
triton_poi_fused_cat_2[grid(768)](buf2, buf4, buf3, primals_5, buf5,
768, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_5
return buf5, primals_1, primals_3, primals_4, buf1, buf2, buf4
class ConvLeakyRelu2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1,
stride=1, dilation=1, groups=1):
super(ConvLeakyRelu2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=
kernel_size, padding=padding, stride=stride, dilation=dilation,
groups=groups)
def forward(self, x):
return F.leaky_relu(self.conv(x), negative_slope=0.2)
class DenseBlockNew(nn.Module):
def __init__(self, channels):
super(DenseBlockNew, self).__init__()
self.conv1 = ConvLeakyRelu2d(channels, channels)
self.conv2 = ConvLeakyRelu2d(2 * channels, channels)
def forward(self, input_0):
primals_1 = self.conv1.conv.weight
primals_2 = self.conv1.conv.bias
primals_4 = self.conv2.conv.weight
primals_5 = self.conv2.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
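# A minimal usage sketch for DenseBlockNew, assuming a CUDA device. Each
# dense concatenation appends `channels` feature maps, so a 4-channel input
# grows to 4 + 4 + 4 = 12 channels, matching buf5 in call().
if __name__ == '__main__':
    torch.manual_seed(0)
    model = DenseBlockNew(channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(x)
    assert out.shape == (4, 12, 4, 4), out.shape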
|
Linfeng-Tang/SeAFusion
|
DenseBlock
| false
| 8,506
|
[
"MIT"
] | 18
|
54cf7ee116da3f726941560279bf12fedd0d434d
|
https://github.com/Linfeng-Tang/SeAFusion/tree/54cf7ee116da3f726941560279bf12fedd0d434d
|
UpSampleConv
|
import torch
from torch import nn
class IWConv2d(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
stride=1, bias=True):
super(IWConv2d, self).__init__()
self.he_init = he_init
self.padding = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=self.padding, bias=bias)
def forward(self, input):
output = self.conv(input)
return output
class DepthToSpace(nn.Module):
def __init__(self, block_size):
super(DepthToSpace, self).__init__()
self.block_size = block_size
self.block_size_sq = block_size * block_size
def forward(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, input_height, input_width, input_depth = output.size()
output_depth = int(input_depth / self.block_size_sq)
output_width = int(input_width * self.block_size)
output_height = int(input_height * self.block_size)
t_1 = output.reshape(batch_size, input_height, input_width, self.
block_size_sq, output_depth)
spl = t_1.split(self.block_size, 3)
stacks = [t_t.reshape(batch_size, input_height, output_width,
output_depth) for t_t in spl]
output = torch.stack(stacks, 0).transpose(0, 1).permute(0, 2, 1, 3, 4
).reshape(batch_size, output_height, output_width, output_depth)
output = output.permute(0, 3, 1, 2)
return output
class UpSampleConv(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
bias=True):
super(UpSampleConv, self).__init__()
self.he_init = he_init
self.conv = IWConv2d(input_dim, output_dim, kernel_size, he_init=
self.he_init, bias=bias)
self.depth_to_space = DepthToSpace(2)
def forward(self, input):
output = input
output = torch.cat((output, output, output, output), 1)
output = self.depth_to_space(output)
output = self.conv(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 32 % 2
x4 = xindex // 256
x0 = xindex % 4
x1 = xindex // 4 % 8
x3 = xindex // 64 % 4
x6 = xindex
tmp0 = x4 + 4 * x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x3 + 16 * x0 + 64 * (x4 + 4 * x2) + x1 //
2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * x3 + 16 * x0 + 64 * (-4 + x4 + 4 * x2) +
x1 // 2), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x6, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 8, 4), (256, 64, 32, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_convolution_1[grid(16, 16)](primals_2, buf1, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 4, 8,
8), (256, 1, 32, 4), 0), buf1, stride=(1, 1), padding=(1, 1),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 7, 7), (196, 1, 28, 4))
del buf1
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(784)](buf3, primals_3, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf3, primals_2, reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 1,
32, 4), 0)
class IWConv2d(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
stride=1, bias=True):
super(IWConv2d, self).__init__()
self.he_init = he_init
self.padding = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=self.padding, bias=bias)
def forward(self, input):
output = self.conv(input)
return output
class DepthToSpace(nn.Module):
def __init__(self, block_size):
super(DepthToSpace, self).__init__()
self.block_size = block_size
self.block_size_sq = block_size * block_size
def forward(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, input_height, input_width, input_depth = output.size()
output_depth = int(input_depth / self.block_size_sq)
output_width = int(input_width * self.block_size)
output_height = int(input_height * self.block_size)
t_1 = output.reshape(batch_size, input_height, input_width, self.
block_size_sq, output_depth)
spl = t_1.split(self.block_size, 3)
stacks = [t_t.reshape(batch_size, input_height, output_width,
output_depth) for t_t in spl]
output = torch.stack(stacks, 0).transpose(0, 1).permute(0, 2, 1, 3, 4
).reshape(batch_size, output_height, output_width, output_depth)
output = output.permute(0, 3, 1, 2)
return output
class UpSampleConvNew(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, he_init=True,
bias=True):
super(UpSampleConvNew, self).__init__()
self.he_init = he_init
self.conv = IWConv2d(input_dim, output_dim, kernel_size, he_init=
self.he_init, bias=bias)
self.depth_to_space = DepthToSpace(2)
def forward(self, input_0):
primals_1 = self.conv.conv.weight
primals_3 = self.conv.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
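# A minimal usage sketch for UpSampleConvNew, assuming a CUDA device. The
# four-way channel replication plus DepthToSpace(2) doubles the spatial size
# (4x4 -> 8x8), and the k=4/pad=1 convolution maps 8x8 to 7x7, matching
# buf2 in call().
if __name__ == '__main__':
    torch.manual_seed(0)
    model = UpSampleConvNew(input_dim=4, output_dim=4, kernel_size=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(x)
    assert out.shape == (4, 4, 7, 7), out.shape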
|
MIC-DKFZ/mood
|
UpSampleConv
| false
| 8,507
|
[
"Apache-2.0"
] | 42
|
a01303adb4256653b133e2f7cd4741d366b681f7
|
https://github.com/MIC-DKFZ/mood/tree/a01303adb4256653b133e2f7cd4741d366b681f7
|
BoxFilter
|
import torch
from torchvision.transforms import functional as F
from torch import nn
from torch.nn import functional as F
class BoxFilter(nn.Module):
def __init__(self, r):
super(BoxFilter, self).__init__()
self.r = r
def forward(self, x):
kernel_size = 2 * self.r + 1
kernel_x = torch.full((x.data.shape[1], 1, 1, kernel_size), 1 /
kernel_size, device=x.device, dtype=x.dtype)
kernel_y = torch.full((x.data.shape[1], 1, kernel_size, 1), 1 /
kernel_size, device=x.device, dtype=x.dtype)
x = F.conv2d(x, kernel_x, padding=(0, self.r), groups=x.data.shape[1])
x = F.conv2d(x, kernel_y, padding=(self.r, 0), groups=x.data.shape[1])
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'r': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_full_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.1111111111111111
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_full_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_full_2(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tl.store(out_ptr0 + (x2 + 16 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 9), (9, 9, 9, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_full_0[grid(36)](buf0, 36, XBLOCK=64, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_convolution_full_1[grid(16, 16)](arg0_1, buf1, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del arg0_1
buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(0, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 1, 16, 4))
del buf1
buf3 = reinterpret_tensor(buf0, (4, 1, 9, 1), (9, 9, 1, 1), 0)
del buf0
triton_poi_fused_full_0[grid(36)](buf3, 36, XBLOCK=64, num_warps=1,
num_stages=1)
buf4 = extern_kernels.convolution(buf2, buf3, stride=(1, 1),
padding=(4, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 1, 16, 4))
del buf3
buf5 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_convolution_full_2[grid(16, 16)](buf4, buf5, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf4
return buf5,
class BoxFilterNew(nn.Module):
def __init__(self, r):
super(BoxFilterNew, self).__init__()
self.r = r
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
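# A minimal usage sketch for BoxFilterNew, assuming a CUDA device. The two
# zero-padded depthwise passes (1x9 then 9x1 for r=4) preserve the input
# shape.
if __name__ == '__main__':
    torch.manual_seed(0)
    model = BoxFilterNew(r=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(x)
    assert out.shape == (4, 4, 4, 4), out.shape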
|
LightTwist/RobustVideoMatting
|
BoxFilter
| false
| 8,508
|
[
"Apache-2.0"
] | 11
|
79eb143fef3a4c58b4857c1a5a927a318f528093
|
https://github.com/LightTwist/RobustVideoMatting/tree/79eb143fef3a4c58b4857c1a5a927a318f528093
|
SingleHiddenLayer
|
import torch
class SingleHiddenLayer(torch.nn.Module):
def __init__(self, input_channels, hidden_channels):
super(SingleHiddenLayer, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.linear1 = torch.nn.Linear(hidden_channels, 128)
self.linear2 = torch.nn.Linear(128, input_channels * hidden_channels)
def extra_repr(self):
return 'input_channels: {}, hidden_channels: {}'.format(self.
input_channels, self.hidden_channels)
def forward(self, z):
z = self.linear1(z)
z = torch.relu(z)
z = self.linear2(z)
z = z.view(*z.shape[:-1], self.hidden_channels, self.input_channels)
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'hidden_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 128), (128, 1))
assert_size_stride(primals_5, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf3, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 16), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), primals_4, buf3
class SingleHiddenLayerNew(torch.nn.Module):
def __init__(self, input_channels, hidden_channels):
super(SingleHiddenLayerNew, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.linear1 = torch.nn.Linear(hidden_channels, 128)
self.linear2 = torch.nn.Linear(128, input_channels * hidden_channels)
def extra_repr(self):
return 'input_channels: {}, hidden_channels: {}'.format(self.
input_channels, self.hidden_channels)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
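# A minimal usage sketch for SingleHiddenLayerNew, assuming a CUDA device.
# The final view reshapes the 16 output features into a trailing
# (hidden_channels, input_channels) = (4, 4) pair of dimensions.
if __name__ == '__main__':
    torch.manual_seed(0)
    model = SingleHiddenLayerNew(input_channels=4, hidden_channels=4).cuda()
    z = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(z)
    assert out.shape == (4, 4, 4, 4, 4), out.shape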
|
MLforHealth/state_representations_for_RLinHealth
|
SingleHiddenLayer
| false
| 8,509
|
[
"MIT"
] | 24
|
aa8dbb7d56caa95bf4380e3e745e134996291b66
|
https://github.com/MLforHealth/state_representations_for_RLinHealth/tree/aa8dbb7d56caa95bf4380e3e745e134996291b66
|
dnn_encoder
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class dnn_encoder(nn.Module):
def __init__(self, G_in, G_out, w1, w2, w3):
super(dnn_encoder, self).__init__()
self.fc1 = nn.Linear(G_in, w1)
self.fc2 = nn.Linear(w1, w2)
self.fc3 = nn.Linear(w2, w3)
self.out = nn.Linear(w3, G_out)
def forward(self, x):
x = F.leaky_relu(self.fc1(x))
x = F.leaky_relu(self.fc2(x))
x = F.leaky_relu(self.fc3(x))
x = self.out(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'G_in': 4, 'G_out': 4, 'w1': 4, 'w2': 4, 'w3': 4}]
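# Hedged usage sketch, not from the original repo: wiring the two helper
# functions above together the way the benchmark harness presumably does.
def _example_dnn_encoder():
    _, kwargs = get_init_inputs()
    model = dnn_encoder(**kwargs)
    return model(*get_inputs())  # -> torch.Size([4, 4, 4, 4])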
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_0[grid(256)](buf3, primals_5, buf4,
buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf6 = buf3
del buf3
extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_0[grid(256)](buf6, primals_7, buf7,
buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf9 = buf6
del buf6
extern_kernels.addmm(primals_9, reinterpret_tensor(buf8, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf9)
del primals_9
return reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf8, (64, 4), (4, 1), 0
), primals_8, primals_6, primals_4
class dnn_encoderNew(nn.Module):
def __init__(self, G_in, G_out, w1, w2, w3):
super(dnn_encoderNew, self).__init__()
self.fc1 = nn.Linear(G_in, w1)
self.fc2 = nn.Linear(w1, w2)
self.fc3 = nn.Linear(w2, w3)
self.out = nn.Linear(w3, G_out)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.out.weight
primals_9 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
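# Hedged smoke-test sketch, not part of the dataset record: the fused graph in
# call() is CUDA-only, so guard on device availability.
def _check_dnn_encoder_new():
    if not torch.cuda.is_available():
        return None
    model = dnn_encoderNew(4, 4, 4, 4, 4).cuda()
    return model(torch.rand(4, 4, 4, 4, device='cuda'))  # -> (4, 4, 4, 4)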
| repo_name: Maitreyapatel/speech-conversion-between-different-modalities | module_name: dnn_encoder | synthetic: false | uuid: 8510 | licenses: ["MIT"] | stars: 23 | sha: f757b487d9e6c20aa4f7d37247ba16f9a967f573 | repo_link: https://github.com/Maitreyapatel/speech-conversion-between-different-modalities/tree/f757b487d9e6c20aa4f7d37247ba16f9a967f573 |
_ImpalaCNN
|
import torch
from typing import Tuple
from torch import nn
class _ImpalaResBlock(nn.Module):
def __init__(self, n_channels: 'int'):
super().__init__()
self.n_channels = n_channels
kernel_size = 3
padding = 1
self.relu = nn.ReLU()
self.relu_inplace = nn.ReLU()
self.conv1 = nn.Conv2d(n_channels, n_channels, kernel_size, padding
=padding)
self.conv2 = nn.Conv2d(n_channels, n_channels, kernel_size, padding
=padding)
def forward(self, inputs):
x = self.relu(inputs)
x = self.conv1(x)
x = self.relu_inplace(x)
x = self.conv2(x)
x += inputs
return x
class _ImpalaBlock(nn.Module):
def __init__(self, n_channels_in: 'int', n_channels_out: 'int'):
super().__init__()
self.n_channels_in = n_channels_in
self.n_channels_out = n_channels_out
kernel_size = 3
padding = 1
self.conv1 = nn.Conv2d(n_channels_in, n_channels_out, kernel_size,
padding=padding)
self.pool = nn.MaxPool2d(kernel_size, stride=2, padding=padding)
self.res1 = _ImpalaResBlock(n_channels_out)
self.res2 = _ImpalaResBlock(n_channels_out)
def forward(self, x):
x = self.conv1(x)
x = self.pool(x)
x = self.res1(x)
x = self.res2(x)
return x
class _ImpalaCNN(nn.Module):
def __init__(self, img_shape: 'Tuple[int, int, int]', n_extra_feats:
'int'=0, n_outputs: 'int'=256):
super().__init__()
self.n_outputs = n_outputs
h, w, c = img_shape
self.block1 = _ImpalaBlock(c, 16)
self.block2 = _ImpalaBlock(16, 32)
self.block3 = _ImpalaBlock(32, 32)
self.relu = nn.ReLU()
self.flatten = nn.Flatten()
test_img = torch.empty(c, h, w)[None]
n_feats = self.block3(self.block2(self.block1(test_img))).numel()
self.linear = nn.Linear(n_feats + n_extra_feats, self.n_outputs)
def forward(self, x, extra_obs=None):
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.relu(x)
x = self.flatten(x)
if extra_obs is not None:
x = torch.cat((x, extra_obs), -1)
x = self.linear(x)
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'img_shape': [4, 4, 4]}]
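# Hedged usage sketch, not from the repo: note the constructor itself runs a
# dummy forward pass over torch.empty to size the linear head.
def _example_impala_cnn():
    model = _ImpalaCNN((4, 4, 4))
    return model(torch.rand(4, 4, 4, 4))  # -> (4, 256)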
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from typing import Tuple
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x4 = xindex // 2
x3 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x4), tmp10 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x4), tmp16 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x4), tmp23 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x4), tmp30 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (2 * x0 + 8 * x4), tmp33 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x4), tmp36 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x4), tmp43 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x4), tmp46 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x4), tmp49 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tmp77 = tl.full([1], 0, tl.int32)
tmp78 = triton_helpers.maximum(tmp77, tmp51)
tl.store(out_ptr0 + x3, tmp51, xmask)
tl.store(out_ptr1 + x3, tmp76, xmask)
tl.store(out_ptr2 + x3, tmp78, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_3(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp2 + tmp7
tl.store(in_out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_6(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.full([1], -1, tl.int64)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tmp5 & tmp5
tmp7 = tl.load(in_ptr0 + (-3 + 4 * x0), tmp6 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp8 = tmp1 >= tmp1
tmp9 = tmp1 < tmp3
tmp10 = tmp8 & tmp9
tmp11 = tmp5 & tmp10
tmp12 = tl.load(in_ptr0 + (-2 + 4 * x0), tmp11 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp13 = triton_helpers.maximum(tmp12, tmp7)
tmp14 = tl.full([1], 1, tl.int64)
tmp15 = tmp14 >= tmp1
tmp16 = tmp14 < tmp3
tmp17 = tmp15 & tmp16
tmp18 = tmp5 & tmp17
tmp19 = tl.load(in_ptr0 + (-1 + 4 * x0), tmp18 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp20 = triton_helpers.maximum(tmp19, tmp13)
tmp21 = tmp10 & tmp5
tmp22 = tl.load(in_ptr0 + (-1 + 4 * x0), tmp21 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp23 = triton_helpers.maximum(tmp22, tmp20)
tmp24 = tmp10 & tmp10
tmp25 = tl.load(in_ptr0 + 4 * x0, tmp24 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp26 = triton_helpers.maximum(tmp25, tmp23)
tmp27 = tmp10 & tmp17
tmp28 = tl.load(in_ptr0 + (1 + 4 * x0), tmp27 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp29 = triton_helpers.maximum(tmp28, tmp26)
tmp30 = tmp17 & tmp5
tmp31 = tl.load(in_ptr0 + (1 + 4 * x0), tmp30 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp29)
tmp33 = tmp17 & tmp10
tmp34 = tl.load(in_ptr0 + (2 + 4 * x0), tmp33 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp17 & tmp17
tmp37 = tl.load(in_ptr0 + (3 + 4 * x0), tmp36 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = tmp12 > tmp7
tmp40 = tl.full([1], 1, tl.int8)
tmp41 = tl.full([1], 0, tl.int8)
tmp42 = tl.where(tmp39, tmp40, tmp41)
tmp43 = tmp19 > tmp13
tmp44 = tl.full([1], 2, tl.int8)
tmp45 = tl.where(tmp43, tmp44, tmp42)
tmp46 = tmp22 > tmp20
tmp47 = tl.full([1], 3, tl.int8)
tmp48 = tl.where(tmp46, tmp47, tmp45)
tmp49 = tmp25 > tmp23
tmp50 = tl.full([1], 4, tl.int8)
tmp51 = tl.where(tmp49, tmp50, tmp48)
tmp52 = tmp28 > tmp26
tmp53 = tl.full([1], 5, tl.int8)
tmp54 = tl.where(tmp52, tmp53, tmp51)
tmp55 = tmp31 > tmp29
tmp56 = tl.full([1], 6, tl.int8)
tmp57 = tl.where(tmp55, tmp56, tmp54)
tmp58 = tmp34 > tmp32
tmp59 = tl.full([1], 7, tl.int8)
tmp60 = tl.where(tmp58, tmp59, tmp57)
tmp61 = tmp37 > tmp35
tmp62 = tl.full([1], 8, tl.int8)
tmp63 = tl.where(tmp61, tmp62, tmp60)
tmp64 = tl.full([1], 0, tl.int32)
tmp65 = triton_helpers.maximum(tmp64, tmp38)
tl.store(out_ptr0 + x0, tmp38, xmask)
tl.store(out_ptr1 + x0, tmp63, xmask)
tl.store(out_ptr2 + x0, tmp65, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_8(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_convolution_9(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp2 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_convolution_max_pool2d_with_indices_relu_10(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], -1, tl.int64)
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = tl.full([1], 1, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tmp5 & tmp7
tmp8 & tmp8
tmp10 = tmp4 >= tmp4
tmp11 = tmp4 < tmp6
tmp12 = tmp10 & tmp11
tmp8 & tmp12
tmp14 = triton_helpers.maximum(tmp2, tmp2)
tmp15 = tmp6 >= tmp4
tmp16 = tmp6 < tmp6
tmp17 = tmp15 & tmp16
tmp8 & tmp17
tmp19 = triton_helpers.maximum(tmp2, tmp14)
tmp12 & tmp8
tmp21 = triton_helpers.maximum(tmp2, tmp19)
tmp12 & tmp12
tmp23 = triton_helpers.maximum(tmp2, tmp21)
tmp12 & tmp17
tmp25 = triton_helpers.maximum(tmp2, tmp23)
tmp17 & tmp8
tmp27 = triton_helpers.maximum(tmp2, tmp25)
tmp17 & tmp12
tmp29 = triton_helpers.maximum(tmp2, tmp27)
tmp17 & tmp17
tmp31 = triton_helpers.maximum(tmp2, tmp29)
tmp32 = tmp2 > tmp2
tmp33 = tl.full([1], 1, tl.int8)
tmp34 = tl.full([1], 0, tl.int8)
tmp35 = tl.where(tmp32, tmp33, tmp34)
tmp36 = tmp2 > tmp14
tmp37 = tl.full([1], 2, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp2 > tmp19
tmp40 = tl.full([1], 3, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tmp42 = tmp2 > tmp21
tmp43 = tl.full([1], 4, tl.int8)
tmp44 = tl.where(tmp42, tmp43, tmp41)
tmp45 = tmp2 > tmp23
tmp46 = tl.full([1], 5, tl.int8)
tmp47 = tl.where(tmp45, tmp46, tmp44)
tmp48 = tmp2 > tmp25
tmp49 = tl.full([1], 6, tl.int8)
tmp50 = tl.where(tmp48, tmp49, tmp47)
tmp51 = tmp2 > tmp27
tmp52 = tl.full([1], 7, tl.int8)
tmp53 = tl.where(tmp51, tmp52, tmp50)
tmp54 = tmp2 > tmp29
tmp55 = tl.full([1], 8, tl.int8)
tmp56 = tl.where(tmp54, tmp55, tmp53)
tmp57 = tl.full([1], 0, tl.int32)
tmp58 = triton_helpers.maximum(tmp57, tmp31)
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp31, xmask)
tl.store(out_ptr1 + x2, tmp56, xmask)
tl.store(out_ptr2 + x2, tmp58, xmask)
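# Editor's note on the kernel above: the bare boolean expressions such as
# `tmp8 & tmp8` compute pooling-window validity masks whose results are
# discarded. They appear to be dead code left behind by Inductor when fusing
# the 3x3, stride-2 max-pool over a feature map that is already 1x1, where
# every window tap reduces to the same single element (hence the repeated
# `maximum(tmp2, ...)` chain).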
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_11(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp2 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(in_out_ptr0 + x2, tmp10, xmask)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_12(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32, primals_33
) = args
args.clear()
assert_size_stride(primals_1, (16, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_13, (32,), (1,))
assert_size_stride(primals_14, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_15, (32,), (1,))
assert_size_stride(primals_16, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_17, (32,), (1,))
assert_size_stride(primals_18, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_19, (32,), (1,))
assert_size_stride(primals_20, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_21, (32,), (1,))
assert_size_stride(primals_22, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_23, (32,), (1,))
assert_size_stride(primals_24, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_25, (32,), (1,))
assert_size_stride(primals_26, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_27, (32,), (1,))
assert_size_stride(primals_28, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_29, (32,), (1,))
assert_size_stride(primals_30, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_31, (32,), (1,))
assert_size_stride(primals_32, (256, 32), (32, 1))
assert_size_stride(primals_33, (256,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(1024)](buf1, primals_2, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 16, 2, 2), (64, 4, 2, 1), torch.float32)
buf3 = empty_strided_cuda((4, 16, 2, 2), (64, 4, 2, 1), torch.int8)
buf4 = empty_strided_cuda((4, 16, 2, 2), (64, 4, 2, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(256)](buf1,
buf2, buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 2, 2), (64, 4, 2, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_2[grid(256)](buf6, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 16, 2, 2), (64, 4, 2, 1))
buf8 = empty_strided_cuda((4, 16, 2, 2), (64, 4, 2, 1), torch.float32)
triton_poi_fused_add_convolution_relu_3[grid(256)](buf7, primals_7,
buf2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 16, 2, 2), (64, 4, 2, 1))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_2[grid(256)](buf10, primals_9,
256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 16, 2, 2), (64, 4, 2, 1))
buf12 = buf11
del buf11
triton_poi_fused_add_convolution_4[grid(256)](buf12, primals_11,
buf7, primals_7, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
del buf7
del primals_11
del primals_7
buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 32, 2, 2), (128, 4, 2, 1))
buf14 = buf13
del buf13
triton_poi_fused_convolution_5[grid(512)](buf14, primals_13, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
buf15 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf16 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 1, 1), torch.int8)
buf17 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 1, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_6[grid(128)](buf14,
buf15, buf16, buf17, 128, XBLOCK=128, num_warps=4, num_stages=1)
buf18 = extern_kernels.convolution(buf17, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 32, 1, 1), (32, 1, 1, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_7[grid(128)](buf19, primals_15,
128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_15
buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 32, 1, 1), (32, 1, 1, 1))
buf21 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 1, 1), torch.float32)
triton_poi_fused_add_convolution_relu_8[grid(128)](buf20,
primals_17, buf15, buf21, 128, XBLOCK=128, num_warps=4,
num_stages=1)
buf22 = extern_kernels.convolution(buf21, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 32, 1, 1), (32, 1, 1, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_7[grid(128)](buf23, primals_19,
128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_19
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 32, 1, 1), (32, 1, 1, 1))
buf25 = reinterpret_tensor(buf15, (4, 32, 1, 1), (32, 1, 1, 1), 0)
del buf15
triton_poi_fused_add_convolution_9[grid(128)](buf25, buf24,
primals_21, buf20, primals_17, 128, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_17
del primals_21
buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 32, 1, 1), (32, 1, 1, 1))
buf27 = buf26
del buf26
buf28 = reinterpret_tensor(buf24, (4, 32, 1, 1), (32, 1, 128, 128), 0)
del buf24
buf29 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 1, 1), torch.int8)
buf30 = buf20
del buf20
triton_poi_fused_convolution_max_pool2d_with_indices_relu_10[grid(128)
](buf27, primals_23, buf28, buf29, buf30, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_23
buf31 = extern_kernels.convolution(buf30, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 32, 1, 1), (32, 1, 1, 1))
buf32 = buf31
del buf31
triton_poi_fused_convolution_relu_7[grid(128)](buf32, primals_25,
128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_25
buf33 = extern_kernels.convolution(buf32, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 32, 1, 1), (32, 1, 1, 1))
buf34 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 1, 1), torch.float32)
triton_poi_fused_add_convolution_relu_8[grid(128)](buf33,
primals_27, buf28, buf34, 128, XBLOCK=128, num_warps=4,
num_stages=1)
buf35 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 32, 1, 1), (32, 1, 1, 1))
buf36 = buf35
del buf35
triton_poi_fused_convolution_relu_7[grid(128)](buf36, primals_29,
128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_29
buf37 = extern_kernels.convolution(buf36, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 32, 1, 1), (32, 1, 1, 1))
buf38 = reinterpret_tensor(buf28, (4, 32, 1, 1), (32, 1, 1, 1), 0)
del buf28
buf42 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 1, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_11[grid(128)](
buf38, buf37, primals_31, buf33, primals_27, buf42, 128, XBLOCK
=128, num_warps=4, num_stages=1)
del buf33
del buf37
del primals_27
del primals_31
buf39 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf38, (4, 32), (32, 1), 0),
reinterpret_tensor(primals_32, (32, 256), (1, 32), 0), out=buf39)
buf40 = buf39
del buf39
buf41 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_12[grid(1024)](buf40,
primals_33, buf41, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_33
return (buf40, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, buf1, buf3, buf4, buf6, buf8, buf10, buf12, buf14,
buf16, buf17, buf19, buf21, buf23, buf25, buf27, buf29, buf30,
buf32, buf34, buf36, reinterpret_tensor(buf38, (4, 32), (32, 1), 0),
buf41, primals_32, buf42)
class _ImpalaResBlock(nn.Module):
def __init__(self, n_channels: 'int'):
super().__init__()
self.n_channels = n_channels
kernel_size = 3
padding = 1
self.relu = nn.ReLU()
self.relu_inplace = nn.ReLU()
self.conv1 = nn.Conv2d(n_channels, n_channels, kernel_size, padding
=padding)
self.conv2 = nn.Conv2d(n_channels, n_channels, kernel_size, padding
=padding)
def forward(self, inputs):
x = self.relu(inputs)
x = self.conv1(x)
x = self.relu_inplace(x)
x = self.conv2(x)
x += inputs
return x
class _ImpalaBlock(nn.Module):
def __init__(self, n_channels_in: 'int', n_channels_out: 'int'):
super().__init__()
self.n_channels_in = n_channels_in
self.n_channels_out = n_channels_out
kernel_size = 3
padding = 1
self.conv1 = nn.Conv2d(n_channels_in, n_channels_out, kernel_size,
padding=padding)
self.pool = nn.MaxPool2d(kernel_size, stride=2, padding=padding)
self.res1 = _ImpalaResBlock(n_channels_out)
self.res2 = _ImpalaResBlock(n_channels_out)
def forward(self, x):
x = self.conv1(x)
x = self.pool(x)
x = self.res1(x)
x = self.res2(x)
return x
class _ImpalaCNNNew(nn.Module):
def __init__(self, img_shape: 'Tuple[int, int, int]', n_extra_feats:
'int'=0, n_outputs: 'int'=256):
super().__init__()
self.n_outputs = n_outputs
h, w, c = img_shape
self.block1 = _ImpalaBlock(c, 16)
self.block2 = _ImpalaBlock(16, 32)
self.block3 = _ImpalaBlock(32, 32)
self.relu = nn.ReLU()
self.flatten = nn.Flatten()
test_img = torch.empty(c, h, w)[None]
n_feats = self.block3(self.block2(self.block1(test_img))).numel()
self.linear = nn.Linear(n_feats + n_extra_feats, self.n_outputs)
def forward(self, input_0):
primals_1 = self.block1.conv1.weight
primals_2 = self.block1.conv1.bias
primals_4 = self.block1.res1.conv1.weight
primals_5 = self.block1.res1.conv1.bias
primals_6 = self.block1.res1.conv2.weight
primals_7 = self.block1.res1.conv2.bias
primals_8 = self.block1.res2.conv1.weight
primals_9 = self.block1.res2.conv1.bias
primals_10 = self.block1.res2.conv2.weight
primals_11 = self.block1.res2.conv2.bias
primals_12 = self.block2.conv1.weight
primals_13 = self.block2.conv1.bias
primals_14 = self.block2.res1.conv1.weight
primals_15 = self.block2.res1.conv1.bias
primals_16 = self.block2.res1.conv2.weight
primals_17 = self.block2.res1.conv2.bias
primals_18 = self.block2.res2.conv1.weight
primals_19 = self.block2.res2.conv1.bias
primals_20 = self.block2.res2.conv2.weight
primals_21 = self.block2.res2.conv2.bias
primals_22 = self.block3.conv1.weight
primals_23 = self.block3.conv1.bias
primals_24 = self.block3.res1.conv1.weight
primals_25 = self.block3.res1.conv1.bias
primals_26 = self.block3.res1.conv2.weight
primals_27 = self.block3.res1.conv2.bias
primals_28 = self.block3.res2.conv1.weight
primals_29 = self.block3.res2.conv1.bias
primals_30 = self.block3.res2.conv2.weight
primals_31 = self.block3.res2.conv2.bias
primals_32 = self.linear.weight
primals_33 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33])
return output[0]
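# Hedged smoke-test sketch, not part of the dataset record: CUDA-only, since
# call() dispatches the Triton kernels directly.
def _check_impala_cnn_new():
    if not torch.cuda.is_available():
        return None
    model = _ImpalaCNNNew((4, 4, 4)).cuda()
    return model(torch.rand(4, 4, 4, 4, device='cuda'))  # -> (4, 256)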
| repo_name: IBM/vsrl-framework | module_name: _ImpalaCNN | synthetic: false | uuid: 8511 | licenses: ["MIT"] | stars: 44 | sha: 42e0853bffb5efbb66cd97178aff9e10ad18c5a9 | repo_link: https://github.com/IBM/vsrl-framework/tree/42e0853bffb5efbb66cd97178aff9e10ad18c5a9 |
FinalTanh
|
import torch
class FinalTanh(torch.nn.Module):
def __init__(self, input_channels, hidden_channels,
hidden_hidden_channels, num_hidden_layers):
super(FinalTanh, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.hidden_hidden_channels = hidden_hidden_channels
self.num_hidden_layers = num_hidden_layers
self.linear_in = torch.nn.Linear(hidden_channels,
hidden_hidden_channels)
self.linears = torch.nn.ModuleList(torch.nn.Linear(
hidden_hidden_channels, hidden_hidden_channels) for _ in range(
num_hidden_layers - 1))
self.linear_out = torch.nn.Linear(hidden_hidden_channels,
input_channels * hidden_channels)
def extra_repr(self):
return (
'input_channels: {}, hidden_channels: {}, hidden_hidden_channels: {}, num_hidden_layers: {}'
.format(self.input_channels, self.hidden_channels, self.
hidden_hidden_channels, self.num_hidden_layers))
def forward(self, z):
z = self.linear_in(z)
z = z.relu()
for linear in self.linears:
z = linear(z)
z = z.relu()
z = self.linear_out(z).view(*z.shape[:-1], self.hidden_channels,
self.input_channels)
z = z.tanh()
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'hidden_channels': 4,
'hidden_hidden_channels': 4, 'num_hidden_layers': 1}]
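# Hedged usage sketch, not from the repo: with num_hidden_layers=1 the
# ModuleList is empty, so the path is linear_in -> relu -> linear_out ->
# view -> tanh.
def _example_final_tanh():
    model = FinalTanh(4, 4, 4, 1)
    return model(torch.rand(4, 4, 4, 4))  # -> (4, 4, 4, 4, 4), values in (-1, 1)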
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
)
del buf2
triton_poi_fused_tanh_1[grid(1024)](buf3, primals_5, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_4, buf4
class FinalTanhNew(torch.nn.Module):
def __init__(self, input_channels, hidden_channels,
hidden_hidden_channels, num_hidden_layers):
super(FinalTanhNew, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.hidden_hidden_channels = hidden_hidden_channels
self.num_hidden_layers = num_hidden_layers
self.linear_in = torch.nn.Linear(hidden_channels,
hidden_hidden_channels)
self.linears = torch.nn.ModuleList(torch.nn.Linear(
hidden_hidden_channels, hidden_hidden_channels) for _ in range(
num_hidden_layers - 1))
self.linear_out = torch.nn.Linear(hidden_hidden_channels,
input_channels * hidden_channels)
def extra_repr(self):
return (
'input_channels: {}, hidden_channels: {}, hidden_hidden_channels: {}, num_hidden_layers: {}'
.format(self.input_channels, self.hidden_channels, self.
hidden_hidden_channels, self.num_hidden_layers))
def forward(self, input_0):
primals_1 = self.linear_in.weight
primals_2 = self.linear_in.bias
primals_4 = self.linear_out.weight
primals_5 = self.linear_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| repo_name: MLforHealth/state_representations_for_RLinHealth | module_name: FinalTanh | synthetic: false | uuid: 8512 | licenses: ["MIT"] | stars: 24 | sha: aa8dbb7d56caa95bf4380e3e745e134996291b66 | repo_link: https://github.com/MLforHealth/state_representations_for_RLinHealth/tree/aa8dbb7d56caa95bf4380e3e745e134996291b66 |
Simple224Upsample
|
import torch
import torch.nn as nn
class Simple224Upsample(nn.Module):
def __init__(self, arch=''):
super(Simple224Upsample, self).__init__()
self.upsample = nn.Upsample(mode='nearest', scale_factor=7)
self.arch = arch
def forward(self, x):
return self.upsample(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
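# Hedged usage sketch, not from the repo: nearest-neighbour upsampling by 7x,
# so a 4x4 spatial map becomes 28x28 (the class name suggests the intended
# use is 32x32 -> 224x224).
def _example_simple224_upsample():
    model = Simple224Upsample()
    return model(torch.rand(4, 4, 4, 4))  # -> (4, 4, 28, 28)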
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 28 % 28
x0 = xindex % 28
x2 = xindex // 784
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.14285714285714285
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 28, 28), (3136, 784, 28, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_0[grid(12544)](arg0_1, buf0, 12544,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class Simple224UpsampleNew(nn.Module):
def __init__(self, arch=''):
super(Simple224UpsampleNew, self).__init__()
self.upsample = nn.Upsample(mode='nearest', scale_factor=7)
self.arch = arch
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| repo_name: MadryLab/smoothed-vit | module_name: Simple224Upsample | synthetic: false | uuid: 8513 | licenses: ["MIT"] | stars: 16 | sha: a4327542e519e010764821716b64b944d458d1c1 | repo_link: https://github.com/MadryLab/smoothed-vit/tree/a4327542e519e010764821716b64b944d458d1c1 |
MultiHeadAttention
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
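        # editor's note: the unsqueezed mask is never applied to `scores`
        # below, so a supplied mask currently has no effect on the weights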
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.5):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = attention(q, k, v, self.d_k, mask, self.dropout)
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'heads': 4, 'd_model': 4}]
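# Hedged usage sketch, not from the repo: .eval() disables the p=0.5 dropout
# on the attention weights so the output is deterministic.
def _example_multi_head_attention():
    model = MultiHeadAttention(heads=4, d_model=4).eval()
    q, k, v = get_inputs()
    return model(q, k, v)  # -> (4, 16, 4)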
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_per_fused_1(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = float('-inf')
tmp12 = tmp0 == tmp11
tmp13 = tmp12 == 0
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp14 != 0
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = triton_helpers.any(tmp18, 1)[:, None]
tmp20 = tmp19 == 0
tmp21 = tmp6 / tmp10
tmp22 = 0.0
tmp23 = tl.where(tmp20, tmp22, tmp21)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp23, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 16)](buf1, primals_6, buf3, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_6
buf4 = reinterpret_tensor(buf1, (4, 4, 1, 16), (64, 16, 16, 1), 0)
del buf1
triton_poi_fused_0[grid(16, 16)](buf0, primals_3, buf4, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_per_fused_1[grid(256)](buf5, buf9, 256, 16, XBLOCK=32,
num_warps=4, num_stages=1)
del buf5
buf10 = reinterpret_tensor(buf0, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf0
triton_poi_fused_2[grid(16, 16)](buf2, primals_8, buf10, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 16, 16), (256, 16,
1), 0), reinterpret_tensor(buf10, (16, 16, 1), (16, 1, 0), 0),
out=buf11)
buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(64, 4)](buf11, buf12, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_11
return reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0
), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf9, reinterpret_tensor(buf10, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0
), reinterpret_tensor(buf12, (64, 4), (4, 1), 0), primals_10
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class MultiHeadAttentionNew(nn.Module):
def __init__(self, heads, d_model, dropout=0.5):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, input_0, input_1, input_2):
primals_2 = self.q_linear.weight
primals_3 = self.q_linear.bias
primals_5 = self.v_linear.weight
primals_6 = self.v_linear.bias
primals_7 = self.k_linear.weight
primals_8 = self.k_linear.bias
primals_10 = self.out.weight
primals_11 = self.out.bias
primals_1 = input_0
primals_4 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| repo_name: MadanMl/PyTorch-Transformer-for-RUL-Prediction | module_name: MultiHeadAttention | synthetic: false | uuid: 8514 | licenses: ["Apache-2.0"] | stars: 25 | sha: 5bf0a4739abdecbbc88118ea413393997bdc1e24 | repo_link: https://github.com/MadanMl/PyTorch-Transformer-for-RUL-Prediction/tree/5bf0a4739abdecbbc88118ea413393997bdc1e24 |
DDM_Decoder
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('GRUCell') != -1:
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
class DDM_Decoder(torch.nn.Module):
def __init__(self, obs_space, dim):
super(DDM_Decoder, self).__init__()
self.fc = nn.Linear(dim, 32 * 3 * 3)
self.linear1 = nn.Linear(32 * 3 * 3, dim)
self.linear2 = nn.Linear(dim, obs_space)
self.apply(weights_init)
self.train()
def forward(self, inputs):
x = F.elu(self.fc(inputs))
x = F.elu(self.linear1(x))
x = self.linear2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'obs_space': 4, 'dim': 4}]
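# Hedged usage sketch, not from the repo: dim -> 288 -> dim -> obs_space,
# with an ELU after each of the first two layers.
def _example_ddm_decoder():
    model = DDM_Decoder(obs_space=4, dim=4)
    return model(torch.rand(4, 4, 4, 4))  # -> (4, 4, 4, 4)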
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, None)
@triton.jit
def triton_poi_fused_elu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (288, 4), (4, 1))
assert_size_stride(primals_2, (288,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 288), (288, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 288), (288, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 288), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 288), (4608, 1152, 288, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(18432)](buf0, buf1, 18432, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 288),
(288, 1), 0), reinterpret_tensor(primals_4, (288, 4), (1, 288),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_elu_1[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 288), (288, 1), 0
), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
), primals_6, primals_4
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('GRUCell') != -1:
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
class DDM_DecoderNew(torch.nn.Module):
def __init__(self, obs_space, dim):
super(DDM_DecoderNew, self).__init__()
self.fc = nn.Linear(dim, 32 * 3 * 3)
self.linear1 = nn.Linear(32 * 3 * 3, dim)
self.linear2 = nn.Linear(dim, obs_space)
self.apply(weights_init)
self.train()
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_4 = self.linear1.weight
primals_5 = self.linear1.bias
primals_6 = self.linear2.weight
primals_7 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
MLforHealth/state_representations_for_RLinHealth
|
DDM_Decoder
| false
| 8,515
|
[
"MIT"
] | 24
|
aa8dbb7d56caa95bf4380e3e745e134996291b66
|
https://github.com/MLforHealth/state_representations_for_RLinHealth/tree/aa8dbb7d56caa95bf4380e3e745e134996291b66
|
_GRU_ODE
|
import torch
class _GRU_ODE(torch.nn.Module):
def __init__(self, input_channels, hidden_channels):
super(_GRU_ODE, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.W_r = torch.nn.Linear(input_channels, hidden_channels, bias=False)
self.W_z = torch.nn.Linear(input_channels, hidden_channels, bias=False)
self.W_h = torch.nn.Linear(input_channels, hidden_channels, bias=False)
self.U_r = torch.nn.Linear(hidden_channels, hidden_channels)
self.U_z = torch.nn.Linear(hidden_channels, hidden_channels)
self.U_h = torch.nn.Linear(hidden_channels, hidden_channels)
def extra_repr(self):
return 'input_channels: {}, hidden_channels: {}'.format(self.
input_channels, self.hidden_channels)
def forward(self, x, h):
r = self.W_r(x) + self.U_r(h)
r = r.sigmoid()
z = self.W_z(x) + self.U_z(h)
z = z.sigmoid()
g = self.W_h(x) + self.U_h(r * h)
g = g.tanh()
return (1 - z) * (g - h)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'hidden_channels': 4}]
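# A minimal sketch (the step size, step count, and Euler scheme are
# illustrative assumptions, not part of the original module): forward()
# returns the GRU-ODE vector field dh/dt = (1 - z) * (g - h), which a
# solver integrates over time; plain Euler steps are enough to see it run.
func = _GRU_ODE(input_channels=4, hidden_channels=4)
x = torch.rand(8, 4)   # input path, held fixed across steps here
h = torch.zeros(8, 4)  # hidden state
dt = 0.1
for _ in range(10):
    h = h + dt * func(x, h)
print(h.shape)  # torch.Size([8, 4])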
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x2, xmask)
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = tmp5 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp5
tmp10 = tmp5 * tmp9
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_sub_tanh_1(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_out_ptr1 + x2, xmask)
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp8 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x2, xmask)
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp12 = 1.0
tmp13 = tmp12 - tmp5
tmp15 = tmp11 - tmp14
tmp16 = tmp13 * tmp15
tl.store(in_out_ptr0 + x2, tmp5, xmask)
tl.store(in_out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5)
del primals_9
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0[grid(256)](buf0,
buf1, primals_4, primals_5, buf6, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
buf7 = buf1
del buf1
extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf7)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
buf9 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_mul_rsub_sigmoid_sub_tanh_1[grid(256)](buf4,
buf8, buf3, primals_8, buf7, primals_11, primals_5, buf9, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del buf7
del primals_11
del primals_8
return buf9, primals_5, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf6, (64, 4), (4, 1), 0
), buf8, primals_10, buf10
class _GRU_ODENew(torch.nn.Module):
def __init__(self, input_channels, hidden_channels):
super(_GRU_ODENew, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.W_r = torch.nn.Linear(input_channels, hidden_channels, bias=False)
self.W_z = torch.nn.Linear(input_channels, hidden_channels, bias=False)
self.W_h = torch.nn.Linear(input_channels, hidden_channels, bias=False)
self.U_r = torch.nn.Linear(hidden_channels, hidden_channels)
self.U_z = torch.nn.Linear(hidden_channels, hidden_channels)
self.U_h = torch.nn.Linear(hidden_channels, hidden_channels)
def extra_repr(self):
return 'input_channels: {}, hidden_channels: {}'.format(self.
input_channels, self.hidden_channels)
def forward(self, input_0, input_1):
primals_1 = self.W_r.weight
primals_3 = self.W_z.weight
primals_6 = self.W_h.weight
primals_7 = self.U_r.weight
primals_4 = self.U_r.bias
primals_9 = self.U_z.weight
primals_8 = self.U_z.bias
primals_10 = self.U_h.weight
primals_11 = self.U_h.bias
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
MLforHealth/state_representations_for_RLinHealth
|
_GRU_ODE
| false
| 8,516
|
[
"MIT"
] | 24
|
aa8dbb7d56caa95bf4380e3e745e134996291b66
|
https://github.com/MLforHealth/state_representations_for_RLinHealth/tree/aa8dbb7d56caa95bf4380e3e745e134996291b66
|
L2Conv2D
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
class L2Conv2D(nn.Module):
"""
Convolutional layer that computes the squared L2 distance instead of the conventional inner product.
"""
def __init__(self, num_prototypes, num_features, w_1, h_1):
"""
Create a new L2Conv2D layer
:param num_prototypes: The number of prototypes in the layer
:param num_features: The number of channels in the input features
:param w_1: Width of the prototypes
:param h_1: Height of the prototypes
"""
super().__init__()
prototype_shape = num_prototypes, num_features, w_1, h_1
self.prototype_vectors = nn.Parameter(torch.randn(prototype_shape),
requires_grad=True)
def forward(self, xs):
"""
Perform convolution over the input using the squared L2 distance for all prototypes in the layer
:param xs: A batch of input images obtained as output from some convolutional neural network F. Following the
notation from the paper, let the shape of xs be (batch_size, D, W, H), where
- D is the number of output channels of the conv net F
- W is the width of the convolutional output of F
- H is the height of the convolutional output of F
        :return: a tensor of shape (batch_size, num_prototypes, W - w_1 + 1, H - h_1 + 1) containing, for every patch
                 of the input, the L2 distance (the square root of the squared distance) to each prototype
"""
ones = torch.ones_like(self.prototype_vectors, device=xs.device)
xs_squared_l2 = F.conv2d(xs ** 2, weight=ones)
ps_squared_l2 = torch.sum(self.prototype_vectors ** 2, dim=(1, 2, 3))
ps_squared_l2 = ps_squared_l2.view(-1, 1, 1)
xs_conv = F.conv2d(xs, weight=self.prototype_vectors)
distance = xs_squared_l2 + ps_squared_l2 - 2 * xs_conv
distance = torch.sqrt(torch.abs(distance) + 1e-14)
if torch.isnan(distance).any():
raise Exception(
'Error: NaN values! Using the --log_probabilities flag might fix this issue'
)
return distance
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_prototypes': 4, 'num_features': 4, 'w_1': 4, 'h_1': 4}]
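# A minimal sketch checking the expansion used in forward(),
# ||x - p||^2 = ||x||^2 + ||p||^2 - 2<x, p>, against a direct distance
# computation (torch.cdist). Sizes are illustrative; with w_1 = h_1 = 4 and
# 4x4 inputs, each image is a single patch, so the output is (N, P, 1, 1).
layer = L2Conv2D(num_prototypes=4, num_features=4, w_1=4, h_1=4)
xs = torch.rand(2, 4, 4, 4)
dist = layer(xs)                                # shape (2, 4, 1, 1)
patches = xs.flatten(1)                         # one flattened patch per image
protos = layer.prototype_vectors.flatten(1)
ref = torch.cdist(patches, protos)              # direct pairwise L2 distances
print(torch.allclose(dist.flatten(1), ref, atol=1e-4))  # True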
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_ones_like_pow_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_convolution_ones_like_pow_1(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 1.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_per_fused_pow_sum_2(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_per_fused_abs_add_any_isnan_mul_sgn_sqrt_sub_3(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr
):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 4
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + r2, None)
tmp2 = tmp0 + tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 - tmp5
tmp7 = tl_math.abs(tmp6)
tmp8 = 1e-14
tmp9 = tmp7 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tl.full([1, 1], 0, tl.int32)
tmp12 = tmp11 < tmp6
tmp13 = tmp12.to(tl.int8)
tmp14 = tmp6 < tmp11
tmp15 = tmp14.to(tl.int8)
tmp16 = tmp13 - tmp15
tmp17 = tmp16.to(tmp6.dtype)
tmp18 = libdevice.isnan(tmp10).to(tl.int1)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = triton_helpers.any(tmp19, 1)[:, None]
tl.store(out_ptr0 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp10, None)
tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp17, None)
tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_ones_like_pow_0[grid(256)](primals_2,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_ones_like_pow_1[grid(256)](buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
del buf0
del buf1
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_pow_sum_2[grid(4)](primals_1, buf3, 4, 64, XBLOCK=
1, num_warps=2, num_stages=1)
buf4 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf6 = empty_strided_cuda((), (), torch.bool)
triton_per_fused_abs_add_any_isnan_mul_sgn_sqrt_sub_3[grid(1)](buf2,
buf3, buf4, buf5, buf7, buf6, 1, 16, XBLOCK=1, num_warps=2,
num_stages=1)
del buf2
del buf3
del buf4
return buf5, buf6, primals_1, primals_2, buf5, buf7
class L2Conv2DNew(nn.Module):
"""
Convolutional layer that computes the squared L2 distance instead of the conventional inner product.
"""
def __init__(self, num_prototypes, num_features, w_1, h_1):
"""
Create a new L2Conv2D layer
:param num_prototypes: The number of prototypes in the layer
:param num_features: The number of channels in the input features
:param w_1: Width of the prototypes
:param h_1: Height of the prototypes
"""
super().__init__()
prototype_shape = num_prototypes, num_features, w_1, h_1
self.prototype_vectors = nn.Parameter(torch.randn(prototype_shape),
requires_grad=True)
def forward(self, input_0):
primals_1 = self.prototype_vectors
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
M-Nauta/ProtoTree
|
L2Conv2D
| false
| 8,517
|
[
"MIT"
] | 35
|
72cad5e42b0eb05c1312e5496f36b842726e081a
|
https://github.com/M-Nauta/ProtoTree/tree/72cad5e42b0eb05c1312e5496f36b842726e081a
|
Encoder
|
import torch
import torch.utils.data
from torch import nn
from torch.nn import functional
class Encoder(nn.Module):
def __init__(self, input_dim, hidden_dim, z_dim):
"""
Args:
            input_dim: An integer indicating the size of the input.
            hidden_dim: An integer indicating the size of the hidden dimension.
            z_dim: An integer indicating the latent dimension.
"""
super().__init__()
self.linear = nn.Linear(input_dim, hidden_dim)
self.mu = nn.Linear(hidden_dim, z_dim)
self.var = nn.Linear(hidden_dim, z_dim)
def forward(self, x):
hidden = functional.relu(self.linear(x))
z_mu = self.mu(hidden)
z_var = self.var(hidden)
return z_mu, z_var
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'hidden_dim': 4, 'z_dim': 4}]
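# A minimal usage sketch (the batch size and the reparameterization step are
# illustrative assumptions; in particular, treating z_var as a log-variance
# is a common VAE convention that the class itself does not enforce).
enc = Encoder(input_dim=4, hidden_dim=4, z_dim=4)
x = torch.rand(8, 4)
z_mu, z_var = enc(x)
z = z_mu + torch.randn_like(z_mu) * torch.exp(0.5 * z_var)
print(z.shape)  # torch.Size([8, 4])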
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_7
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), primals_6, primals_4, buf4
class EncoderNew(nn.Module):
def __init__(self, input_dim, hidden_dim, z_dim):
"""
Args:
            input_dim: An integer indicating the size of the input.
            hidden_dim: An integer indicating the size of the hidden dimension.
            z_dim: An integer indicating the latent dimension.
"""
super().__init__()
self.linear = nn.Linear(input_dim, hidden_dim)
self.mu = nn.Linear(hidden_dim, z_dim)
self.var = nn.Linear(hidden_dim, z_dim)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_4 = self.mu.weight
primals_5 = self.mu.bias
primals_6 = self.var.weight
primals_7 = self.var.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
|
MaurizioFD/recsys-challenge-2020-twitter
|
Encoder
| false
| 8,518
|
[
"Apache-2.0"
] | 44
|
95dc024fb4f8777aa62e1304536daece640428de
|
https://github.com/MaurizioFD/recsys-challenge-2020-twitter/tree/95dc024fb4f8777aa62e1304536daece640428de
|
BasicBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
"""Basic residual block class"""
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv1d(in_planes, planes, kernel_size=3, stride=
stride, padding=1, bias=False)
self.conv2 = nn.Conv1d(planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv1d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=False))
def forward(self, x):
out = F.relu(self.conv1(x))
out = self.conv2(out)
out += self.shortcut(x)
out = F.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'planes': 4}]
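# A minimal usage sketch (sizes are illustrative). Note that Conv1d expects
# (N, C, L) input; the 2-D tensor from get_inputs() above only works because
# the compiled graph below reinterprets it as (1, 4, 4). With stride=1 and
# in_planes == planes, the shortcut is the identity and shapes are preserved.
block = BasicBlock(in_planes=4, planes=4, stride=1)
x = torch.rand(2, 4, 16)  # (batch, channels, length)
print(block(x).shape)     # torch.Size([2, 4, 16])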
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 3), (12, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 4), (16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, buf5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4
), (0, 4, 1), 0), primals_3, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf2, (1, 4, 4), (16, 4, 1))
buf3 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16)](buf3,
primals_2, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
return buf3, primals_1, primals_3, reinterpret_tensor(primals_2, (1, 4,
4), (16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 4), (16, 4, 1), 0
), buf4, buf5
class BasicBlockNew(nn.Module):
"""Basic residual block class"""
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlockNew, self).__init__()
self.conv1 = nn.Conv1d(in_planes, planes, kernel_size=3, stride=
stride, padding=1, bias=False)
self.conv2 = nn.Conv1d(planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv1d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=False))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Maosef/easy-to-hard
|
BasicBlock
| false
| 8,519
|
[
"MIT"
] | 44
|
711ec0965229444a6c51b1b06a4e2cad3e32d02e
|
https://github.com/Maosef/easy-to-hard/tree/711ec0965229444a6c51b1b06a4e2cad3e32d02e
|
FC_Q
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FC_Q(nn.Module):
def __init__(self, state_dim, num_actions, num_nodes=128):
super(FC_Q, self).__init__()
self.q1 = nn.Linear(state_dim, num_nodes)
self.q2 = nn.Linear(num_nodes, num_nodes)
self.q3 = nn.Linear(num_nodes, num_actions)
self.i1 = nn.Linear(state_dim, num_nodes)
self.i2 = nn.Linear(num_nodes, num_nodes)
self.i3 = nn.Linear(num_nodes, num_actions)
def forward(self, state):
q = F.relu(self.q1(state))
q = F.relu(self.q2(q))
i = F.relu(self.i1(state))
i = F.relu(self.i2(i))
i = F.relu(self.i3(i))
return self.q3(q), F.log_softmax(i, dim=1), i
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'num_actions': 4}]
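# A minimal usage sketch (the batch size is illustrative). With a 2-D state
# batch, dim=1 in F.log_softmax is the action dimension, so each row of the
# second output is a normalized log-probability vector over actions.
net = FC_Q(state_dim=4, num_actions=4)
state = torch.rand(8, 4)
q, log_pi, i = net(state)
print(q.shape, log_pi.shape)    # torch.Size([8, 4]) torch.Size([8, 4])
print(log_pi.exp().sum(dim=1))  # each entry is ~1.0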
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__log_softmax_threshold_backward_2(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = 0.0
tmp10 = tmp0 <= tmp9
tl.store(out_ptr0 + x3, tmp8, xmask)
tl.store(out_ptr1 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 128), (128, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (128, 4), (4, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128), (128, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (4, 128), (128, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 128), (128, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf17 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf17, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2)
buf3 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 128), (1, 4), 0), out=buf3)
del primals_6
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf3
buf15 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf4,
primals_7, buf15, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf5 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_8, (128, 128), (1, 128), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf5
buf14 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf6,
primals_9, buf14, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_10, (128, 4), (1, 128), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf7
triton_poi_fused_relu_1[grid(256)](buf8, primals_11, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_11
buf9 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf16 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf9,
primals_5, buf16, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf9, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_12, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf10)
del primals_13
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused__log_softmax_threshold_backward_2[grid(256)](buf8,
buf11, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_3[grid(256)](buf11, buf12, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf11
return (reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0),
buf12, buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
reinterpret_tensor(buf4, (64, 128), (128, 1), 0),
reinterpret_tensor(buf6, (64, 128), (128, 1), 0),
reinterpret_tensor(buf9, (64, 128), (128, 1), 0), buf12, primals_12,
buf13, primals_10, buf14, primals_8, buf15, buf16, primals_4, buf17)
class FC_QNew(nn.Module):
def __init__(self, state_dim, num_actions, num_nodes=128):
super(FC_QNew, self).__init__()
self.q1 = nn.Linear(state_dim, num_nodes)
self.q2 = nn.Linear(num_nodes, num_nodes)
self.q3 = nn.Linear(num_nodes, num_actions)
self.i1 = nn.Linear(state_dim, num_nodes)
self.i2 = nn.Linear(num_nodes, num_nodes)
self.i3 = nn.Linear(num_nodes, num_actions)
def forward(self, input_0):
primals_1 = self.q1.weight
primals_2 = self.q1.bias
primals_4 = self.q2.weight
primals_5 = self.q2.bias
primals_10 = self.q3.weight
primals_11 = self.q3.bias
primals_6 = self.i1.weight
primals_7 = self.i1.bias
primals_8 = self.i2.weight
primals_9 = self.i2.bias
primals_12 = self.i3.weight
primals_13 = self.i3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1], output[2]
|
MLforHealth/state_representations_for_RLinHealth
|
FC_Q
| false
| 8,520
|
[
"MIT"
] | 24
|
aa8dbb7d56caa95bf4380e3e745e134996291b66
|
https://github.com/MLforHealth/state_representations_for_RLinHealth/tree/aa8dbb7d56caa95bf4380e3e745e134996291b66
|
gem
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class gem(nn.Module):
def __init__(self, p=3.0, eps=1e-06):
super(gem, self).__init__()
self.p = p
self.eps = eps
def forward(self, x):
return F.avg_pool2d(x.clamp(min=self.eps).pow(self.p), (x.size(-2),
x.size(-1))).pow(1.0 / self.p)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
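# A minimal sketch of the generalized-mean (GeM) pooling behaviour
# (sizes and the p values are illustrative): GeM computes
# (mean(x^p))^(1/p) per channel, so p = 1 recovers average pooling and
# large p approaches max pooling.
x = torch.rand(2, 3, 7, 7)
avg_like = gem(p=1.0)(x)
max_like = gem(p=100.0)(x)
print(torch.allclose(avg_like, x.mean(dim=(-2, -1), keepdim=True), atol=1e-4))
print(torch.allclose(max_like, x.amax(dim=(-2, -1), keepdim=True), atol=1e-1))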
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_clamp_pow_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp40 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp45 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp50 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp55 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp60 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp65 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp70 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp75 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp1 = 1e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp2
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = tmp6 * tmp6
tmp8 = tmp7 * tmp6
tmp9 = tmp8 + tmp4
tmp11 = triton_helpers.maximum(tmp10, tmp1)
tmp12 = tmp11 * tmp11
tmp13 = tmp12 * tmp11
tmp14 = tmp13 + tmp9
tmp16 = triton_helpers.maximum(tmp15, tmp1)
tmp17 = tmp16 * tmp16
tmp18 = tmp17 * tmp16
tmp19 = tmp18 + tmp14
tmp21 = triton_helpers.maximum(tmp20, tmp1)
tmp22 = tmp21 * tmp21
tmp23 = tmp22 * tmp21
tmp24 = tmp23 + tmp19
tmp26 = triton_helpers.maximum(tmp25, tmp1)
tmp27 = tmp26 * tmp26
tmp28 = tmp27 * tmp26
tmp29 = tmp28 + tmp24
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp31 * tmp31
tmp33 = tmp32 * tmp31
tmp34 = tmp33 + tmp29
tmp36 = triton_helpers.maximum(tmp35, tmp1)
tmp37 = tmp36 * tmp36
tmp38 = tmp37 * tmp36
tmp39 = tmp38 + tmp34
tmp41 = triton_helpers.maximum(tmp40, tmp1)
tmp42 = tmp41 * tmp41
tmp43 = tmp42 * tmp41
tmp44 = tmp43 + tmp39
tmp46 = triton_helpers.maximum(tmp45, tmp1)
tmp47 = tmp46 * tmp46
tmp48 = tmp47 * tmp46
tmp49 = tmp48 + tmp44
tmp51 = triton_helpers.maximum(tmp50, tmp1)
tmp52 = tmp51 * tmp51
tmp53 = tmp52 * tmp51
tmp54 = tmp53 + tmp49
tmp56 = triton_helpers.maximum(tmp55, tmp1)
tmp57 = tmp56 * tmp56
tmp58 = tmp57 * tmp56
tmp59 = tmp58 + tmp54
tmp61 = triton_helpers.maximum(tmp60, tmp1)
tmp62 = tmp61 * tmp61
tmp63 = tmp62 * tmp61
tmp64 = tmp63 + tmp59
tmp66 = triton_helpers.maximum(tmp65, tmp1)
tmp67 = tmp66 * tmp66
tmp68 = tmp67 * tmp66
tmp69 = tmp68 + tmp64
tmp71 = triton_helpers.maximum(tmp70, tmp1)
tmp72 = tmp71 * tmp71
tmp73 = tmp72 * tmp71
tmp74 = tmp73 + tmp69
tmp76 = triton_helpers.maximum(tmp75, tmp1)
tmp77 = tmp76 * tmp76
tmp78 = tmp77 * tmp76
tmp79 = tmp78 + tmp74
tmp80 = 0.0625
tmp81 = tmp79 * tmp80
tmp82 = 0.3333333333333333
tmp83 = libdevice.pow(tmp81, tmp82)
tl.store(in_out_ptr0 + x0, tmp83, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_avg_pool2d_clamp_pow_0[grid(16)](buf1, arg0_1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return buf1,
class gemNew(nn.Module):
def __init__(self, p=3.0, eps=1e-06):
super(gemNew, self).__init__()
self.p = p
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
MCC-WH/Token
|
gem
| false
| 8,521
|
[
"MIT"
] | 30
|
eadc301f2df9e1851633be1b63c273659af0da49
|
https://github.com/MCC-WH/Token/tree/eadc301f2df9e1851633be1b63c273659af0da49
|
FFNN1
|
import torch
import torch.utils.data
from torch import nn
class FFNN1(nn.Module):
def __init__(self, input_size, hidden_size, hidden_dropout_prob):
super(FFNN1, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.hidden_dropout_prob = hidden_dropout_prob
self.dropout = nn.Dropout(hidden_dropout_prob)
self.first_layer = nn.Linear(input_size, hidden_size)
self.classifier = nn.Linear(hidden_size, 1)
def forward(self, x):
x = self.first_layer(x)
x = nn.ReLU()(x)
x = self.dropout(x)
x = self.classifier(x)
return x
def __str__(self):
return f"""Input size: {self.input_size}
Hidden size: {self.hidden_size}
Dropout: {self.hidden_dropout_prob}
Output Size: 1
"""
def get_params_string(self):
return (
            f'{self.input_size}_{self.hidden_size}_{self.hidden_dropout_prob}'
)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'hidden_dropout_prob': 0.5}
]
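# A minimal usage sketch (the batch size is illustrative): a single hidden
# layer with ReLU and dropout, followed by a one-unit linear head.
net = FFNN1(input_size=4, hidden_size=4, hidden_dropout_prob=0.5)
net.eval()  # disable dropout for a deterministic forward pass
x = torch.rand(8, 4)
print(net(x).shape)  # torch.Size([8, 1])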
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf4
class FFNN1New(nn.Module):
def __init__(self, input_size, hidden_size, hidden_dropout_prob):
super(FFNN1New, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.hidden_dropout_prob = hidden_dropout_prob
self.dropout = nn.Dropout(hidden_dropout_prob)
self.first_layer = nn.Linear(input_size, hidden_size)
self.classifier = nn.Linear(hidden_size, 1)
def __str__(self):
return f"""Input size: {self.input_size}
Hidden size: {self.hidden_size}
Dropout: {self.hidden_dropout_prob}
Output Size: 1
"""
def get_params_string(self):
return (
            f'{self.input_size}_{self.hidden_size}_{self.hidden_dropout_prob}'
)
def forward(self, input_0):
primals_1 = self.first_layer.weight
primals_2 = self.first_layer.bias
primals_4 = self.classifier.weight
primals_5 = self.classifier.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
MaurizioFD/recsys-challenge-2020-twitter
|
FFNN1
| false
| 8,522
|
[
"Apache-2.0"
] | 44
|
95dc024fb4f8777aa62e1304536daece640428de
|
https://github.com/MaurizioFD/recsys-challenge-2020-twitter/tree/95dc024fb4f8777aa62e1304536daece640428de
|
DDM_Encoder
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('GRUCell') != -1:
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
class DDM_Encoder(torch.nn.Module):
def __init__(self, obs_space, dim, context_input=False, context_dim=0):
"""
        The architecture should be passed in as input, so that we can run multiple jobs.
"""
super(DDM_Encoder, self).__init__()
if context_input:
self.linear1 = nn.Linear(obs_space + context_dim, dim)
else:
self.linear1 = nn.Linear(obs_space, dim)
self.linear2 = nn.Linear(dim, 32 * 3 * 3)
self.fc = nn.Linear(32 * 3 * 3, dim)
self.apply(weights_init)
self.train()
def forward(self, inputs):
x = F.elu(self.linear1(inputs))
x = F.elu(self.linear2(x))
        x = torch.tanh(self.fc(x))  # F.tanh is deprecated; torch.tanh is equivalent
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'obs_space': 4, 'dim': 4}]
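# A minimal usage sketch (the batch size is illustrative): the encoder maps
# an obs_space-sized observation to a dim-sized latent; the final tanh
# bounds every latent coordinate to (-1, 1).
enc = DDM_Encoder(obs_space=4, dim=4)
obs = torch.rand(8, 4)
z = enc(obs)
print(z.shape, bool(z.abs().max() < 1))  # torch.Size([8, 4]) True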
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_elu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, None)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (288, 4), (4, 1))
assert_size_stride(primals_5, (288,), (1,))
assert_size_stride(primals_6, (4, 288), (288, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 288), (288, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 288), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 288), (4608, 1152, 288, 1),
torch.float32)
triton_poi_fused_elu_1[grid(18432)](buf2, buf3, 18432, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 288), (288, 1), 0),
reinterpret_tensor(primals_6, (288, 4), (1, 288), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf2, reinterpret_tensor(buf3, (64, 288), (288, 1), 0
), buf5, primals_6, primals_4
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('GRUCell') != -1:
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
class DDM_EncoderNew(torch.nn.Module):
def __init__(self, obs_space, dim, context_input=False, context_dim=0):
"""
        The architecture should be passed in as input, so that we can run multiple jobs.
"""
super(DDM_EncoderNew, self).__init__()
if context_input:
self.linear1 = nn.Linear(obs_space + context_dim, dim)
else:
self.linear1 = nn.Linear(obs_space, dim)
self.linear2 = nn.Linear(dim, 32 * 3 * 3)
self.fc = nn.Linear(32 * 3 * 3, dim)
self.apply(weights_init)
self.train()
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.fc.weight
primals_7 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
MLforHealth/state_representations_for_RLinHealth
|
DDM_Encoder
| false
| 8,523
|
[
"MIT"
] | 24
|
aa8dbb7d56caa95bf4380e3e745e134996291b66
|
https://github.com/MLforHealth/state_representations_for_RLinHealth/tree/aa8dbb7d56caa95bf4380e3e745e134996291b66
|
FFNNDual
|
import torch
import torch.utils.data
from torch import nn
class FFNNDual(nn.Module):
def __init__(self, input_size, hidden_size_1, hidden_size_2,
hidden_dropout_prob_1, hidden_dropout_prob_2):
super(FFNNDual, self).__init__()
self.input_size = input_size
self.hidden_size_1 = hidden_size_1
self.hidden_size_2 = hidden_size_2
self.hidden_dropout_prob_1 = hidden_dropout_prob_1
self.hidden_dropout_prob_2 = hidden_dropout_prob_2
self.dropout_1 = nn.Dropout(hidden_dropout_prob_1)
self.dropout_2 = nn.Dropout(hidden_dropout_prob_2)
self.first_layer = nn.Linear(input_size, hidden_size_1)
self.second_layer = nn.Linear(hidden_size_1, hidden_size_2)
self.classifier = nn.Linear(hidden_size_2, 2)
def forward(self, x):
x = self.first_layer(x)
x = nn.ReLU()(x)
x = self.dropout_1(x)
x = self.second_layer(x)
x = nn.ReLU()(x)
x = self.dropout_2(x)
x = self.classifier(x)
return x
def __str__(self):
return f"""Input size: {self.input_size}
Hidden size 1: {self.hidden_size_1}
Hidden size 2: {self.hidden_size_2}
Dropout 1: {self.hidden_dropout_prob_1}
Dropout 2: {self.hidden_dropout_prob_2}
Output Size: 2
"""
def get_params_string(self):
return (
f'dual_output_{self.input_size}_{self.hidden_size_1}_{self.hidden_size_2}_{self.hidden_dropout_prob_1}_{self.hidden_dropout_prob_2}'
)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size_1': 4, 'hidden_size_2': 4,
'hidden_dropout_prob_1': 0.5, 'hidden_dropout_prob_2': 0.5}]
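# A minimal usage sketch (the batch size is illustrative): two hidden
# layers, each with ReLU and its own dropout rate, and a 2-logit head.
net = FFNNDual(input_size=4, hidden_size_1=4, hidden_size_2=4,
    hidden_dropout_prob_1=0.5, hidden_dropout_prob_2=0.5)
net.eval()  # disable both dropout layers for a deterministic forward pass
x = torch.rand(8, 4)
print(net(x).shape)  # torch.Size([8, 2])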
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (2, 4), (4, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 2), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), primals_6, buf5, primals_4, buf6
class FFNNDualNew(nn.Module):
def __init__(self, input_size, hidden_size_1, hidden_size_2,
hidden_dropout_prob_1, hidden_dropout_prob_2):
super(FFNNDualNew, self).__init__()
self.input_size = input_size
self.hidden_size_1 = hidden_size_1
self.hidden_size_2 = hidden_size_2
self.hidden_dropout_prob_1 = hidden_dropout_prob_1
self.hidden_dropout_prob_2 = hidden_dropout_prob_2
self.dropout_1 = nn.Dropout(hidden_dropout_prob_1)
self.dropout_2 = nn.Dropout(hidden_dropout_prob_2)
self.first_layer = nn.Linear(input_size, hidden_size_1)
self.second_layer = nn.Linear(hidden_size_1, hidden_size_2)
self.classifier = nn.Linear(hidden_size_2, 2)
def __str__(self):
return f"""Input size: {self.input_size}
Hidden size 1: {self.hidden_size_1}
Hidden size 2: {self.hidden_size_2}
Dropout 1: {self.hidden_dropout_prob_1}
Dropout 2: {self.hidden_dropout_prob_2}
Output Size: 2
"""
def get_params_string(self):
return (
f'dual_output_{self.input_size}_{self.hidden_size_1}_{self.hidden_size_2}_{self.hidden_dropout_prob_1}_{self.hidden_dropout_prob_2}'
)
def forward(self, input_0):
primals_1 = self.first_layer.weight
primals_2 = self.first_layer.bias
primals_4 = self.second_layer.weight
primals_5 = self.second_layer.bias
primals_6 = self.classifier.weight
primals_7 = self.classifier.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
repo_name: MaurizioFD/recsys-challenge-2020-twitter | module_name: FFNNDual | synthetic: false | uuid: 8524 | licenses: ["Apache-2.0"] | stars: 44 | sha: 95dc024fb4f8777aa62e1304536daece640428de | repo_link: https://github.com/MaurizioFD/recsys-challenge-2020-twitter/tree/95dc024fb4f8777aa62e1304536daece640428de
|
FFNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FFNet(nn.Module):
"""Modified ResidualNetworkSegment model class"""
def __init__(self, block, num_blocks, width, depth):
super(FFNet, self).__init__()
assert (depth - 4
            ) % 4 == 0, 'Depth not compatible with recurrent architecture.'
self.iters = (depth - 4) // 4
self.in_planes = int(width)
self.conv1 = nn.Conv1d(1, width, kernel_size=3, stride=1, padding=1,
bias=False)
layers = []
for _ in range(self.iters):
for i in range(len(num_blocks)):
layers.append(self._make_layer(block, width, num_blocks[i],
stride=1))
self.recur_block = nn.Sequential(*layers)
self.conv2 = nn.Conv1d(width, width, kernel_size=3, stride=1,
padding=1, bias=False)
self.conv3 = nn.Conv1d(width, int(width / 2), kernel_size=3, stride
=1, padding=1, bias=False)
self.conv4 = nn.Conv1d(int(width / 2), 2, kernel_size=3, stride=1,
padding=1, bias=False)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for strd in strides:
layers.append(block(self.in_planes, planes, strd))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.conv1(x))
out = self.recur_block(out)
thought = F.relu(self.conv2(out))
thought = F.relu(self.conv3(thought))
thought = self.conv4(thought)
return thought
def get_inputs():
return [torch.rand([4, 1, 64])]
def get_init_inputs():
return [[], {'block': 4, 'num_blocks': 4, 'width': 4, 'depth': 4}]
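# Hedged worked example of the depth arithmetic above (illustrative, not from the
# source repo): iters = (depth - 4) // 4, so depth=12 unrolls the recurrent block
# twice, and the depth=4 used in get_init_inputs() gives iters=0 (an empty
# recur_block), which is why num_blocks=4 (a non-iterable int) never raises.
assert (12 - 4) // 4 == 2 and (4 - 4) // 4 == 0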
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3), (3, 3, 1))
assert_size_stride(primals_2, (4, 1, 64), (64, 64, 1))
assert_size_stride(primals_3, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_4, (2, 4, 3), (12, 3, 1))
assert_size_stride(primals_5, (2, 2, 3), (6, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64), (256, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1024)](buf1, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 64), (256, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(1024)](buf3, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 2, 64), (128, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_relu_1[grid(512)](buf5, 512, XBLOCK=256, num_warps
=4, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf6, (4, 2, 64), (128, 64, 1))
return (buf6, primals_1, primals_2, primals_3, primals_4, primals_5,
buf1, buf3, buf5)
class FFNetNew(nn.Module):
"""Modified ResidualNetworkSegment model class"""
def __init__(self, block, num_blocks, width, depth):
super(FFNetNew, self).__init__()
assert (depth - 4
            ) % 4 == 0, 'Depth not compatible with recurrent architecture.'
self.iters = (depth - 4) // 4
self.in_planes = int(width)
self.conv1 = nn.Conv1d(1, width, kernel_size=3, stride=1, padding=1,
bias=False)
layers = []
for _ in range(self.iters):
for i in range(len(num_blocks)):
layers.append(self._make_layer(block, width, num_blocks[i],
stride=1))
self.recur_block = nn.Sequential(*layers)
self.conv2 = nn.Conv1d(width, width, kernel_size=3, stride=1,
padding=1, bias=False)
self.conv3 = nn.Conv1d(width, int(width / 2), kernel_size=3, stride
=1, padding=1, bias=False)
self.conv4 = nn.Conv1d(int(width / 2), 2, kernel_size=3, stride=1,
padding=1, bias=False)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for strd in strides:
layers.append(block(self.in_planes, planes, strd))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
repo_name: Maosef/easy-to-hard | module_name: FFNet | synthetic: false | uuid: 8525 | licenses: ["MIT"] | stars: 44 | sha: 711ec0965229444a6c51b1b06a4e2cad3e32d02e | repo_link: https://github.com/Maosef/easy-to-hard/tree/711ec0965229444a6c51b1b06a4e2cad3e32d02e
|
Net
|
import torch
import torch.nn as nn
import torch.utils
class Net(nn.Module):
def __init__(self, n_inputs, n_units=50):
super(Net, self).__init__()
self.fc = nn.Linear(n_inputs, n_units)
self.out = nn.Linear(n_units, 1)
def forward(self, x):
x = torch.tanh(self.fc(x))
return torch.sigmoid(self.out(x))
def basis_funcs(self, x):
return torch.tanh(self.fc(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_inputs': 4}]
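# Hedged usage sketch (the variable names here are illustrative assumptions, not
# from the source repo): the network maps n_inputs features to one probability,
# while basis_funcs exposes the tanh hidden layer, e.g. as fixed basis functions
# for a Bayesian linear head.
net = Net(n_inputs=4)
x = torch.rand(8, 4)
probs = net(x)                 # shape (8, 1), values in (0, 1)
features = net.basis_funcs(x)  # shape (8, 50), tanh activations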
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
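# Hedged eager-mode reading of the two kernels above (a sketch, not part of the
# generated code): the matmuls are dispatched to extern_kernels.mm below, so each
# kernel only fuses a bias add with its activation:
#   hidden = torch.tanh(mm_out + fc_bias)    # triton_poi_fused_tanh_0
#   prob = torch.sigmoid(mm_out + out_bias)  # triton_poi_fused_sigmoid_1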
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (50, 4), (4, 1))
assert_size_stride(primals_2, (50,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 50), (50, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(3200)](buf1, primals_2, 3200, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 50), (50, 1), 0),
reinterpret_tensor(primals_4, (50, 1), (1, 50), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf2
triton_poi_fused_sigmoid_1[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_4
class NetNew(nn.Module):
def __init__(self, n_inputs, n_units=50):
super(NetNew, self).__init__()
self.fc = nn.Linear(n_inputs, n_units)
self.out = nn.Linear(n_units, 1)
def basis_funcs(self, x):
return torch.tanh(self.fc(x))
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_4 = self.out.weight
primals_5 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
repo_name: MSU-MLSys-Lab/CATE | module_name: Net | synthetic: false | uuid: 8526 | licenses: ["Apache-2.0"] | stars: 15 | sha: 654c393d7df888d2c3f3b90f9e6752faa061157e | repo_link: https://github.com/MSU-MLSys-Lab/CATE/tree/654c393d7df888d2c3f3b90f9e6752faa061157e
|
VGGOutputBlock
|
import torch
import torch.nn as nn
class VGGDense(nn.Module):
def __init__(self, in_channels, out_channels):
super(VGGDense, self).__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=out_channels)
self.activ = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(p=0.5)
def forward(self, x):
x = self.fc(x)
x = self.activ(x)
x = self.dropout(x)
return x
class VGGOutputBlock(nn.Module):
def __init__(self, in_channels, classes):
super(VGGOutputBlock, self).__init__()
mid_channels = 4096
self.fc1 = VGGDense(in_channels=in_channels, out_channels=mid_channels)
self.fc2 = VGGDense(in_channels=mid_channels, out_channels=mid_channels
)
self.fc3 = nn.Linear(in_features=mid_channels, out_features=classes)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'classes': 4}]
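# Hedged shape walk-through (an illustrative sketch, not from the source repo):
# each nn.Linear acts on the last dimension, so fc1 maps in_channels -> 4096,
# fc2 maps 4096 -> 4096, and fc3 maps 4096 -> classes.
block = VGGOutputBlock(in_channels=4, classes=4)
y = block(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 4, 4)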
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, None)
tl.store(out_ptr0 + x4, tmp6, None)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4096
x1 = xindex // 4096
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 16384 * (x1 % 4 // 4) +
65536 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), None)
tl.store(out_ptr0 + x2, tmp0, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4096, 4), (4, 1))
assert_size_stride(primals_2, (4096,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4096, 4096), (4096, 1))
assert_size_stride(primals_5, (4096,), (1,))
assert_size_stride(primals_6, (4, 4096), (4096, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4096), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4096), (65536, 16384,
4096, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 4096), (65536, 16384, 4096, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(262144)](buf1,
primals_2, buf8, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
triton_poi_fused_view_1[grid(262144)](buf1, buf2, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4096), (4096, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4096, 4096),
(1, 4096), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4096), (65536, 16384,
4096, 1), 0)
del buf3
buf7 = empty_strided_cuda((4, 4, 4, 4096), (65536, 16384, 4096, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(262144)](buf4,
primals_5, buf7, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
triton_poi_fused_view_1[grid(262144)](buf4, buf5, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del buf4
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6,
(4096, 4), (1, 4096), 0), alpha=1, beta=1, out=buf6)
del primals_7
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, buf5, primals_6, buf7, primals_4, buf8
class VGGDense(nn.Module):
def __init__(self, in_channels, out_channels):
super(VGGDense, self).__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=out_channels)
self.activ = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(p=0.5)
def forward(self, x):
x = self.fc(x)
x = self.activ(x)
x = self.dropout(x)
return x
class VGGOutputBlockNew(nn.Module):
def __init__(self, in_channels, classes):
super(VGGOutputBlockNew, self).__init__()
mid_channels = 4096
self.fc1 = VGGDense(in_channels=in_channels, out_channels=mid_channels)
self.fc2 = VGGDense(in_channels=mid_channels, out_channels=mid_channels
)
self.fc3 = nn.Linear(in_features=mid_channels, out_features=classes)
def forward(self, input_0):
primals_1 = self.fc1.fc.weight
primals_2 = self.fc1.fc.bias
primals_4 = self.fc2.fc.weight
primals_5 = self.fc2.fc.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
repo_name: MarioMZhang/HAP-tryout | module_name: VGGOutputBlock | synthetic: false | uuid: 8527 | licenses: ["MIT"] | stars: 24 | sha: 9a423f35b50766533a0d2cab8069316ccb21954b | repo_link: https://github.com/MarioMZhang/HAP-tryout/tree/9a423f35b50766533a0d2cab8069316ccb21954b
|
GlobalAttentionGeneral
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.onnx
def conv1x1(in_planes, out_planes, bias=False):
"""1x1 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
padding=0, bias=bias)
class GlobalAttentionGeneral(nn.Module):
def __init__(self, idf, cdf):
super(GlobalAttentionGeneral, self).__init__()
self.conv_context = conv1x1(cdf, idf)
        self.sm = nn.Softmax(dim=1)  # attn is flattened to 2-D before the softmax
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input, context):
"""
input: batch x idf x ih x iw (queryL=ihxiw)
context: batch x cdf x sourceL
"""
ih, iw = input.size(2), input.size(3)
queryL = ih * iw
batch_size, sourceL = context.size(0), context.size(2)
target = input.view(batch_size, -1, queryL)
targetT = torch.transpose(target, 1, 2).contiguous()
sourceT = context.unsqueeze(3)
sourceT = self.conv_context(sourceT).squeeze(3)
attn = torch.bmm(targetT, sourceT)
attn = attn.view(batch_size * queryL, sourceL)
if self.mask is not None:
mask = self.mask.repeat(queryL, 1)
attn.data.masked_fill_(mask.data.bool(), -float('inf'))
attn = self.sm(attn)
attn = attn.view(batch_size, queryL, sourceL)
attn = torch.transpose(attn, 1, 2).contiguous()
weightedContext = torch.bmm(sourceT, attn)
weightedContext = weightedContext.view(batch_size, -1, ih, iw)
attn = attn.view(batch_size, -1, ih, iw)
return weightedContext, attn
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'idf': 4, 'cdf': 4}]
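# Hedged shape check for the docstring above (an illustrative sketch, not from the
# source repo): with input of batch x idf x ih x iw and context of batch x cdf x
# sourceL, the module returns a weighted context of batch x idf x ih x iw and an
# attention map of batch x sourceL x ih x iw.
attn_mod = GlobalAttentionGeneral(idf=4, cdf=4)
ctx, attn = attn_mod(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4))
assert ctx.shape == (4, 4, 4, 4) and attn.shape == (4, 4, 4, 4)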
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4
y3 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask)
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
tl.store(out_ptr1 + (y2 + 4 * x1 + 64 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4,
4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding=
(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0
), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1))
buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_transpose_0[grid(16, 16)](primals_1, buf1,
buf6, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4,
1), 0), out=buf2)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), buf4, out=buf5)
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_3, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1,
1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), buf6
def conv1x1(in_planes, out_planes, bias=False):
"""1x1 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
padding=0, bias=bias)
class GlobalAttentionGeneralNew(nn.Module):
def __init__(self, idf, cdf):
super(GlobalAttentionGeneralNew, self).__init__()
self.conv_context = conv1x1(cdf, idf)
        self.sm = nn.Softmax(dim=1)  # attn is flattened to 2-D before the softmax
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input_0, input_1):
primals_3 = self.conv_context.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
|
repo_name: MaxyLee/Style-AttnGAN | module_name: GlobalAttentionGeneral | synthetic: false | uuid: 8528 | licenses: ["MIT"] | stars: 36 | sha: d33d0df061c94b75ad4af5c750b8d6f37ee1a35a | repo_link: https://github.com/MaxyLee/Style-AttnGAN/tree/d33d0df061c94b75ad4af5c750b8d6f37ee1a35a
|
FFModule
|
import torch
import torch.nn as nn
def swish(x):
return x * torch.sigmoid(x)
class FFModule(nn.Module):
def __init__(self, d_model, h_size, dropout=0.2):
super(FFModule, self).__init__()
self.layer_norm = nn.LayerNorm(d_model)
self.layer1 = nn.Linear(d_model, h_size)
self.swish_activation = swish
self.dropout = nn.Dropout(dropout)
self.layer2 = nn.Linear(h_size, d_model)
def forward(self, inputs):
x = self.layer_norm(inputs)
x = self.layer1(x)
x = self.swish_activation(x)
x = self.dropout(x)
x = self.layer2(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'h_size': 4}]
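# Hedged equivalence check (an illustrative sketch, not from the source repo):
# swish(x) = x * sigmoid(x) is exactly the SiLU activation that PyTorch ships,
# so the two should match to floating-point precision.
t = torch.randn(4, 4)
assert torch.allclose(swish(t), torch.nn.functional.silu(t))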
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
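# Hedged eager-mode reading of the kernel above (a sketch, not part of the
# generated code): it is the element-wise swish from the source module,
# out = x * sigmoid(x).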
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_2[grid(256)](buf3, buf4, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0
), primals_6, primals_4
def swish(x):
return x * torch.sigmoid(x)
class FFModuleNew(nn.Module):
def __init__(self, d_model, h_size, dropout=0.2):
super(FFModuleNew, self).__init__()
self.layer_norm = nn.LayerNorm(d_model)
self.layer1 = nn.Linear(d_model, h_size)
self.swish_activation = swish
self.dropout = nn.Dropout(dropout)
self.layer2 = nn.Linear(h_size, d_model)
def forward(self, input_0):
primals_1 = self.layer_norm.weight
primals_2 = self.layer_norm.bias
primals_4 = self.layer1.weight
primals_5 = self.layer1.bias
primals_6 = self.layer2.weight
primals_7 = self.layer2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
repo_name: Masao-Someki/Conformer | module_name: FFModule | synthetic: false | uuid: 8529 | licenses: ["MIT"] | stars: 18 | sha: 866da9ae05a6d07304775c592caac8d516f67c92 | repo_link: https://github.com/Masao-Someki/Conformer/tree/866da9ae05a6d07304775c592caac8d516f67c92
|
BasicBlock
|
import torch
import torch.nn as nn
from abc import ABC
import torch.utils.data
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module, ABC):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.Sequential()
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.Sequential()
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=False))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'planes': 4}]
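# Hedged illustration of the shortcut rule above (a sketch, not from the source
# repo): the shortcut stays an empty identity unless the stride or the channel
# count changes, in which case a 1x1 strided conv projects the input.
same = BasicBlock(in_planes=4, planes=4)            # identity shortcut
proj = BasicBlock(in_planes=4, planes=8, stride=2)  # 1x1 conv shortcut
assert len(list(same.shortcut.children())) == 0
assert len(list(proj.shortcut.children())) == 1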
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from abc import ABC
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf3,
primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, primals_3, buf1, buf4
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlockNew(nn.Module, ABC):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlockNew, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.Sequential()
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.Sequential()
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=False))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
repo_name: Mattdl/RehearsalRevealed | module_name: BasicBlock | synthetic: false | uuid: 8530 | licenses: ["MIT"] | stars: 12 | sha: f9cd2548f6c6d3ff119b40fecdb0df6fcd1525f6 | repo_link: https://github.com/Mattdl/RehearsalRevealed/tree/f9cd2548f6c6d3ff119b40fecdb0df6fcd1525f6
|
EncoderLayer
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
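        # NOTE: the reshaped mask is never applied to the scores below.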
mask = mask.unsqueeze(1)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=512, dropout=0.5):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.5):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = attention(q, k, v, self.d_k, mask, self.dropout)
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
class Norm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.size = d_model
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim
=-1, keepdim=True) + self.eps) + self.bias
return norm
class EncoderLayer(nn.Module):
def __init__(self, d_model, heads, dropout=0.5):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.attn = MultiHeadAttention(heads, d_model, dropout)
self.ff = FeedForward(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x, mask):
x2 = self.norm_1(x)
x = x + self.dropout_1(self.attn(x2, x2, x2, mask))
x2 = self.norm_2(x)
x = x + self.dropout_2(self.ff(x2))
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'heads': 4}]
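# Hedged comparison for the Norm class above (an illustrative sketch, not from the
# source repo): torch.Tensor.std defaults to the unbiased (n-1) estimator and the
# eps is added outside the square root, so Norm only approximates nn.LayerNorm,
# which divides by sqrt(biased_var + eps).
x = torch.randn(4, 4)
norm, ln = Norm(d_model=4), nn.LayerNorm(4, eps=1e-06)
print(torch.max(torch.abs(norm(x) - ln(x))))  # small but generally non-zero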
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp6 = tmp5 == 0
tmp7 = tmp0 - tmp0
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp8 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp6, tmp10, tmp9)
tl.store(in_out_ptr0 + x0, tmp11, xmask)
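# Hedged reading of the kernel above (a sketch, not part of the generated code):
# with heads == d_model each attention row has length 1, so the softmax reduces to
# exp(x - x) / exp(x - x) == 1, and rows that are entirely -inf are zeroed out.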
@triton.jit
def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(in_out_ptr0 + x2, tmp29, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex % 16
x2 = xindex // 16
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x5, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_out_ptr0 + x4, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x4, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (512, 4), (4, 1))
assert_size_stride(primals_16, (512,), (1,))
assert_size_stride(primals_17, (4, 512), (512, 1))
assert_size_stride(primals_18, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(16)](primals_1,
primals_2, primals_3, buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_1
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_6, (4, 4), (1, 4
), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, buf0, reinterpret_tensor(primals_8,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_9
buf4 = reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 16, 16), 0)
del buf2
triton_poi_fused_1[grid(16)](buf4, primals_7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_7
buf5 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 16, 16), 0)
del buf1
triton_poi_fused_1[grid(16)](buf5, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 1, 1), (1, 0, 0),
0), reinterpret_tensor(buf5, (16, 1, 1), (1, 0, 0), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf6
triton_poi_fused_2[grid(16)](buf7, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf8 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 1, 1), (1, 1, 1),
0), reinterpret_tensor(buf3, (16, 1, 1), (1, 1, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_12, reinterpret_tensor(buf8, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf9)
del primals_12
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf12 = buf11
del buf11
triton_poi_fused_add_mean_std_3[grid(16)](buf12, primals_2, buf9,
buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_4[grid(64)](primals_13,
primals_2, buf9, buf10, buf12, primals_14, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf10
del buf12
del primals_14
buf14 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 512), (1, 4), 0), out=buf14)
buf15 = reinterpret_tensor(buf14, (4, 4, 512), (2048, 512, 1), 0)
del buf14
buf18 = empty_strided_cuda((4, 4, 512), (2048, 512, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(8192)](buf15,
primals_16, buf18, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_16
buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf15, (16, 512), (512, 1), 0),
reinterpret_tensor(primals_17, (512, 4), (1, 512), 0), out=buf16)
buf17 = reinterpret_tensor(buf16, (4, 4, 4), (16, 4, 1), 0)
del buf16
triton_poi_fused_add_6[grid(64)](buf17, primals_2, buf9, primals_18,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_18
return (buf17, primals_2, primals_13, buf0, buf7, reinterpret_tensor(
buf3, (16, 1, 1), (1, 1, 4), 0), reinterpret_tensor(buf4, (16, 1, 1
), (1, 1, 4), 0), reinterpret_tensor(buf5, (16, 1, 1), (1, 4, 1), 0
), reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9,
reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(
buf15, (16, 512), (512, 1), 0), primals_17, buf18, primals_15,
primals_11, primals_8, primals_6, primals_4)
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
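        # NOTE: the reshaped mask is never applied to the scores below.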
mask = mask.unsqueeze(1)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=512, dropout=0.5):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.5):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = attention(q, k, v, self.d_k, mask, self.dropout)
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
class Norm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.size = d_model
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim
=-1, keepdim=True) + self.eps) + self.bias
return norm
class EncoderLayerNew(nn.Module):
def __init__(self, d_model, heads, dropout=0.5):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.attn = MultiHeadAttention(heads, d_model, dropout)
self.ff = FeedForward(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, input_0, input_1):
primals_1 = self.norm_1.alpha
primals_3 = self.norm_1.bias
primals_5 = self.norm_2.alpha
primals_7 = self.norm_2.bias
primals_2 = self.attn.q_linear.weight
primals_9 = self.attn.q_linear.bias
primals_4 = self.attn.v_linear.weight
primals_12 = self.attn.v_linear.bias
primals_6 = self.attn.k_linear.weight
primals_13 = self.attn.k_linear.bias
primals_8 = self.attn.out.weight
primals_14 = self.attn.out.bias
primals_15 = self.ff.linear_1.weight
primals_16 = self.ff.linear_1.bias
primals_17 = self.ff.linear_2.weight
primals_18 = self.ff.linear_2.bias
primals_10 = input_0
primals_11 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18])
return output[0]
|
repo_name: MadanMl/PyTorch-Transformer-for-RUL-Prediction | module_name: EncoderLayer | synthetic: false | uuid: 8531 | licenses: ["Apache-2.0"] | stars: 25 | sha: 5bf0a4739abdecbbc88118ea413393997bdc1e24 | repo_link: https://github.com/MadanMl/PyTorch-Transformer-for-RUL-Prediction/tree/5bf0a4739abdecbbc88118ea413393997bdc1e24
|
MultiHeadedAttention
|
import math
import torch
from typing import Optional
from typing import Tuple
from torch import nn
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
"""Construct an MultiHeadedAttention object."""
super().__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
return q, k, v
def forward_attention(self, value: 'torch.Tensor', scores:
'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor:
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (torch.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill(mask, -float('inf'))
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb:
'torch.Tensor'=torch.empty(0)) ->torch.Tensor:
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
1.When applying cross attention between decoder and encoder,
the batch padding mask for input is in (#batch, 1, T) shape.
2.When applying self attention of encoder,
the mask is in (#batch, T, T) shape.
3.When applying self attention of decoder,
the mask is in (#batch, L, L) shape.
4.If the different position in decoder see different block
of the encoder, such as Mocha, the passed in mask could be
in (#batch, L, T) shape. But there is no such case in current
Wenet.
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'n_feat': 4, 'dropout_rate': 0.5}]
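# Hedged shape walk-through for forward_qkv above (an illustrative sketch, not
# from the source repo): with n_head=4 and n_feat=4 each head gets d_k = 1, and
# every projected tensor comes back as (#batch, n_head, time, d_k).
mha = MultiHeadedAttention(n_head=4, n_feat=4, dropout_rate=0.0)
q, k, v = mha.forward_qkv(torch.rand(4, 4, 4), torch.rand(4, 4, 4),
    torch.rand(4, 4, 4))
assert q.shape == k.shape == v.shape == (4, 4, 4, 1)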
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from typing import Optional
from typing import Tuple
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last').to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = float('-inf')
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x3, tmp20, xmask)
tl.store(out_ptr1 + x3, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 16
x5 = xindex
x6 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + x5, xmask)
tmp6 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x6, xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = float('-inf')
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tmp11 = 0.0
tmp12 = tl.where(tmp0, tmp11, tmp10)
tl.store(out_ptr0 + x5, tmp12, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_eq_1[grid(64)](primals_10, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_10
buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf5,
buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_div_masked_fill_3[grid(256)](buf6, buf5,
buf7, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_8, buf10, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_12
return reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf5, buf6, reinterpret_tensor(buf12, (16, 4), (4, 1), 0
), primals_11, reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class MultiHeadedAttentionNew(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
"""Construct an MultiHeadedAttention object."""
super().__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
return q, k, v
def forward_attention(self, value: 'torch.Tensor', scores:
'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor:
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (torch.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill(mask, -float('inf'))
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
def forward(self, input_0, input_1, input_2, input_3):
primals_2 = self.linear_q.weight
primals_3 = self.linear_q.bias
primals_4 = self.linear_k.weight
primals_5 = self.linear_k.bias
primals_7 = self.linear_v.weight
primals_8 = self.linear_v.bias
primals_11 = self.linear_out.weight
primals_12 = self.linear_out.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
primals_10 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
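# Usage sketch (illustrative addition, not from the original repo): a minimal
# smoke test for the compiled module. It assumes a CUDA device (call() pins
# device 0) and reuses the shapes from get_inputs(). Note the traced graph
# applies no dropout, so it matches the eager module in eval mode.
if __name__ == '__main__':
    mha = MultiHeadedAttentionNew(n_head=4, n_feat=4, dropout_rate=0.5).cuda()
    q = torch.rand(4, 4, 4, device='cuda')
    k = torch.rand(4, 4, 4, device='cuda')
    v = torch.rand(4, 4, 4, device='cuda')
    mask = torch.ones(4, 4, 4, device='cuda')  # nonzero = position visible
    out = mha(q, k, v, mask)
    assert out.shape == (4, 4, 4)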
|
Mashiro083/wenet-onnx
|
MultiHeadedAttention
| false
| 8,532
|
[
"Apache-2.0"
] | 18
|
ae8f8451d73fa9ceac6f7738194543e83959ca86
|
https://github.com/Mashiro083/wenet-onnx/tree/ae8f8451d73fa9ceac6f7738194543e83959ca86
|
SMAPE
|
import torch
class SMAPE(torch.nn.Module):
"""Symmetric Mean Absolute error.
:math:`\\frac{|x - y|} {|x| + |y| + \\epsilon}`
Args:
eps(float): small number to avoid division by 0.
"""
def __init__(self, eps=0.01):
super(SMAPE, self).__init__()
self.eps = eps
def forward(self, im, ref):
loss = (torch.abs(im - ref) / (self.eps + torch.abs(im.detach()) +
torch.abs(ref.detach()))).mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl_math.abs(tmp0)
tmp5 = 0.01
tmp6 = tmp4 + tmp5
tmp7 = tl_math.abs(tmp1)
tmp8 = tmp6 + tmp7
tmp9 = tmp3 / tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 256.0
tmp14 = tmp12 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_div_mean_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SMAPENew(torch.nn.Module):
"""Symmetric Mean Absolute error.
:math:`\\frac{|x - y|} {|x| + |y| + \\epsilon}`
Args:
eps(float): small number to avoid division by 0.
"""
def __init__(self, eps=0.01):
super(SMAPENew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
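# Usage sketch (illustrative addition, not from the original repo): checks the
# fused reduction against an eager computation of the same formula. Assumes a
# CUDA device and the 4x4x4x4 shape the kernel was specialized for (RBLOCK=256).
if __name__ == '__main__':
    im = torch.rand(4, 4, 4, 4, device='cuda')
    ref = torch.rand(4, 4, 4, 4, device='cuda')
    loss = SMAPENew()(im, ref)
    expected = (torch.abs(im - ref) / (0.01 + im.abs() + ref.abs())).mean()
    assert torch.allclose(loss, expected, atol=1e-5)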
|
Mephisto405/WCMC-Public
|
SMAPE
| false
| 8,533
|
[
"BSD-2-Clause"
] | 19
|
bd54f218d5239db84f404fbe1b465f9497bcf9e4
|
https://github.com/Mephisto405/WCMC-Public/tree/bd54f218d5239db84f404fbe1b465f9497bcf9e4
|
baseRNN_predict
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.init as init
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('GRUCell') != -1:
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
class baseRNN_predict(nn.Module):
def __init__(self, h_size, obs_dim, num_actions, context_input=False):
super(baseRNN_predict, self).__init__()
self.l1 = nn.Linear(h_size, 64)
self.l2 = nn.Linear(64, 128)
self.l3 = nn.Linear(128, obs_dim)
self.apply(weights_init)
def forward(self, h):
h = torch.relu(self.l1(h))
h = torch.relu(self.l2(h))
obs = self.l3(h)
return obs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'h_size': 4, 'obs_dim': 4, 'num_actions': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 64), (64, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (4, 128), (128, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_2, buf6, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 128), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3,
primals_5, buf5, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(
buf3, (64, 128), (128, 1), 0), primals_6, buf5, primals_4, buf6
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('GRUCell') != -1:
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
class baseRNN_predictNew(nn.Module):
def __init__(self, h_size, obs_dim, num_actions, context_input=False):
super(baseRNN_predictNew, self).__init__()
self.l1 = nn.Linear(h_size, 64)
self.l2 = nn.Linear(64, 128)
self.l3 = nn.Linear(128, obs_dim)
self.apply(weights_init)
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
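# Usage sketch (illustrative addition, not from the original repo): the
# compiled predictor maps a hidden state to an observation of the same
# trailing size. Assumes a CUDA device; shapes follow get_inputs().
if __name__ == '__main__':
    model = baseRNN_predictNew(h_size=4, obs_dim=4, num_actions=4).cuda()
    h = torch.rand(4, 4, 4, 4, device='cuda')
    obs = model(h)
    assert obs.shape == (4, 4, 4, 4)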
|
MLforHealth/state_representations_for_RLinHealth
|
baseRNN_predict
| false
| 8,534
|
[
"MIT"
] | 24
|
aa8dbb7d56caa95bf4380e3e745e134996291b66
|
https://github.com/MLforHealth/state_representations_for_RLinHealth/tree/aa8dbb7d56caa95bf4380e3e745e134996291b66
|
LocalStatisticsNetwork
|
import torch
import torch.nn as nn
class LocalStatisticsNetwork(nn.Module):
def __init__(self, img_feature_channels: 'int'):
"""Local statistique nerwork
Args:
img_feature_channels (int): [Number of input channels]
"""
super().__init__()
self.conv1 = nn.Conv2d(in_channels=img_feature_channels,
out_channels=512, kernel_size=1, stride=1)
self.conv2 = nn.Conv2d(in_channels=512, out_channels=512,
kernel_size=1, stride=1)
self.conv3 = nn.Conv2d(in_channels=512, out_channels=1, kernel_size
=1, stride=1)
self.relu = nn.ReLU()
def forward(self, concat_feature: 'torch.Tensor') ->torch.Tensor:
x = self.conv1(concat_feature)
x = self.relu(x)
x = self.conv2(x)
x = self.relu(x)
local_statistics = self.conv3(x)
return local_statistics
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'img_feature_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (512, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (512,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (512, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (1, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 16)](primals_3, buf0, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 512, 4, 4), (8192, 1, 2048, 512))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(32768)](buf2, primals_2,
32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 512, 4, 4), (8192, 1, 2048, 512))
buf4 = buf3
del buf3
triton_poi_fused_convolution_relu_1[grid(32768)](buf4, primals_5,
32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 4, 4), (16, 1, 4, 1))
buf6 = reinterpret_tensor(buf5, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf5
triton_poi_fused_convolution_2[grid(64)](buf6, primals_7, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
return buf6, primals_1, buf0, primals_4, primals_6, buf2, buf4
class LocalStatisticsNetworkNew(nn.Module):
def __init__(self, img_feature_channels: 'int'):
"""Local statistique nerwork
Args:
img_feature_channels (int): [Number of input channels]
"""
super().__init__()
self.conv1 = nn.Conv2d(in_channels=img_feature_channels,
out_channels=512, kernel_size=1, stride=1)
self.conv2 = nn.Conv2d(in_channels=512, out_channels=512,
kernel_size=1, stride=1)
self.conv3 = nn.Conv2d(in_channels=512, out_channels=1, kernel_size
=1, stride=1)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
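# Usage sketch (illustrative addition, not from the original repo): the three
# 1x1 convolutions reduce the channel dimension to a single statistic per
# spatial location. Assumes a CUDA device; shapes follow get_inputs().
if __name__ == '__main__':
    net = LocalStatisticsNetworkNew(img_feature_channels=4).cuda()
    feats = torch.rand(4, 4, 4, 4, device='cuda')
    stats = net(feats)
    assert stats.shape == (4, 1, 4, 4)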
|
MehdiZouitine/Learning-Disentangled-Representations-via-Mutual-Information-Estimation
|
LocalStatisticsNetwork
| false
| 8,535
|
[
"MIT"
] | 25
|
52952aff647a33b749b709cd7f0c3cd059c66b54
|
https://github.com/MehdiZouitine/Learning-Disentangled-Representations-via-Mutual-Information-Estimation/tree/52952aff647a33b749b709cd7f0c3cd059c66b54
|
AdaFM
|
import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
class AdaFM(nn.Module):
def __init__(self, in_channel, out_channel, style_dim=0):
super().__init__()
self.style_gama = nn.Parameter(torch.ones(in_channel, out_channel,
1, 1))
self.style_beta = nn.Parameter(torch.zeros(in_channel, out_channel,
1, 1))
def forward(self, input, style=0):
h = input.shape[2]
gamma = self.style_gama.repeat(1, 1, h, h)
beta = self.style_beta.repeat(1, 1, h, h)
out = gamma * input + beta
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_repeat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_repeat_0[grid(256)](primals_2, primals_1,
primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class AdaFMNew(nn.Module):
def __init__(self, in_channel, out_channel, style_dim=0):
super().__init__()
self.style_gama = nn.Parameter(torch.ones(in_channel, out_channel,
1, 1))
self.style_beta = nn.Parameter(torch.zeros(in_channel, out_channel,
1, 1))
def forward(self, input_0):
primals_2 = self.style_gama
primals_3 = self.style_beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
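# Usage sketch (illustrative addition, not from the original repo): with gamma
# initialized to ones and beta to zeros, the fused multiply-add is the
# identity. Unlike the eager module, the compiled forward takes no `style`
# argument, and repeat() is replaced by a broadcast inside the kernel.
if __name__ == '__main__':
    adafm = AdaFMNew(in_channel=4, out_channel=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = adafm(x)
    assert torch.allclose(out, x)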
|
MiaoyunZhao/GANTransferLimitedData
|
AdaFM
| false
| 8,536
|
[
"MIT"
] | 41
|
5545bc37a1d7d4f28a9c3588aaa12a616bbddd88
|
https://github.com/MiaoyunZhao/GANTransferLimitedData/tree/5545bc37a1d7d4f28a9c3588aaa12a616bbddd88
|
Decoder
|
import torch
import torch.utils.data
from torch import nn
from torch.nn import functional
class Decoder(nn.Module):
def __init__(self, z_dim, hidden_dim, output_dim):
"""
Args:
            z_dim: An integer indicating the latent size.
            hidden_dim: An integer indicating the size of the hidden dimension.
            output_dim: An integer indicating the output dimension.
"""
super().__init__()
self.linear = nn.Linear(z_dim, hidden_dim)
self.out = nn.Linear(hidden_dim, output_dim)
def forward(self, x):
hidden = functional.relu(self.linear(x))
predicted = torch.sigmoid(self.out(hidden))
return predicted
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'z_dim': 4, 'hidden_dim': 4, 'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_sigmoid_1[grid(256)](buf3, primals_5, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_4, buf4
class DecoderNew(nn.Module):
def __init__(self, z_dim, hidden_dim, output_dim):
"""
Args:
            z_dim: An integer indicating the latent size.
            hidden_dim: An integer indicating the size of the hidden dimension.
            output_dim: An integer indicating the output dimension.
"""
super().__init__()
self.linear = nn.Linear(z_dim, hidden_dim)
self.out = nn.Linear(hidden_dim, output_dim)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_4 = self.out.weight
primals_5 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
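# Usage sketch (illustrative addition, not from the original repo): decodes a
# latent tensor and checks the sigmoid output stays in (0, 1). Assumes a CUDA
# device; shapes follow get_inputs().
if __name__ == '__main__':
    dec = DecoderNew(z_dim=4, hidden_dim=4, output_dim=4).cuda()
    z = torch.rand(4, 4, 4, 4, device='cuda')
    pred = dec(z)
    assert pred.shape == (4, 4, 4, 4)
    assert pred.min() > 0 and pred.max() < 1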
|
MaurizioFD/recsys-challenge-2020-twitter
|
Decoder
| false
| 8,537
|
[
"Apache-2.0"
] | 44
|
95dc024fb4f8777aa62e1304536daece640428de
|
https://github.com/MaurizioFD/recsys-challenge-2020-twitter/tree/95dc024fb4f8777aa62e1304536daece640428de
|
ClampModule
|
import torch
import torch as th
class ClampModule(th.nn.Module):
"""Why is this not a thing in the main library?"""
def __init__(self, min_v, max_v):
super().__init__()
self.min_v = min_v
self.max_v = max_v
def forward(self, x):
return th.clamp(x, self.min_v, self.max_v)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'min_v': 4, 'max_v': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch as th
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = triton_helpers.minimum(tmp2, tmp1)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ClampModuleNew(th.nn.Module):
"""Why is this not a thing in the main library?"""
def __init__(self, min_v, max_v):
super().__init__()
self.min_v = min_v
self.max_v = max_v
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
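# Usage sketch (illustrative addition, not from the original repo): note that
# min_v = max_v = 4 from get_init_inputs() is baked into the Triton kernel as
# the constant 4.0, so this compiled instance maps every element to 4.0
# regardless of the values passed to __init__. Assumes a CUDA device.
if __name__ == '__main__':
    clamp = ClampModuleNew(min_v=4, max_v=4)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = clamp(x)
    assert torch.all(y == 4.0)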
|
Miffyli/policy-supervectors
|
ClampModule
| false
| 8,538
|
[
"MIT"
] | 17
|
358284805e5bc96b95cae15e9741571e46d84bc9
|
https://github.com/Miffyli/policy-supervectors/tree/358284805e5bc96b95cae15e9741571e46d84bc9
|
ResnetBlock
|
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data
import torch.utils.data.distributed
def actvn(x):
out = F.leaky_relu(x, 0.2)
return out
class ResnetBlock(nn.Module):
def __init__(self, fin, fout, fhidden=None, is_bias=True):
super().__init__()
self.is_bias = is_bias
self.learned_shortcut = fin != fout
self.fin = fin
self.fout = fout
if fhidden is None:
self.fhidden = min(fin, fout)
else:
self.fhidden = fhidden
self.conv_0 = nn.Conv2d(self.fin, self.fhidden, 3, stride=1, padding=1)
self.conv_1 = nn.Conv2d(self.fhidden, self.fout, 3, stride=1,
padding=1, bias=is_bias)
if self.learned_shortcut:
self.conv_s = nn.Conv2d(self.fin, self.fout, 1, stride=1,
padding=0, bias=False)
def forward(self, x, intitial=False):
x_s = self._shortcut(x)
if intitial:
dx = self.conv_0(x)
else:
dx = self.conv_0(actvn(x))
dx = self.conv_1(actvn(dx))
out = x_s + 0.1 * dx
return out
def _shortcut(self, x):
if self.learned_shortcut:
x_s = self.conv_s(x)
else:
x_s = x
return x_s
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'fin': 4, 'fout': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn.functional as F
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 0.1
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](primals_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1,
primals_3, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_mul_2[grid(256)](buf5, primals_1,
primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_5
return buf5, primals_2, primals_4, buf0, buf2, buf3
def actvn(x):
out = F.leaky_relu(x, 0.2)
return out
class ResnetBlockNew(nn.Module):
def __init__(self, fin, fout, fhidden=None, is_bias=True):
super().__init__()
self.is_bias = is_bias
self.learned_shortcut = fin != fout
self.fin = fin
self.fout = fout
if fhidden is None:
self.fhidden = min(fin, fout)
else:
self.fhidden = fhidden
self.conv_0 = nn.Conv2d(self.fin, self.fhidden, 3, stride=1, padding=1)
self.conv_1 = nn.Conv2d(self.fhidden, self.fout, 3, stride=1,
padding=1, bias=is_bias)
if self.learned_shortcut:
self.conv_s = nn.Conv2d(self.fin, self.fout, 1, stride=1,
padding=0, bias=False)
def _shortcut(self, x):
if self.learned_shortcut:
x_s = self.conv_s(x)
else:
x_s = x
return x_s
def forward(self, input_0):
primals_2 = self.conv_0.weight
primals_3 = self.conv_0.bias
primals_4 = self.conv_1.weight
primals_5 = self.conv_1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
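# Usage sketch (illustrative addition, not from the original repo): the
# compiled graph corresponds to the default intitial=False path, with an
# identity shortcut because fin == fout. Assumes a CUDA device.
if __name__ == '__main__':
    block = ResnetBlockNew(fin=4, fout=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = block(x)
    assert out.shape == x.shape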
|
MiaoyunZhao/GANTransferLimitedData
|
ResnetBlock
| false
| 8,539
|
[
"MIT"
] | 41
|
5545bc37a1d7d4f28a9c3588aaa12a616bbddd88
|
https://github.com/MiaoyunZhao/GANTransferLimitedData/tree/5545bc37a1d7d4f28a9c3588aaa12a616bbddd88
|
RelativeMSE
|
import torch
class RelativeMSE(torch.nn.Module):
"""Relative Mean-Squared Error.
:math:`0.5 * \\frac{(x - y)^2}{y^2 + \\epsilon}`
Args:
eps(float): small number to avoid division by 0.
"""
def __init__(self, eps=0.01):
super(RelativeMSE, self).__init__()
self.eps = eps
def forward(self, im, ref):
"""Evaluate the metric.
Args:
im(torch.Tensor): image.
ref(torch.Tensor): reference.
"""
mse = torch.pow(im - ref, 2)
loss = mse / (torch.pow(ref, 2) + self.eps)
loss = 0.5 * torch.mean(loss)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tmp1 * tmp1
tmp5 = 0.01
tmp6 = tmp4 + tmp5
tmp7 = tmp3 / tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tmp13 = 0.5
tmp14 = tmp12 * tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_pow_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class RelativeMSENew(torch.nn.Module):
"""Relative Mean-Squared Error.
:math:`0.5 * \\frac{(x - y)^2}{y^2 + \\epsilon}`
Args:
eps(float): small number to avoid division by 0.
"""
def __init__(self, eps=0.01):
super(RelativeMSENew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
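# Usage sketch (illustrative addition, not from the original repo): checks the
# fused reduction against the eager formula. Assumes a CUDA device and the
# 4x4x4x4 shape the kernel was specialized for.
if __name__ == '__main__':
    im = torch.rand(4, 4, 4, 4, device='cuda')
    ref = torch.rand(4, 4, 4, 4, device='cuda')
    loss = RelativeMSENew()(im, ref)
    expected = 0.5 * ((im - ref) ** 2 / (ref ** 2 + 0.01)).mean()
    assert torch.allclose(loss, expected, atol=1e-5)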
|
Mephisto405/WCMC-Public
|
RelativeMSE
| false
| 8,540
|
[
"BSD-2-Clause"
] | 19
|
bd54f218d5239db84f404fbe1b465f9497bcf9e4
|
https://github.com/Mephisto405/WCMC-Public/tree/bd54f218d5239db84f404fbe1b465f9497bcf9e4
|
MLP
|
import torch
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, in_dim, out_dim):
super(MLP, self).__init__()
out = max(8, in_dim * 2)
self.input = nn.Linear(in_dim, out)
self.fc = nn.Linear(out, out)
self.fc2 = nn.Linear(out, out)
self.output = nn.Linear(out, out_dim)
self.relu = nn.LeakyReLU()
def forward(self, x):
x = x.float()
x = self.relu(self.input(x))
x = self.relu(self.fc(x))
x = self.relu(self.fc2(x))
x = self.relu(self.output(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4), (4, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (8, 8), (8, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (8, 8), (8, 1))
assert_size_stride(primals_7, (8,), (1,))
assert_size_stride(primals_8, (4, 8), (8, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(512)](buf0, primals_3, buf1,
buf2, 512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf2, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 8), (1, 8), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
triton_poi_fused_leaky_relu_0[grid(512)](buf3, primals_5, buf4,
buf5, 512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf6 = buf3
del buf3
extern_kernels.mm(reinterpret_tensor(buf5, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_6, (8, 8), (1, 8), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
triton_poi_fused_leaky_relu_0[grid(512)](buf6, primals_7, buf7,
buf8, 512, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
del primals_7
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(256)](buf9, primals_9, buf10,
buf11, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf9
del primals_9
return buf11, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 8), (8, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 8), (8, 1), 0
), buf7, reinterpret_tensor(buf8, (64, 8), (8, 1), 0
), buf10, primals_8, primals_6, primals_4
class MLPNew(nn.Module):
def __init__(self, in_dim, out_dim):
super(MLPNew, self).__init__()
out = max(8, in_dim * 2)
self.input = nn.Linear(in_dim, out)
self.fc = nn.Linear(out, out)
self.fc2 = nn.Linear(out, out)
self.output = nn.Linear(out, out_dim)
self.relu = nn.LeakyReLU()
def forward(self, input_0):
primals_2 = self.input.weight
primals_3 = self.input.bias
primals_4 = self.fc.weight
primals_5 = self.fc.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_8 = self.output.weight
primals_9 = self.output.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
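# Usage sketch (illustrative addition, not from the original repo): with
# in_dim=4 the hidden width is max(8, 2*4) = 8, matching the (8, 4) weight
# asserted in call(). Assumes a CUDA device; shapes follow get_inputs().
if __name__ == '__main__':
    mlp = MLPNew(in_dim=4, out_dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = mlp(x)
    assert out.shape == (4, 4, 4, 4)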
|
Malta-Lab/IUPE
|
MLP
| false
| 8,541
|
[
"MIT"
] | 10
|
44ddf119917538f02bb69509fec7a8314eed419f
|
https://github.com/Malta-Lab/IUPE/tree/44ddf119917538f02bb69509fec7a8314eed419f
|
FFChessNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FFChessNet(nn.Module):
"""Modified ResidualNetworkSegment model class"""
def __init__(self, block, num_blocks, width, depth):
super(FFChessNet, self).__init__()
assert (depth - 4
            ) % 4 == 0, 'Depth not compatible with recurrent architecture.'
self.iters = (depth - 4) // 4
self.in_planes = int(width * 64)
self.conv1 = nn.Conv2d(12, int(width * 64), kernel_size=3, stride=1,
padding=1, bias=False)
layers = []
for i in range(len(num_blocks)):
for _ in range(self.iters):
layers.append(self._make_layer(block, int(width * 64),
num_blocks[i], stride=1))
self.recur_block = nn.Sequential(*layers)
self.conv2 = nn.Conv2d(int(width * 64), 32, kernel_size=3, stride=1,
padding=1, bias=False)
self.conv3 = nn.Conv2d(32, 8, kernel_size=3, stride=1, padding=1,
bias=False)
self.conv4 = nn.Conv2d(8, 2, kernel_size=3, stride=1, padding=1,
bias=False)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for strd in strides:
layers.append(block(self.in_planes, planes, strd))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.conv1(x))
out = self.recur_block(out)
thought = F.relu(self.conv2(out))
thought = F.relu(self.conv3(thought))
thought = self.conv4(thought)
return thought
def get_inputs():
return [torch.rand([4, 12, 64, 64])]
def get_init_inputs():
return [[], {'block': 4, 'num_blocks': [4, 4], 'width': 4, 'depth': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_2, (4, 12, 64, 64), (49152, 4096, 64, 1))
assert_size_stride(primals_3, (32, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_4, (8, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (2, 8, 3, 3), (72, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 64, 64), (1048576, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(4194304)](buf1, 4194304, XBLOCK=1024,
num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(524288)](buf3, 524288, XBLOCK=512,
num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_relu_2[grid(131072)](buf5, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 2, 64, 64), (8192, 4096, 64, 1))
return (buf6, primals_1, primals_2, primals_3, primals_4, primals_5,
buf1, buf3, buf5)
class FFChessNetNew(nn.Module):
"""Modified ResidualNetworkSegment model class"""
def __init__(self, block, num_blocks, width, depth):
super(FFChessNetNew, self).__init__()
assert (depth - 4
            ) % 4 == 0, 'Depth not compatible with recurrent architecture.'
self.iters = (depth - 4) // 4
self.in_planes = int(width * 64)
self.conv1 = nn.Conv2d(12, int(width * 64), kernel_size=3, stride=1,
padding=1, bias=False)
layers = []
for i in range(len(num_blocks)):
for _ in range(self.iters):
layers.append(self._make_layer(block, int(width * 64),
num_blocks[i], stride=1))
self.recur_block = nn.Sequential(*layers)
self.conv2 = nn.Conv2d(int(width * 64), 32, kernel_size=3, stride=1,
padding=1, bias=False)
self.conv3 = nn.Conv2d(32, 8, kernel_size=3, stride=1, padding=1,
bias=False)
self.conv4 = nn.Conv2d(8, 2, kernel_size=3, stride=1, padding=1,
bias=False)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for strd in strides:
layers.append(block(self.in_planes, planes, strd))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
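# Usage sketch (illustrative addition, not from the original repo): with
# depth=4 the recurrent stack has zero iterations, so `block` is never
# instantiated (get_init_inputs() passes 4 for the same reason) and the
# compiled graph is just conv1 -> conv2 -> conv3 -> conv4. Assumes CUDA.
if __name__ == '__main__':
    net = FFChessNetNew(block=None, num_blocks=[4, 4], width=4, depth=4).cuda()
    boards = torch.rand(4, 12, 64, 64, device='cuda')
    out = net(boards)
    assert out.shape == (4, 2, 64, 64)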
|
Maosef/easy-to-hard
|
FFChessNet
| false
| 8,542
|
[
"MIT"
] | 44
|
711ec0965229444a6c51b1b06a4e2cad3e32d02e
|
https://github.com/Maosef/easy-to-hard/tree/711ec0965229444a6c51b1b06a4e2cad3e32d02e
|
RelPositionMultiHeadedAttention
|
import math
import numpy
import torch
import torch.nn as nn
class RelPositionMultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer with relative position encoding.
    This class is adapted from
    https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/attention.py
    (Apache-2.0 License) and modified slightly by Masao-Someki.
Paper: https://arxiv.org/abs/1901.02860
    :param int n_head: the number of heads
:param int n_feat: the number of features
:param float dropout_rate: dropout rate
"""
def __init__(self, d_model, n_head=4, dropout=0.2):
"""Construct an RelPositionMultiHeadedAttention object."""
super(RelPositionMultiHeadedAttention, self).__init__()
self.linear_pos = nn.Linear(d_model, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
assert d_model % n_head == 0
self.d_k = d_model // n_head
self.n_head = n_head
self.qkv_net = nn.Linear(d_model, d_model * 3, bias=False)
self.linear_out = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(p=dropout)
self.pos_bias_u = nn.Parameter(torch.Tensor(self.n_head, self.d_k))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.n_head, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x, zero_triu=False):
"""Compute relative positinal encoding.
:param torch.Tensor x: (batch, time, size)
:param bool zero_triu: return the lower triangular part of the matrix
"""
zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x
.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
x = x_padded[:, :, 1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(2), x.size(3)))
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
def forward_qkv(self, w, mem):
"""Transform query, key and value.
:param torch.Tensor query: (batch, time1, size)
:param torch.Tensor key: (batch, time2, size)
:param torch.Tensor value: (batch, time2, size)
:return torch.Tensor transformed query, key and value
"""
        bsz = w.size(0)
        qlen = w.size(1)
        if mem is not None:
            cat = torch.cat([mem, w], 0)
            w_heads = self.qkv_net(self.layer_norm(cat))
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            # keep queries for the current segment only (the last qlen entries)
            w_head_q = w_head_q[-qlen:]
        else:
            w_heads = self.qkv_net(self.layer_norm(w))
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(1)
w_head_q = w_head_q.view(bsz, qlen, self.n_head, self.d_k)
w_head_k = w_head_k.view(bsz, klen, self.n_head, self.d_k)
w_head_v = w_head_v.view(bsz, klen, self.n_head, self.d_k)
return w_head_q, w_head_k.transpose(1, 2), w_head_v.transpose(1, 2)
def forward_attention(self, value, scores, mask):
"""Compute attention context vector.
:param torch.Tensor value: (batch, time2, size)
:param torch.Tensor scores: (batch, time1, time2)
:param torch.Tensor mask: (batch, time1, time2)
:return torch.Tensor transformed `value` (batch, time2, d_model)
weighted by the attention score (batch, time1, time2)
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
min_value = float(numpy.finfo(torch.tensor(0, dtype=scores.
dtype).numpy().dtype).min)
scores = scores.masked_fill(mask, min_value)
self.attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
self.attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(self.attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.n_head *
self.d_k)
return self.linear_out(x)
def forward(self, query, pos_emb, mem=None, mask=None):
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
:param torch.Tensor query: (batch, time1, size)
:param torch.Tensor key: (batch, time2, size)
:param torch.Tensor value: (batch, time2, size)
:param torch.Tensor pos_emb: (batch, time1, size)
:param torch.Tensor mask: (batch, time1, time2)
:param torch.nn.Dropout dropout:
        :return torch.Tensor: attended and transformed `value` (batch, time1, d_model)
weighted by the query dot key attention (batch, head, time1, time2)
"""
q, k, v = self.forward_qkv(query, mem)
n_batch_pos = pos_emb.size(0)
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.n_head,
self.d_k)
p = p.transpose(1, 2)
q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
matrix_bd = self.rel_shift(matrix_bd)
scores = (matrix_ac + matrix_bd) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
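# --- Worked illustration of rel_shift (added sketch, a standalone copy of the
# method above). The zero-pad / reshape / slice realigns position scores so
# that, for a (..., T, T) input, entry (i, j) of the result holds original
# entry (i, j - i + T - 1); positions past the valid range hold junk values
# unless masked (see zero_triu).
import torch


def rel_shift_demo(x):
    zero_pad = torch.zeros((*x.size()[:3], 1), dtype=x.dtype)
    x_padded = torch.cat([zero_pad, x], dim=-1)
    x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
    return x_padded[:, :, 1:].view_as(x)


x = torch.arange(9.0).view(1, 1, 3, 3)  # rows: queries, cols: positions
print(rel_shift_demo(x))  # last row [6., 7., 8.] stays unshifted (i = T - 1)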
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
tl.store(out_ptr1 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp41 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp1 = (4 + 4 * x0) % 5
tl.full([1], 0, tl.int64)
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = 0.0
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp9 = tmp1 >= tmp4
tl.full([1], 5, tl.int64)
tmp12 = tl.load(in_ptr1 + (4 * ((4 + 4 * x0) // 5) + 16 * x1 + 16 * ((1 +
x0) // 5) + 64 * x2 + 64 * ((1 + x0 + 5 * x1) // 20) + (-1 + (4 + 4 *
x0) % 5)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tl.where(tmp5, tmp8, tmp12)
tmp14 = tmp0 + tmp13
tmp15 = 1.0
tmp16 = tmp14 * tmp15
tmp18 = 4 * x0 % 5
tmp20 = tmp18 < tmp4
tmp21 = tl.where(tmp20, tmp6, tmp7)
tmp22 = tmp18 >= tmp4
tmp24 = tl.load(in_ptr1 + (4 + 4 * (4 * x0 // 5) + 16 * x1 + 16 * ((5 +
4 * x0) // 20) + 64 * x2 + 64 * ((5 + 4 * x0 + 20 * x1) // 80) + (-
1 + 4 * x0 % 5)), tmp22 & xmask, eviction_policy='evict_last',
other=0.0)
tmp25 = tl.where(tmp20, tmp21, tmp24)
tmp26 = tmp17 + tmp25
tmp27 = tmp26 * tmp15
tmp28 = triton_helpers.maximum(tmp16, tmp27)
tmp30 = (6 + 4 * x0) % 5
tmp32 = tmp30 < tmp4
tmp33 = tl.where(tmp32, tmp6, tmp7)
tmp34 = tmp30 >= tmp4
tmp36 = tl.load(in_ptr1 + (4 * ((6 + 4 * x0) // 5) + 16 * x1 + 16 * ((3 +
2 * x0) // 10) + 64 * x2 + 64 * ((3 + 2 * x0 + 10 * x1) // 40) + (-
1 + (6 + 4 * x0) % 5)), tmp34 & xmask, eviction_policy='evict_last',
other=0.0)
tmp37 = tl.where(tmp32, tmp33, tmp36)
tmp38 = tmp29 + tmp37
tmp39 = tmp38 * tmp15
tmp40 = triton_helpers.maximum(tmp28, tmp39)
tmp42 = (7 + 4 * x0) % 5
tmp44 = tmp42 < tmp4
tmp45 = tl.where(tmp44, tmp6, tmp7)
tmp46 = tmp42 >= tmp4
tmp48 = tl.load(in_ptr1 + (4 * ((7 + 4 * x0) // 5) + 16 * x1 + 16 * ((7 +
4 * x0) // 20) + 64 * x2 + 64 * ((7 + 4 * x0 + 20 * x1) // 80) + (-
1 + (7 + 4 * x0) % 5)), tmp46 & xmask, eviction_policy='evict_last',
other=0.0)
tmp49 = tl.where(tmp44, tmp45, tmp48)
tmp50 = tmp41 + tmp49
tmp51 = tmp50 * tmp15
tmp52 = triton_helpers.maximum(tmp40, tmp51)
tmp53 = tmp16 - tmp52
tmp54 = tmp53 * tmp15
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp27 - tmp52
tmp57 = tmp56 * tmp15
tmp58 = tl_math.exp(tmp57)
tmp59 = tmp55 + tmp58
tmp60 = tmp39 - tmp52
tmp61 = tmp60 * tmp15
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp59 + tmp62
tmp64 = tmp51 - tmp52
tmp65 = tmp64 * tmp15
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp63 + tmp66
tl.store(out_ptr0 + x3, tmp52, xmask)
tl.store(out_ptr1 + x3, tmp67, xmask)
@triton.jit
def triton_poi_fused__softmax_add_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x6 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp17 = tl.load(in_ptr1 + x6, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
tmp1 = (4 + x0 + 4 * x1) % 5
tl.full([1], 0, tl.int64)
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = 0.0
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp9 = tmp1 >= tmp4
tl.full([1], 5, tl.int64)
tmp12 = tl.load(in_ptr0 + (4 * ((4 + x0 + 4 * x1) // 5) + 16 * x2 + 16 *
((4 + x0 + 4 * x1) // 20) + 64 * x3 + 64 * ((4 + x0 + 4 * x1 + 20 *
x2) // 80) + (-1 + (4 + x0 + 4 * x1) % 5)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tl.where(tmp5, tmp8, tmp12)
tmp14 = tmp0 + tmp13
tmp15 = 1.0
tmp16 = tmp14 * tmp15
tmp18 = tmp16 - tmp17
tmp19 = tmp18 * tmp15
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp20 / tmp21
tl.store(in_out_ptr0 + x4, tmp22, xmask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 1), (1, 1))
assert_size_stride(primals_8, (4, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
del primals_6
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf3, primals_7, primals_8,
buf5, buf8, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
del primals_8
buf6 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf3, buf6, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf6, (16, 1, 4), (4, 0, 1), 0), out=buf7)
buf9 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf4, buf9, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf9, (16, 1, 4), (4, 0, 1), 0), out=buf10)
buf11 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf4
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_5[grid(64)](buf7, buf10, buf11, buf12,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf7
triton_poi_fused__softmax_add_6[grid(256)](buf13, buf10, buf11,
buf12, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf10
buf14 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf12
triton_poi_fused_clone_7[grid(16, 4)](buf3, buf14, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf3
buf15 = reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 1), 0)
del buf11
extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 0), 0), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf15, buf16, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf17 = reinterpret_tensor(buf15, (16, 4), (4, 1), 0)
del buf15
extern_kernels.addmm(primals_10, reinterpret_tensor(buf16, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf17)
del primals_10
return reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0
), buf13, primals_1, reinterpret_tensor(buf2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_5, (16, 4), (4, 1), 0
), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0
), primals_9, reinterpret_tensor(buf14, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf5, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 4), 0), primals_4
class RelPositionMultiHeadedAttentionNew(nn.Module):
"""Multi-Head Attention layer with relative position encoding.
    This class is acquired from
https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/attention.py
(Apache2.0 Licence) and modified a little by Masao-Someki
Paper: https://arxiv.org/abs/1901.02860
    :param int n_head: the number of heads
:param int n_feat: the number of features
:param float dropout_rate: dropout rate
"""
def __init__(self, d_model, n_head=4, dropout=0.2):
"""Construct an RelPositionMultiHeadedAttention object."""
super(RelPositionMultiHeadedAttentionNew, self).__init__()
self.linear_pos = nn.Linear(d_model, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
assert d_model % n_head == 0
self.d_k = d_model // n_head
self.n_head = n_head
self.qkv_net = nn.Linear(d_model, d_model * 3, bias=False)
self.linear_out = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(p=dropout)
self.pos_bias_u = nn.Parameter(torch.Tensor(self.n_head, self.d_k))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.n_head, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x, zero_triu=False):
"""Compute relative positinal encoding.
:param torch.Tensor x: (batch, time, size)
:param bool zero_triu: return the lower triangular part of the matrix
"""
zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x
.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
x = x_padded[:, :, 1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(2), x.size(3)))
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
def forward_qkv(self, w, mem):
"""Transform query, key and value.
:param torch.Tensor query: (batch, time1, size)
:param torch.Tensor key: (batch, time2, size)
:param torch.Tensor value: (batch, time2, size)
:return torch.Tensor transformed query, key and value
"""
        bsz = w.size(0)
        qlen = w.size(1)
        if mem is not None:
            cat = torch.cat([mem, w], 0)
            w_heads = self.qkv_net(self.layer_norm(cat))
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            # keep queries for the current segment only (the last qlen entries)
            w_head_q = w_head_q[-qlen:]
        else:
            w_heads = self.qkv_net(self.layer_norm(w))
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(1)
w_head_q = w_head_q.view(bsz, qlen, self.n_head, self.d_k)
w_head_k = w_head_k.view(bsz, klen, self.n_head, self.d_k)
w_head_v = w_head_v.view(bsz, klen, self.n_head, self.d_k)
return w_head_q, w_head_k.transpose(1, 2), w_head_v.transpose(1, 2)
def forward_attention(self, value, scores, mask):
"""Compute attention context vector.
:param torch.Tensor value: (batch, time2, size)
:param torch.Tensor scores: (batch, time1, time2)
:param torch.Tensor mask: (batch, time1, time2)
:return torch.Tensor transformed `value` (batch, time2, d_model)
weighted by the attention score (batch, time1, time2)
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
min_value = float(numpy.finfo(torch.tensor(0, dtype=scores.
dtype).numpy().dtype).min)
scores = scores.masked_fill(mask, min_value)
self.attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
self.attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(self.attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.n_head *
self.d_k)
return self.linear_out(x)
def forward(self, input_0, input_1):
primals_7 = self.pos_bias_u
primals_8 = self.pos_bias_v
primals_6 = self.linear_pos.weight
primals_2 = self.layer_norm.weight
primals_3 = self.layer_norm.bias
primals_4 = self.qkv_net.weight
primals_9 = self.linear_out.weight
primals_10 = self.linear_out.bias
primals_1 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
Masao-Someki/Conformer
|
RelPositionMultiHeadedAttention
| false
| 8,543
|
[
"MIT"
] | 18
|
866da9ae05a6d07304775c592caac8d516f67c92
|
https://github.com/Masao-Someki/Conformer/tree/866da9ae05a6d07304775c592caac8d516f67c92
|
SuperPointNet
|
import torch
class SuperPointNet(torch.nn.Module):
""" Pytorch definition of SuperPoint Network. """
def __init__(self):
super(SuperPointNet, self).__init__()
self.relu = torch.nn.ReLU(inplace=True)
self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
self.numberOfClasses = 1
c1, c2, c3, c4, c5, d1 = 64, 64, 128, 128, 256, 256
self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1
)
self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1,
padding=1)
self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1,
padding=1)
self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1,
padding=1)
self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1,
padding=1)
self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1,
padding=1)
self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1,
padding=1)
self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1,
padding=1)
self.convPa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
padding=1)
self.convPb = torch.nn.Conv2d(c5, 65, kernel_size=1, stride=1,
padding=0)
self.convDa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
padding=1)
self.convDb = torch.nn.Conv2d(c5, d1, kernel_size=1, stride=1,
padding=0)
def forward(self, x):
""" Forward pass that jointly computes unprocessed point and descriptor
tensors.
Input
x: Image pytorch tensor shaped N x 1 x H x W.
Output
          semi: Output point pytorch tensor shaped N x 65 x H/8 x W/8.
desc: Output descriptor pytorch tensor shaped N x 256 x H/8 x W/8.
"""
x = self.relu(self.conv1a(x))
x = self.relu(self.conv1b(x))
x = self.pool(x)
x = self.relu(self.conv2a(x))
x = self.relu(self.conv2b(x))
x = self.pool(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
cPa = self.relu(self.convPa(x))
semi = self.convPb(cPa)
cDa = self.relu(self.convDa(x))
desc = self.convDb(cDa)
dn = torch.norm(desc, p=2, dim=1)
desc = desc.div(torch.unsqueeze(dn, 1))
return semi, desc
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
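# --- Added sketch: the descriptor normalization at the end of forward().
# `desc.div(torch.unsqueeze(dn, 1))` divides each 256-dim descriptor by its
# L2 norm, so dot products between descriptors become cosine similarities.
import torch

desc = torch.rand(4, 256, 8, 8)            # N x 256 x H/8 x W/8
dn = torch.norm(desc, p=2, dim=1)          # per-pixel norm, N x H/8 x W/8
unit = desc / dn.unsqueeze(1)              # broadcast divide over channels
print(torch.allclose(torch.norm(unit, p=2, dim=1), torch.ones(4, 8, 8)))  # True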
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp4, ymask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 32
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None)
tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None)
tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 16
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 4096 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 4096 * x2), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + 128 * x1 + 4096 * x2), None)
tmp5 = tl.load(in_ptr0 + (2112 + x0 + 128 * x1 + 4096 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 8
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 4096 * x2), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 4096 * x2), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + 256 * x1 + 4096 * x2), None)
tmp5 = tl.load(in_ptr0 + (2176 + x0 + 256 * x1 + 4096 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_13(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 260
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 65
y1 = yindex // 65
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 65 * x2 + 4160 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 64 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_per_fused_convolution_linalg_vector_norm_14(in_out_ptr0,
in_out_ptr1, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (r1 + 256 * x0), None)
tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = libdevice.sqrt(tmp6)
tl.store(in_out_ptr0 + (r1 + 256 * x0), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp7, None)
@triton.jit
def triton_poi_fused_div_15(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 16384 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 64 * y1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + (x2 + 64 * y3), tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (65, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_21, (65,), (1,))
assert_size_stride(primals_22, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_23, (256,), (1,))
assert_size_stride(primals_24, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_25, (256,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(4096, 9)](primals_4, buf0, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_0[grid(4096, 9)](primals_6, buf1, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_0[grid(4096, 9)](primals_8, buf2, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_1[grid(8192, 9)](primals_10, buf3, 8192, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(16384, 9)](primals_12, buf4, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf5 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(16384, 9)](primals_14, buf5, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf6 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(16384, 9)](primals_16, buf6, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf7 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_3[grid(32768, 9)](primals_18, buf7, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf8 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_3[grid(32768, 9)](primals_22, buf8, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_22
buf9 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf10 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
torch.float32)
triton_poi_fused_convolution_relu_4[grid(256, 4096)](buf9,
primals_2, buf10, 256, 4096, XBLOCK=16, YBLOCK=256, num_warps=8,
num_stages=1)
del buf9
del primals_2
buf11 = extern_kernels.convolution(buf10, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf12 = buf11
del buf11
triton_poi_fused_convolution_relu_5[grid(1048576)](buf12, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf13 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.float32)
buf14 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_6[grid(262144)](buf12,
buf13, buf14, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf15 = extern_kernels.convolution(buf13, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 32, 32), (65536, 1, 2048, 64))
buf16 = buf15
del buf15
triton_poi_fused_convolution_relu_7[grid(262144)](buf16, primals_7,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf17 = extern_kernels.convolution(buf16, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 64, 32, 32), (65536, 1, 2048, 64))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_7[grid(262144)](buf18, primals_9,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf19 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
torch.float32)
buf20 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(65536)](buf18,
buf19, buf20, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 128, 16, 16), (32768, 1, 2048, 128))
buf22 = buf21
del buf21
triton_poi_fused_convolution_relu_9[grid(131072)](buf22, primals_11,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 128, 16, 16), (32768, 1, 2048, 128))
buf24 = buf23
del buf23
triton_poi_fused_convolution_relu_9[grid(131072)](buf24, primals_13,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.float32)
buf26 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(32768)](buf24,
buf25, buf26, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf27 = extern_kernels.convolution(buf25, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf28 = buf27
del buf27
triton_poi_fused_convolution_relu_11[grid(32768)](buf28, primals_15,
32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_15
buf29 = extern_kernels.convolution(buf28, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf29, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf30 = buf29
del buf29
triton_poi_fused_convolution_relu_11[grid(32768)](buf30, primals_17,
32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_17
buf31 = extern_kernels.convolution(buf30, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf32 = buf31
del buf31
triton_poi_fused_convolution_relu_12[grid(65536)](buf32, primals_19,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_19
buf33 = extern_kernels.convolution(buf32, primals_20, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 65, 8, 8), (4160, 1, 520, 65))
buf34 = empty_strided_cuda((4, 65, 8, 8), (4160, 64, 8, 1), torch.
float32)
triton_poi_fused_convolution_13[grid(260, 64)](buf33, primals_21,
buf34, 260, 64, XBLOCK=64, YBLOCK=4, num_warps=4, num_stages=1)
del buf33
del primals_21
buf35 = extern_kernels.convolution(buf30, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf36 = buf35
del buf35
triton_poi_fused_convolution_relu_12[grid(65536)](buf36, primals_23,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_23
buf37 = extern_kernels.convolution(buf36, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf38 = buf37
del buf37
buf39 = empty_strided_cuda((4, 8, 8), (64, 8, 1), torch.float32)
buf40 = buf39
del buf39
triton_per_fused_convolution_linalg_vector_norm_14[grid(256)](buf38,
buf40, primals_25, 256, 256, num_warps=2, num_stages=1)
del primals_25
buf41 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
triton_poi_fused_div_15[grid(1024, 64)](buf38, buf40, buf41, 1024,
64, XBLOCK=64, YBLOCK=8, num_warps=4, num_stages=1)
return (buf34, buf41, primals_1, primals_3, buf0, buf1, buf2, buf3,
buf4, buf5, buf6, buf7, primals_20, buf8, primals_24, buf10, buf12,
buf13, buf14, buf16, buf18, buf19, buf20, buf22, buf24, buf25,
buf26, buf28, buf30, buf32, buf36, buf38, reinterpret_tensor(buf40,
(4, 1, 8, 8), (64, 64, 8, 1), 0))
class SuperPointNetNew(torch.nn.Module):
""" Pytorch definition of SuperPoint Network. """
def __init__(self):
super(SuperPointNetNew, self).__init__()
self.relu = torch.nn.ReLU(inplace=True)
self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
self.numberOfClasses = 1
c1, c2, c3, c4, c5, d1 = 64, 64, 128, 128, 256, 256
self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1
)
self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1,
padding=1)
self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1,
padding=1)
self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1,
padding=1)
self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1,
padding=1)
self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1,
padding=1)
self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1,
padding=1)
self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1,
padding=1)
self.convPa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
padding=1)
self.convPb = torch.nn.Conv2d(c5, 65, kernel_size=1, stride=1,
padding=0)
self.convDa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
padding=1)
self.convDb = torch.nn.Conv2d(c5, d1, kernel_size=1, stride=1,
padding=0)
def forward(self, input_0):
primals_1 = self.conv1a.weight
primals_2 = self.conv1a.bias
primals_4 = self.conv1b.weight
primals_5 = self.conv1b.bias
primals_6 = self.conv2a.weight
primals_7 = self.conv2a.bias
primals_8 = self.conv2b.weight
primals_9 = self.conv2b.bias
primals_10 = self.conv3a.weight
primals_11 = self.conv3a.bias
primals_12 = self.conv3b.weight
primals_13 = self.conv3b.bias
primals_14 = self.conv4a.weight
primals_15 = self.conv4a.bias
primals_16 = self.conv4b.weight
primals_17 = self.conv4b.bias
primals_18 = self.convPa.weight
primals_19 = self.convPa.bias
primals_20 = self.convPb.weight
primals_21 = self.convPb.bias
primals_22 = self.convDa.weight
primals_23 = self.convDa.bias
primals_24 = self.convDb.weight
primals_25 = self.convDb.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25])
return output[0], output[1]
|
MamonaAwan/UnsupervisedLandmarks
|
SuperPointNet
| false
| 8,544
|
[
"MIT"
] | 26
|
89180755b891fd28e0199560d628dc8b0d2b3e68
|
https://github.com/MamonaAwan/UnsupervisedLandmarks/tree/89180755b891fd28e0199560d628dc8b0d2b3e68
|
TonemappedRelativeMSE
|
import torch
def _tonemap(im):
"""Helper Reinhards tonemapper.
Args:
im(torch.Tensor): image to tonemap.
Returns:
        (torch.Tensor) tonemapped image.
"""
im = torch.clamp(im, min=0)
return im / (1 + im)
class TonemappedRelativeMSE(torch.nn.Module):
"""Relative mean-squared error on tonemaped images.
Args:
eps(float): small number to avoid division by 0.
"""
def __init__(self, eps=0.01):
super(TonemappedRelativeMSE, self).__init__()
self.eps = eps
def forward(self, im, ref):
im = _tonemap(im)
ref = _tonemap(ref)
mse = torch.pow(im - ref, 2)
loss = mse / (torch.pow(ref, 2) + self.eps)
loss = 0.5 * torch.mean(loss)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
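# --- Added sketch: the Reinhard curve and the relative weighting. _tonemap
# maps radiance x >= 0 to x / (1 + x), compressing HDR values into [0, 1);
# dividing the squared error by (tonemapped ref)**2 + eps keeps bright and
# dark regions on a comparable loss scale.
import torch

x = torch.tensor([0.0, 1.0, 3.0, 99.0])
print(x / (1 + x))  # tensor([0.0000, 0.5000, 0.7500, 0.9900])
loss = TonemappedRelativeMSE()(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
print(loss)  # scalar: 0.5 * mean of the relative squared errors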
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_div_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 1.0
tmp4 = tmp2 + tmp3
tmp5 = tmp2 / tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp7 + tmp3
tmp9 = tmp7 / tmp8
tmp10 = tmp5 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tmp9 * tmp9
tmp13 = 0.01
tmp14 = tmp12 + tmp13
tmp15 = tmp11 / tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = 256.0
tmp20 = tmp18 / tmp19
tmp21 = 0.5
tmp22 = tmp20 * tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp22, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_div_mean_mul_pow_sub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def _tonemap(im):
"""Helper Reinhards tonemapper.
Args:
im(torch.Tensor): image to tonemap.
Returns:
        (torch.Tensor) tonemapped image.
"""
im = torch.clamp(im, min=0)
return im / (1 + im)
class TonemappedRelativeMSENew(torch.nn.Module):
"""Relative mean-squared error on tonemaped images.
Args:
eps(float): small number to avoid division by 0.
"""
def __init__(self, eps=0.01):
super(TonemappedRelativeMSENew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Mephisto405/WCMC-Public
|
TonemappedRelativeMSE
| false
| 8,545
|
[
"BSD-2-Clause"
] | 19
|
bd54f218d5239db84f404fbe1b465f9497bcf9e4
|
https://github.com/Mephisto405/WCMC-Public/tree/bd54f218d5239db84f404fbe1b465f9497bcf9e4
|
RelPositionMultiHeadedAttention
|
import math
import torch
from typing import Optional
from typing import Tuple
from torch import nn
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
"""Construct an MultiHeadedAttention object."""
super().__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
return q, k, v
def forward_attention(self, value: 'torch.Tensor', scores:
'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor:
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (torch.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill(mask, -float('inf'))
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb:
'torch.Tensor'=torch.empty(0)) ->torch.Tensor:
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
1.When applying cross attention between decoder and encoder,
the batch padding mask for input is in (#batch, 1, T) shape.
2.When applying self attention of encoder,
the mask is in (#batch, T, T) shape.
3.When applying self attention of decoder,
the mask is in (#batch, L, L) shape.
                4.If different positions in the decoder see different blocks
                    of the encoder, such as Mocha, the passed-in mask could
                    be in (#batch, L, T) shape. But there is no such case in
                    current Wenet.
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
class RelPositionMultiHeadedAttention(MultiHeadedAttention):
"""Multi-Head Attention layer with relative position encoding.
Paper: https://arxiv.org/abs/1901.02860
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an RelPositionMultiHeadedAttention object."""
super().__init__(n_head, n_feat, dropout_rate)
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x, zero_triu: 'bool'=False):
"""Compute relative positinal encoding.
Args:
x (torch.Tensor): Input tensor (batch, time, size).
zero_triu (bool): If true, return the lower triangular part of
the matrix.
Returns:
torch.Tensor: Output tensor.
"""
zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1),
device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(x.size()[0], x.size()[1], x.size(3) + 1, x
.size(2))
x = x_padded[:, :, 1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(2), x.size(3)))
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb: 'torch.Tensor'
):
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
pos_emb (torch.Tensor): Positional embedding tensor
(#batch, time2, size).
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
q = q.transpose(1, 2)
n_batch_pos = pos_emb.size(0)
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
p = p.transpose(1, 2)
q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
scores = (matrix_ac + matrix_bd) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
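# (Added note, a sketch of the math above.) Following Transformer-XL
# (https://arxiv.org/abs/1901.02860), matrix_ac = (q + pos_bias_u) @ k^T is
# the content-content score and matrix_bd = (q + pos_bias_v) @ p^T the
# content-position score; both are (batch, head, time1, time2), and their sum
# is divided by sqrt(d_k) before the masked softmax in forward_attention.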
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'n_feat': 4, 'dropout_rate': 0.5}]
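# --- Added sketch: the scaled dot-product core used by forward(). Per head it
# computes softmax(Q @ K^T / sqrt(d_k)) @ V; when a mask is given,
# forward_attention fills masked scores with -inf before the softmax and
# zeroes the resulting weights. A single-head reference on CPU:
import math
import torch

q = torch.rand(4, 4, 4)  # (batch, time1, d_k)
k = torch.rand(4, 4, 4)  # (batch, time2, d_k)
v = torch.rand(4, 4, 4)  # (batch, time2, d_k)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(q.size(-1))
attn = torch.softmax(scores, dim=-1)  # each row sums to 1
out = torch.matmul(attn, v)           # (batch, time1, d_k)
print(out.shape, attn.sum(-1))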
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from typing import Optional
from typing import Tuple
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp2 + tmp5
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (x2 + 4 * y3), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_eq_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_add_div_masked_fill_4(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 4 * x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr2 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr2 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = float('-inf')
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp11 = tmp9 + tmp10
tmp12 = tmp11 * tmp4
tmp13 = tl.where(tmp8, tmp6, tmp12)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp16 + tmp17
tmp19 = tmp18 * tmp4
tmp20 = tl.where(tmp15, tmp6, tmp19)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 * tmp4
tmp27 = tl.where(tmp22, tmp6, tmp26)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tl.store(out_ptr0 + x3, tmp28, xmask)
tl.store(out_ptr1 + x3, tmp39, xmask)
@triton.jit
def triton_poi_fused__softmax_add_div_masked_fill_5(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 16
x5 = xindex
x6 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + x5, xmask)
tmp2 = tl.load(in_ptr1 + x5, xmask)
tmp8 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x6, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = float('-inf')
tmp7 = tl.where(tmp0, tmp6, tmp5)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp13 = 0.0
tmp14 = tl.where(tmp0, tmp13, tmp12)
tl.store(in_out_ptr0 + x5, tmp12, xmask)
tl.store(out_ptr0 + x5, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, 1), (1, 1))
assert_size_stride(primals_13, (4, 1), (1, 1))
assert_size_stride(primals_14, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf3)
del primals_11
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, primals_12,
primals_13, buf4, buf7, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1,
num_stages=1)
del primals_12
del primals_13
del primals_3
buf5 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf5, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf8 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf1
triton_poi_fused_clone_2[grid(16, 4)](buf3, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_eq_3[grid(64)](primals_14, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_14
buf11 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf3
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_div_masked_fill_4[grid(64)](buf10,
buf6, buf9, buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_add_div_masked_fill_5[grid(256)](buf13,
buf10, buf9, buf11, buf12, buf14, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf9
buf15 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf12
triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_8, buf15, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf16 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf14, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf15, (16, 4, 1), (4, 1, 0), 0), out=buf16)
buf17 = reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf11
triton_poi_fused_clone_2[grid(16, 4)](buf16, buf17, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf18 = reinterpret_tensor(buf16, (16, 4), (4, 1), 0)
del buf16
extern_kernels.addmm(primals_16, reinterpret_tensor(buf17, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf18)
del primals_16
return reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0
), buf10, buf13, reinterpret_tensor(buf17, (16, 4), (4, 1), 0
), primals_15, reinterpret_tensor(buf14, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf15, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0)
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
"""Construct an MultiHeadedAttention object."""
super().__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Transform query, key and value.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
Returns:
torch.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
torch.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
torch.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
return q, k, v
def forward_attention(self, value: 'torch.Tensor', scores:
'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor:
"""Compute attention context vector.
Args:
value (torch.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (torch.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (torch.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
torch.Tensor: Transformed value (#batch, time1, d_model)
weighted by the attention score (#batch, time1, time2).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill(mask, -float('inf'))
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb:
'torch.Tensor'=torch.empty(0)) ->torch.Tensor:
"""Compute scaled dot product attention.
Args:
query (torch.Tensor): Query tensor (#batch, time1, size).
key (torch.Tensor): Key tensor (#batch, time2, size).
value (torch.Tensor): Value tensor (#batch, time2, size).
mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
1.When applying cross attention between decoder and encoder,
the batch padding mask for input is in (#batch, 1, T) shape.
2.When applying self attention of encoder,
the mask is in (#batch, T, T) shape.
3.When applying self attention of decoder,
the mask is in (#batch, L, L) shape.
            4.If different positions in the decoder see different blocks
                of the encoder, such as Mocha, the passed-in mask could be
                in (#batch, L, T) shape. But there is no such case in
                current Wenet.
Returns:
torch.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
class RelPositionMultiHeadedAttentionNew(MultiHeadedAttention):
"""Multi-Head Attention layer with relative position encoding.
Paper: https://arxiv.org/abs/1901.02860
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an RelPositionMultiHeadedAttention object."""
super().__init__(n_head, n_feat, dropout_rate)
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x, zero_triu: 'bool'=False):
"""Compute relative positinal encoding.
Args:
x (torch.Tensor): Input tensor (batch, time, size).
zero_triu (bool): If true, return the lower triangular part of
the matrix.
Returns:
torch.Tensor: Output tensor.
"""
zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1),
device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(x.size()[0], x.size()[1], x.size(3) + 1, x
.size(2))
x = x_padded[:, :, 1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(2), x.size(3)))
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
def forward(self, input_0, input_1, input_2, input_3, input_4):
primals_12 = self.pos_bias_u
primals_13 = self.pos_bias_v
primals_2 = self.linear_q.weight
primals_3 = self.linear_q.bias
primals_4 = self.linear_k.weight
primals_5 = self.linear_k.bias
primals_7 = self.linear_v.weight
primals_8 = self.linear_v.bias
        # call() applies primals_11 to pos_emb (linear_pos) and uses
        # primals_15 in the final output projection (linear_out), so the
        # two weights must be mapped accordingly.
        primals_11 = self.linear_pos.weight
        primals_16 = self.linear_out.bias
        primals_15 = self.linear_out.weight
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
primals_10 = input_3
primals_14 = input_4
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16])
return output[0]
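def _check_rel_mha_equivalence():
    # Hedged sanity check (assumes a CUDA device). Dropout never appears in
    # call(), so the eager module is compared in eval mode. The wrapper maps
    # input_3 -> pos_emb and input_4 -> mask (first-use order in the trace),
    # the reverse of the eager signature's trailing arguments.
    torch.manual_seed(0)
    ref = RelPositionMultiHeadedAttention(4, 4, 0.5).cuda().eval()
    opt = RelPositionMultiHeadedAttentionNew(4, 4, 0.5).cuda().eval()
    opt.load_state_dict(ref.state_dict())
    q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
    mask = torch.ones(4, 4, 4, device='cuda')   # nothing masked out
    pos_emb = torch.rand(4, 4, 4, device='cuda')
    return torch.allclose(ref(q, k, v, mask, pos_emb),
                          opt(q, k, v, pos_emb, mask), atol=1e-5)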
|
Mashiro083/wenet-onnx
|
RelPositionMultiHeadedAttention
| false
| 8,546
|
[
"Apache-2.0"
] | 18
|
ae8f8451d73fa9ceac6f7738194543e83959ca86
|
https://github.com/Mashiro083/wenet-onnx/tree/ae8f8451d73fa9ceac6f7738194543e83959ca86
|
IRW_L1_Loss
|
import torch
import torch.utils.data
import torch
import torch.nn as nn
class IRW_L1_Loss(nn.Module):
def __init__(self, threshold):
super(IRW_L1_Loss, self).__init__()
self.threshold = threshold
def forward(self, x, y, beta):
beta = beta.view(len(x), 1, 1, 1)
beta = torch.nn.functional.threshold(beta, self.threshold, 0.0)
assert len(beta) == len(x)
loss = torch.mean(torch.abs(beta * x - beta * y))
return loss
def get_inputs():
return [torch.rand([4, 1, 1, 1]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 1, 1, 1])]
def get_init_inputs():
return [[], {'threshold': 4}]
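def _demo_irw_l1():
    # Hedged worked example with hypothetical values: samples whose beta
    # falls at or below the threshold are zeroed out of the weighted L1.
    loss_fn = IRW_L1_Loss(threshold=0.5)
    x = torch.ones(2, 3, 4, 4)
    y = torch.zeros(2, 3, 4, 4)
    beta = torch.tensor([0.2, 1.0])   # first sample dropped (0.2 <= 0.5)
    return loss_fn(x, y, beta)        # only sample 2 contributes -> tensor(0.5000)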
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_mul_sub_threshold_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex // 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + r2, None)
tmp1 = 4.0
tmp2 = tmp0 <= tmp1
tmp3 = 0.0
tmp4 = tl.where(tmp2, tmp3, tmp0)
tmp6 = tmp4 * tmp5
tmp8 = tmp4 * tmp7
tmp9 = tmp6 - tmp8
tmp10 = tl_math.abs(tmp9)
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(arg1_1, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_mean_mul_sub_threshold_0[grid(1)](buf1, arg0_1,
arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class IRW_L1_LossNew(nn.Module):
def __init__(self, threshold):
super(IRW_L1_LossNew, self).__init__()
self.threshold = threshold
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg2_1 = input_1
arg1_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
Mid-Push/IrwGAN
|
IRW_L1_Loss
| false
| 8,547
|
[
"BSD-3-Clause"
] | 31
|
f56e7274cf7de3362459549dd807b66b93dc5e89
|
https://github.com/Mid-Push/IrwGAN/tree/f56e7274cf7de3362459549dd807b66b93dc5e89
|
Attention
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
INF = 1e10  # large constant used to mask out future positions in the causal branch
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
def forward(self, query, key, value):
dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF
            if key.is_cuda:
                tri = tri.cuda()
            dot_products.data.sub_(tri.unsqueeze(0))
return matmul(self.dropout(F.softmax(dot_products / self.scale, dim
=2)), value)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_key': 4, 'drop_ratio': 0.5, 'causal': 4}]
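def _demo_causal_attention():
    # Hedged sketch of the causal path: 3-d inputs trigger the triangular
    # mask, eval mode makes dropout a no-op, and INF is the constant
    # defined above.
    attn = Attention(d_key=8, drop_ratio=0.5, causal=True).eval()
    q, k, v = (torch.rand(2, 5, 8) for _ in range(3))
    out = attn(q, k, v)   # position t only attends to keys at <= t
    return out.shape      # torch.Size([2, 5, 8])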
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out
=buf1)
del arg1_1
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4
)
del arg2_1
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
class AttentionNew(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
MichiganCOG/Video-Grounding
|
Attention
| false
| 8,548
|
[
"MIT"
] | 41
|
3e0ec0b69578a59be583911590354fe77d357cab
|
https://github.com/MichiganCOG/Video-Grounding/tree/3e0ec0b69578a59be583911590354fe77d357cab
|
MultiHead
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
INF = 1e10  # large constant used to mask out future positions in the causal branch
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
def forward(self, query, key, value):
dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF
            if key.is_cuda:
                tri = tri.cuda()
            dot_products.data.sub_(tri.unsqueeze(0))
return matmul(self.dropout(F.softmax(dot_products / self.scale, dim
=2)), value)
class MultiHead(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal)
self.wq = Linear(d_key, d_key, bias=False)
self.wk = Linear(d_key, d_key, bias=False)
self.wv = Linear(d_value, d_value, bias=False)
self.wo = Linear(d_value, d_key, bias=False)
self.n_heads = n_heads
def forward(self, query, key, value):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key,
value))
return self.wo(torch.cat([self.attention(q, k, v) for q, k, v in
zip(query, key, value)], -1))
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'d_key': 4, 'd_value': 4, 'n_heads': 4, 'drop_ratio': 0.5}]
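def _demo_multihead():
    # Hedged usage sketch: d_key must be divisible by n_heads, since the
    # projections are split with chunk(n_heads, -1). Note the softmax scale
    # uses the full d_key rather than the per-head width, as in the code above.
    mh = MultiHead(d_key=16, d_value=16, n_heads=4, drop_ratio=0.5).eval()
    x = torch.rand(2, 5, 16)
    out = mh(x, x, x)     # self-attention over 4 heads of width 4
    return out.shape      # torch.Size([2, 5, 16])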
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 0), out=buf6)
buf7 = buf4
del buf4
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 1), out=buf10)
buf11 = buf8
del buf8
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = buf11
del buf11
triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 2), out=buf14)
buf15 = buf12
del buf12
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = buf15
del buf15
triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 3), out=buf18)
buf19 = buf16
del buf16
triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf10
del buf14
del buf18
del buf6
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf20)
return reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_5, (16, 4), (4, 1), 0
), buf5, buf9, buf13, buf17, reinterpret_tensor(buf19, (16, 4), (4,
1), 0), primals_7, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2
), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2
), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1
), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1
), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0
), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0)
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
INF = 1e10  # large constant used to mask out future positions in the causal branch
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
def forward(self, query, key, value):
dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF
            if key.is_cuda:
                tri = tri.cuda()
            dot_products.data.sub_(tri.unsqueeze(0))
return matmul(self.dropout(F.softmax(dot_products / self.scale, dim
=2)), value)
class MultiHeadNew(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal)
self.wq = Linear(d_key, d_key, bias=False)
self.wk = Linear(d_key, d_key, bias=False)
self.wv = Linear(d_value, d_value, bias=False)
self.wo = Linear(d_value, d_key, bias=False)
self.n_heads = n_heads
def forward(self, input_0, input_1, input_2):
primals_2 = self.wq.weight
primals_4 = self.wk.weight
primals_6 = self.wv.weight
primals_7 = self.wo.weight
primals_1 = input_0
primals_3 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
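def _check_multihead_equivalence():
    # Hedged sanity check (assumes a CUDA device). The generated call()
    # skips dropout, so compare against the eager module in eval mode with
    # shared weights.
    torch.manual_seed(0)
    ref = MultiHead(4, 4, 4, 0.5).cuda().eval()
    opt = MultiHeadNew(4, 4, 4, 0.5).cuda().eval()
    opt.load_state_dict(ref.state_dict())
    q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
    return torch.allclose(ref(q, k, v), opt(q, k, v), atol=1e-5)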
|
MichiganCOG/Video-Grounding
|
MultiHead
| false
| 8,549
|
[
"MIT"
] | 41
|
3e0ec0b69578a59be583911590354fe77d357cab
|
https://github.com/MichiganCOG/Video-Grounding/tree/3e0ec0b69578a59be583911590354fe77d357cab
|
FeedForward
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
class FeedForward(nn.Module):
def __init__(self, d_model, d_hidden):
super().__init__()
self.linear1 = Linear(d_model, d_hidden)
self.linear2 = Linear(d_hidden, d_model)
def forward(self, x):
return self.linear2(F.relu(self.linear1(x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_hidden': 4}]
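def _demo_feedforward():
    # Hedged sketch: the custom Linear flattens all leading dimensions, so
    # the block accepts tensors of any rank ending in d_model.
    ff = FeedForward(d_model=4, d_hidden=8)
    out = ff(torch.rand(2, 3, 5, 4))  # arbitrary leading dims
    return out.shape                  # torch.Size([2, 3, 5, 4])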
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
class FeedForwardNew(nn.Module):
def __init__(self, d_model, d_hidden):
super().__init__()
self.linear1 = Linear(d_model, d_hidden)
self.linear2 = Linear(d_hidden, d_model)
def forward(self, input_0):
primals_2 = self.linear1.weight
primals_3 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
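def _check_feedforward_equivalence():
    # Hedged sanity check (assumes a CUDA device): copy weights into the
    # Triton-backed wrapper and compare against the eager block.
    torch.manual_seed(0)
    ref = FeedForward(4, 4).cuda()
    opt = FeedForwardNew(4, 4).cuda()
    opt.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return torch.allclose(ref(x), opt(x), atol=1e-5)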
|
MichiganCOG/Video-Grounding
|
FeedForward
| false
| 8,550
|
[
"MIT"
] | 41
|
3e0ec0b69578a59be583911590354fe77d357cab
|
https://github.com/MichiganCOG/Video-Grounding/tree/3e0ec0b69578a59be583911590354fe77d357cab
|
TonemappedMSE
|
import torch
def _tonemap(im):
"""Helper Reinhards tonemapper.
Args:
im(torch.Tensor): image to tonemap.
Returns:
        (torch.Tensor) tonemapped image.
"""
im = torch.clamp(im, min=0)
return im / (1 + im)
class TonemappedMSE(torch.nn.Module):
"""Mean-squared error on tonemaped images.
Args:
eps(float): small number to avoid division by 0.
"""
def __init__(self, eps=0.01):
super(TonemappedMSE, self).__init__()
self.eps = eps
def forward(self, im, ref):
im = _tonemap(im)
ref = _tonemap(ref)
loss = torch.pow(im - ref, 2)
loss = 0.5 * torch.mean(loss)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
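def _demo_tonemap():
    # Hedged worked example: Reinhard's curve maps x to x / (1 + x), so
    # 1.0 -> 0.5 and 3.0 -> 0.75, compressing the range into [0, 1);
    # negative values are clamped to 0 first.
    x = torch.tensor([-1.0, 0.0, 1.0, 3.0])
    return _tonemap(x)   # tensor([0.0000, 0.0000, 0.5000, 0.7500])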
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_div_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 1.0
tmp4 = tmp2 + tmp3
tmp5 = tmp2 / tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp7 + tmp3
tmp9 = tmp7 / tmp8
tmp10 = tmp5 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tmp17 = 0.5
tmp18 = tmp16 * tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_div_mean_mul_pow_sub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def _tonemap(im):
"""Helper Reinhards tonemapper.
Args:
im(torch.Tensor): image to tonemap.
Returns:
        (torch.Tensor) tonemapped image.
"""
im = torch.clamp(im, min=0)
return im / (1 + im)
class TonemappedMSENew(torch.nn.Module):
"""Mean-squared error on tonemaped images.
Args:
eps(float): small number to avoid division by 0.
"""
def __init__(self, eps=0.01):
super(TonemappedMSENew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
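def _check_tonemapped_mse_equivalence():
    # Hedged sanity check (assumes a CUDA device): the fused kernel has no
    # parameters or dropout, so it should match the eager loss directly.
    torch.manual_seed(0)
    im = torch.rand(4, 4, 4, 4, device='cuda')
    ref_img = torch.rand(4, 4, 4, 4, device='cuda')
    return torch.allclose(TonemappedMSE()(im, ref_img),
                          TonemappedMSENew()(im, ref_img), atol=1e-6)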
|
Mephisto405/WCMC-Public
|
TonemappedMSE
| false
| 8,551
|
[
"BSD-2-Clause"
] | 19
|
bd54f218d5239db84f404fbe1b465f9497bcf9e4
|
https://github.com/Mephisto405/WCMC-Public/tree/bd54f218d5239db84f404fbe1b465f9497bcf9e4
|
EncoderLayer
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
class FeedForward(nn.Module):
def __init__(self, d_model, d_hidden):
super().__init__()
self.linear1 = Linear(d_model, d_hidden)
self.linear2 = Linear(d_hidden, d_model)
def forward(self, x):
return self.linear2(F.relu(self.linear1(x)))
INF = 1e10  # large constant used to mask out future positions in the causal branch
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
def forward(self, query, key, value):
dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF
            if key.is_cuda:
                tri = tri.cuda()
            dot_products.data.sub_(tri.unsqueeze(0))
return matmul(self.dropout(F.softmax(dot_products / self.scale, dim
=2)), value)
class MultiHead(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal)
self.wq = Linear(d_key, d_key, bias=False)
self.wk = Linear(d_key, d_key, bias=False)
self.wv = Linear(d_value, d_value, bias=False)
self.wo = Linear(d_value, d_key, bias=False)
self.n_heads = n_heads
def forward(self, query, key, value):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key,
value))
return self.wo(torch.cat([self.attention(q, k, v) for q, k, v in
zip(query, key, value)], -1))
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.gamma = nn.Parameter(torch.ones(d_model))
self.beta = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
class ResidualBlock(nn.Module):
def __init__(self, layer, d_model, drop_ratio):
super().__init__()
self.layer = layer
self.dropout = nn.Dropout(drop_ratio)
self.layernorm = LayerNorm(d_model)
def forward(self, *x):
return self.layernorm(x[0] + self.dropout(self.layer(*x)))
class EncoderLayer(nn.Module):
def __init__(self, d_model, d_hidden, n_heads, drop_ratio):
super().__init__()
self.selfattn = ResidualBlock(MultiHead(d_model, d_model, n_heads,
drop_ratio), d_model, drop_ratio)
self.feedforward = ResidualBlock(FeedForward(d_model, d_hidden),
d_model, drop_ratio)
def forward(self, x):
return self.feedforward(self.selfattn(x, x, x))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_hidden': 4, 'n_heads': 4, 'drop_ratio': 0.5}]
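def _demo_encoder_layer():
    # Hedged usage sketch: one post-norm transformer encoder block
    # (self-attention and feed-forward, each wrapped in residual + LayerNorm).
    layer = EncoderLayer(d_model=16, d_hidden=32, n_heads=4, drop_ratio=0.1
        ).eval()
    x = torch.rand(2, 5, 16)
    return layer(x).shape   # torch.Size([2, 5, 16])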
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(in_out_ptr0 + x0, tmp29, xmask)
tl.store(out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 0), out=buf6)
buf7 = buf4
del buf4
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 1), out=buf10)
buf11 = buf8
del buf8
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = buf11
del buf11
triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 2), out=buf14)
buf15 = buf12
del buf12
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = buf15
del buf15
triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 3), out=buf18)
buf19 = buf16
del buf16
triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf10
del buf14
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf20)
buf21 = reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 16), 0)
del buf6
buf22 = buf21
del buf21
buf23 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0)
del buf18
triton_poi_fused_add_mean_std_3[grid(16)](buf22, primals_1, buf20,
buf23, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_4[grid(64)](primals_6,
primals_1, buf20, buf23, buf22, primals_7, buf24, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf22
del buf23
del primals_7
buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf24, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf25)
buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0)
del buf25
buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf26,
primals_9, buf30, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf27 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf27)
buf28 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0)
del buf27
triton_poi_fused_add_6[grid(64)](buf28, buf24, primals_11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_12,
buf28, primals_13, buf29, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_13
return (buf29, primals_1, primals_6, primals_12, buf5, buf9, buf13,
buf17, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf20,
reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(
buf26, (16, 4), (4, 1), 0), buf28, primals_10, buf30, primals_8,
primals_5, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3),
reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2),
reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1),
reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0))
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
class FeedForward(nn.Module):
def __init__(self, d_model, d_hidden):
super().__init__()
self.linear1 = Linear(d_model, d_hidden)
self.linear2 = Linear(d_hidden, d_model)
def forward(self, x):
return self.linear2(F.relu(self.linear1(x)))
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
def forward(self, query, key, value):
dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            # INF is assumed to be defined earlier in this file (e.g. INF =
            # 1e10, as in the source repo); the upper-triangular mask blocks
            # attention to future positions.
            tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF
            if key.is_cuda:
                tri = tri.cuda()
            dot_products.data.sub_(tri.unsqueeze(0))
return matmul(self.dropout(F.softmax(dot_products / self.scale, dim
=2)), value)
class MultiHead(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal)
self.wq = Linear(d_key, d_key, bias=False)
self.wk = Linear(d_key, d_key, bias=False)
self.wv = Linear(d_value, d_value, bias=False)
self.wo = Linear(d_value, d_key, bias=False)
self.n_heads = n_heads
def forward(self, query, key, value):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key,
value))
return self.wo(torch.cat([self.attention(q, k, v) for q, k, v in
zip(query, key, value)], -1))
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.gamma = nn.Parameter(torch.ones(d_model))
self.beta = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
class ResidualBlock(nn.Module):
def __init__(self, layer, d_model, drop_ratio):
super().__init__()
self.layer = layer
self.dropout = nn.Dropout(drop_ratio)
self.layernorm = LayerNorm(d_model)
def forward(self, *x):
return self.layernorm(x[0] + self.dropout(self.layer(*x)))
class EncoderLayerNew(nn.Module):
def __init__(self, d_model, d_hidden, n_heads, drop_ratio):
super().__init__()
self.selfattn = ResidualBlock(MultiHead(d_model, d_model, n_heads,
drop_ratio), d_model, drop_ratio)
self.feedforward = ResidualBlock(FeedForward(d_model, d_hidden),
d_model, drop_ratio)
def forward(self, input_0):
primals_2 = self.selfattn.layer.wq.weight
primals_3 = self.selfattn.layer.wk.weight
primals_4 = self.selfattn.layer.wv.weight
primals_5 = self.selfattn.layer.wo.weight
primals_6 = self.selfattn.layernorm.gamma
primals_7 = self.selfattn.layernorm.beta
primals_8 = self.feedforward.layer.linear1.weight
primals_9 = self.feedforward.layer.linear1.bias
primals_10 = self.feedforward.layer.linear2.weight
primals_11 = self.feedforward.layer.linear2.bias
primals_12 = self.feedforward.layernorm.gamma
primals_13 = self.feedforward.layernorm.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
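# Editor's addition: a minimal, hedged usage sketch, not part of the generated
# file. The hyperparameters (d_model=4, d_hidden=4, n_heads=4) are inferred
# from the assert_size_stride guards in call(); drop_ratio is arbitrary and a
# CUDA device is assumed.
if __name__ == '__main__':
    model = EncoderLayerNew(d_model=4, d_hidden=4, n_heads=4, drop_ratio=0.1
        ).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    out = model(x)
    print(out.shape)  # expected: torch.Size([4, 4, 4])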
|
MichiganCOG/Video-Grounding
|
EncoderLayer
| false
| 8,552
|
[
"MIT"
] | 41
|
3e0ec0b69578a59be583911590354fe77d357cab
|
https://github.com/MichiganCOG/Video-Grounding/tree/3e0ec0b69578a59be583911590354fe77d357cab
|
A
|
import torch
import torch.nn
class A(torch.nn.Module):
def forward(self, x):
return x + 1
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ANew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
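# Editor's addition: a minimal, hedged usage sketch, not part of the generated
# file. The (4, 4, 4, 4) input shape comes from the assert_size_stride guard
# in call(); a CUDA device is assumed.
if __name__ == '__main__':
    model = ANew()
    y = model(torch.rand(4, 4, 4, 4, device='cuda'))
    print(y.mean().item())  # elementwise x + 1, so the mean is around 1.5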
|
ModECI/MDF
|
A
| false
| 8,553
|
[
"Apache-2.0"
] | 12
|
76d5db6a1c9f691ca5be36d60d28e6e529762e7e
|
https://github.com/ModECI/MDF/tree/76d5db6a1c9f691ca5be36d60d28e6e529762e7e
|
TripletMarginLoss
|
from torch.autograd import Function
import torch
class PairwiseDistance(Function):
def __init__(self, p):
super(PairwiseDistance, self).__init__()
self.norm = p
def forward(self, x1, x2):
assert x1.size() == x2.size()
eps = 0.0001 / x1.size(1)
diff = torch.abs(x1 - x2)
out = torch.pow(diff, self.norm).sum(dim=1)
return torch.pow(out + eps, 1.0 / self.norm)
class TripletMarginLoss(torch.nn.Module):
"""Triplet loss function.
"""
def __init__(self, margin):
super(TripletMarginLoss, self).__init__()
self.margin = margin
self.pdist = PairwiseDistance(2)
def forward(self, repr):
queue_len_plus = repr.shape[-1]
pos = repr[:, 0].unsqueeze(-1).repeat(1, queue_len_plus - 1)
neg = repr[:, 1:]
dist_hinge = torch.clamp(self.margin + neg - pos, min=0.0)
loss = torch.mean(dist_hinge, 1).mean()
return loss
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.autograd import Function
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_mean_repeat_sub_0(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = 4.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = tmp7 + tmp1
tmp9 = tmp8 - tmp3
tmp10 = triton_helpers.maximum(tmp9, tmp5)
tmp11 = tmp6 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp13 - tmp3
tmp15 = triton_helpers.maximum(tmp14, tmp5)
tmp16 = tmp11 + tmp15
tmp17 = 3.0
tmp18 = tmp16 / tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.sum(tmp19, 1)[:, None]
tmp22 = tmp21 / tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_mean_repeat_sub_0[grid(1)](buf1, arg0_1,
1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class PairwiseDistance(Function):
def __init__(self, p):
super(PairwiseDistance, self).__init__()
self.norm = p
def forward(self, x1, x2):
assert x1.size() == x2.size()
eps = 0.0001 / x1.size(1)
diff = torch.abs(x1 - x2)
out = torch.pow(diff, self.norm).sum(dim=1)
return torch.pow(out + eps, 1.0 / self.norm)
class TripletMarginLossNew(torch.nn.Module):
"""Triplet loss function.
"""
def __init__(self, margin):
super(TripletMarginLossNew, self).__init__()
self.margin = margin
self.pdist = PairwiseDistance(2)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
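# Editor's addition: a minimal, hedged usage sketch, not part of the generated
# file. margin=4 mirrors get_init_inputs() above; the (4, 4) input shape comes
# from the assert_size_stride guard in call() and a CUDA device is assumed.
# Note that self.pdist instantiates a legacy autograd Function; it is only
# constructed here and never invoked by the compiled forward.
if __name__ == '__main__':
    model = TripletMarginLossNew(margin=4)
    loss = model(torch.rand(4, 4, device='cuda'))
    print(loss.item())  # scalar hinge loss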
|
Mikexu007/AS_CAL
|
TripletMarginLoss
| false
| 8,554
|
[
"MIT"
] | 14
|
966328ae65bb16ba9b7aab153d8150c08c26c81f
|
https://github.com/Mikexu007/AS_CAL/tree/966328ae65bb16ba9b7aab153d8150c08c26c81f
|
ResNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResNet(nn.Module):
"""Modified ResNet model class"""
def __init__(self, block, num_blocks, depth, width=1):
super(ResNet, self).__init__()
self.iters = int((depth - 4) // 4)
self.in_planes = int(width * 64)
self.conv1 = nn.Conv2d(3, int(width * 64), kernel_size=3, stride=1,
padding=1, bias=False)
layers = []
for j in range(self.iters):
for i in range(len(num_blocks)):
layers.append(self._make_layer(block, int(width * 64),
num_blocks[i], stride=1))
self.recur_block = nn.Sequential(*layers)
self.conv2 = nn.Conv2d(int(width * 64), 32, kernel_size=3, stride=1,
padding=1, bias=False)
self.conv3 = nn.Conv2d(32, 8, kernel_size=3, stride=1, padding=1,
bias=False)
self.conv4 = nn.Conv2d(8, 2, kernel_size=3, stride=1, padding=1,
bias=False)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for strd in strides:
layers.append(block(self.in_planes, planes, strd))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.conv1(x))
out = self.recur_block(out)
out = F.relu(self.conv2(out))
out = F.relu(self.conv3(out))
thought = self.conv4(out)
return thought
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'block': 4, 'num_blocks': 4, 'depth': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_4, (8, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (2, 8, 3, 3), (72, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1048576)](buf1, 1048576, XBLOCK=1024,
num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(524288)](buf3, 524288, XBLOCK=512,
num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_relu_2[grid(131072)](buf5, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 2, 64, 64), (8192, 4096, 64, 1))
return (buf6, primals_1, primals_2, primals_3, primals_4, primals_5,
buf1, buf3, buf5)
class ResNetNew(nn.Module):
"""Modified ResNet model class"""
def __init__(self, block, num_blocks, depth, width=1):
super(ResNetNew, self).__init__()
self.iters = int((depth - 4) // 4)
self.in_planes = int(width * 64)
self.conv1 = nn.Conv2d(3, int(width * 64), kernel_size=3, stride=1,
padding=1, bias=False)
layers = []
for j in range(self.iters):
for i in range(len(num_blocks)):
layers.append(self._make_layer(block, int(width * 64),
num_blocks[i], stride=1))
self.recur_block = nn.Sequential(*layers)
self.conv2 = nn.Conv2d(int(width * 64), 32, kernel_size=3, stride=1,
padding=1, bias=False)
self.conv3 = nn.Conv2d(32, 8, kernel_size=3, stride=1, padding=1,
bias=False)
self.conv4 = nn.Conv2d(8, 2, kernel_size=3, stride=1, padding=1,
bias=False)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for strd in strides:
layers.append(block(self.in_planes, planes, strd))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
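# Editor's addition: a minimal, hedged usage sketch, not part of the generated
# file. With depth < 4 the iteration count is negative, so the recurrent block
# list stays empty and block is never instantiated (None is safe here); shapes
# follow the assert_size_stride guards in call() and a CUDA device is assumed.
if __name__ == '__main__':
    model = ResNetNew(block=None, num_blocks=[], depth=1).cuda()
    thought = model(torch.rand(4, 3, 64, 64, device='cuda'))
    print(thought.shape)  # expected: torch.Size([4, 2, 64, 64])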
|
Maosef/easy-to-hard
|
ResNet
| false
| 8,555
|
[
"MIT"
] | 44
|
711ec0965229444a6c51b1b06a4e2cad3e32d02e
|
https://github.com/Maosef/easy-to-hard/tree/711ec0965229444a6c51b1b06a4e2cad3e32d02e
|
CA_Block
|
import torch
import torch.nn as nn
class CA_Block(nn.Module):
def __init__(self, in_dim):
super(CA_Block, self).__init__()
self.chanel_in = in_dim
self.gamma = nn.Parameter(torch.ones(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
"""
inputs :
x : input feature maps (B X C X H X W)
returns :
out : channel attentive features
"""
m_batchsize, C, height, width = x.size()
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = x.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = self.gamma * out + x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x1 = xindex // 16 % 4
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + (x0 + 16 * x2 + 64 * x1), xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._scaled_dot_product_efficient_attention.default(
reinterpret_tensor(primals_1, (1, 4, 4, 16), (256, 64, 16, 1),
0), reinterpret_tensor(primals_1, (1, 4, 4, 16), (256, 64, 16,
1), 0), reinterpret_tensor(primals_1, (1, 4, 4, 16), (256, 64,
16, 1), 0), None, False, scale=1.0)
buf1 = buf0[0]
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (16, 64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_2, buf1, primals_1,
buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf5, reinterpret_tensor(buf1, (4, 4, 4, 4), (16, 64, 4, 1), 0)
class CA_BlockNew(nn.Module):
def __init__(self, in_dim):
super(CA_BlockNew, self).__init__()
self.chanel_in = in_dim
self.gamma = nn.Parameter(torch.ones(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_0):
primals_2 = self.gamma
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
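# Editor's addition: a minimal, hedged usage sketch, not part of the generated
# file. in_dim=4 and the (4, 4, 4, 4) input shape mirror the helpers above; a
# CUDA device is assumed. The compiled path maps the bmm/softmax/bmm chain
# onto aten._scaled_dot_product_efficient_attention.
if __name__ == '__main__':
    model = CA_BlockNew(in_dim=4).cuda()
    out = model(torch.rand(4, 4, 4, 4, device='cuda'))
    print(out.shape)  # expected: torch.Size([4, 4, 4, 4])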
|
Mhaiyang/CVPR2021_PFNet
|
CA_Block
| false
| 8,556
|
[
"BSD-3-Clause"
] | 24
|
2c4cab0730e6a0619fad79092f0b34f71c3b56c4
|
https://github.com/Mhaiyang/CVPR2021_PFNet/tree/2c4cab0730e6a0619fad79092f0b34f71c3b56c4
|
MlpAttention
|
import torch
import torch.nn as nn
class Self_Attn1D(nn.Module):
""" Self attention Layer """
def __init__(self, in_dim, activation, k=8):
super(Self_Attn1D, self).__init__()
self.chanel_in = in_dim
self.activation = activation
self.query_conv = nn.Conv1d(in_channels=in_dim, out_channels=in_dim //
k, kernel_size=1)
self.key_conv = nn.Conv1d(in_channels=in_dim, out_channels=in_dim //
k, kernel_size=1)
self.value_conv = nn.Conv1d(in_channels=in_dim, out_channels=in_dim,
kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, return_attn=False):
"""
inputs :
x : input feature maps(B X C X T)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*T)
"""
B, C = x.size()
T = 1
x = x.view(B, C, T)
proj_query = self.query_conv(x).view(B, -1, T).permute(0, 2, 1)
proj_key = self.key_conv(x).view(B, -1, T)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(B, -1, T)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(B, C, T)
out = self.gamma * out + x
out = out.squeeze(2)
return out, attention
class MlpAttention(nn.Module):
def __init__(self, in_dim, out_dim):
super(MlpAttention, self).__init__()
out = max(8, in_dim * 2)
self.input = nn.Linear(in_dim, out)
self.output = nn.Linear(out, out_dim)
self.attention = Self_Attn1D(out, nn.LeakyReLU)
self.relu = nn.LeakyReLU()
def forward(self, x):
x = x.float()
x = self.input(x)
x, _att = self.attention(x)
x = self.relu(self.output(x))
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tmp0 - tmp0
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp2 / tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (8, 4), (4, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (1, 8, 1), (8, 1, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (1, 8, 1), (8, 1, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (8, 8, 1), (8, 1, 1))
assert_size_stride(primals_9, (8,), (1,))
assert_size_stride(primals_10, (1,), (1,))
assert_size_stride(primals_11, (4, 8), (8, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 8, 1
), (8, 1, 1), 0), primals_4, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf1, (4, 1, 1), (1, 1, 1))
buf2 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 8, 1
), (8, 1, 1), 0), primals_6, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf2, (4, 1, 1), (1, 1, 1))
buf3 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(4)](buf3, primals_5, 4, XBLOCK=
4, num_warps=1, num_stages=1)
del primals_5
buf4 = reinterpret_tensor(buf2, (4, 1, 1), (1, 4, 4), 0)
del buf2
triton_poi_fused_convolution_0[grid(4)](buf4, primals_7, 4, XBLOCK=
4, num_warps=1, num_stages=1)
del primals_7
buf5 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 1), (1, 0, 0), 0
), buf4, out=buf5)
buf6 = buf5
del buf5
triton_poi_fused__softmax_1[grid(4)](buf6, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf7 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 8, 1
), (8, 1, 1), 0), primals_8, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf7, (4, 8, 1), (8, 1, 1))
buf8 = reinterpret_tensor(buf7, (4, 8, 1), (8, 1, 32), 0)
del buf7
triton_poi_fused_convolution_2[grid(32)](buf8, primals_9, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_9
buf9 = empty_strided_cuda((4, 8, 1), (8, 1, 1), torch.float32)
extern_kernels.bmm(buf8, buf6, out=buf9)
buf10 = empty_strided_cuda((4, 8, 1), (8, 1, 1), torch.float32)
triton_poi_fused_add_mul_3[grid(32)](primals_10, buf9, buf0, buf10,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (4, 8), (8, 1), 0),
reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_leaky_relu_4[grid(16)](buf11, primals_12, buf12,
buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf11
del primals_12
return (buf13, primals_1, primals_4, primals_6, primals_8, primals_10,
reinterpret_tensor(buf0, (4, 8, 1), (8, 1, 1), 0), buf6, buf9,
reinterpret_tensor(buf10, (4, 8), (8, 1), 0), buf12, primals_11,
reinterpret_tensor(buf8, (4, 1, 8), (8, 1, 1), 0), buf3,
reinterpret_tensor(buf4, (4, 1, 1), (1, 1, 1), 0))
class Self_Attn1D(nn.Module):
""" Self attention Layer """
def __init__(self, in_dim, activation, k=8):
super(Self_Attn1D, self).__init__()
self.chanel_in = in_dim
self.activation = activation
self.query_conv = nn.Conv1d(in_channels=in_dim, out_channels=in_dim //
k, kernel_size=1)
self.key_conv = nn.Conv1d(in_channels=in_dim, out_channels=in_dim //
k, kernel_size=1)
self.value_conv = nn.Conv1d(in_channels=in_dim, out_channels=in_dim,
kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, return_attn=False):
"""
inputs :
x : input feature maps(B X C X T)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*T)
"""
B, C = x.size()
T = 1
x = x.view(B, C, T)
proj_query = self.query_conv(x).view(B, -1, T).permute(0, 2, 1)
proj_key = self.key_conv(x).view(B, -1, T)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(B, -1, T)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(B, C, T)
out = self.gamma * out + x
out = out.squeeze(2)
return out, attention
class MlpAttentionNew(nn.Module):
def __init__(self, in_dim, out_dim):
super(MlpAttentionNew, self).__init__()
out = max(8, in_dim * 2)
self.input = nn.Linear(in_dim, out)
self.output = nn.Linear(out, out_dim)
self.attention = Self_Attn1D(out, nn.LeakyReLU)
self.relu = nn.LeakyReLU()
def forward(self, input_0):
primals_2 = self.input.weight
primals_3 = self.input.bias
primals_11 = self.output.weight
primals_12 = self.output.bias
primals_5 = self.attention.gamma
primals_4 = self.attention.query_conv.weight
primals_7 = self.attention.query_conv.bias
primals_6 = self.attention.key_conv.weight
primals_10 = self.attention.key_conv.bias
primals_8 = self.attention.value_conv.weight
primals_9 = self.attention.value_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
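# Editor's addition: a minimal, hedged usage sketch, not part of the generated
# file. in_dim=4 and out_dim=4 mirror get_init_inputs() above; the (4, 4)
# input shape follows the assert_size_stride guard in call() and a CUDA device
# is assumed. Unlike the eager module, the compiled forward returns only the
# activated features, not the attention map.
if __name__ == '__main__':
    model = MlpAttentionNew(in_dim=4, out_dim=4).cuda()
    out = model(torch.rand(4, 4, device='cuda'))
    print(out.shape)  # expected: torch.Size([4, 4])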
|
Malta-Lab/IUPE
|
MlpAttention
| false
| 8,557
|
[
"MIT"
] | 10
|
44ddf119917538f02bb69509fec7a8314eed419f
|
https://github.com/Malta-Lab/IUPE/tree/44ddf119917538f02bb69509fec7a8314eed419f
|
SquadDiscriminator
|
import torch
import torch.nn as nn
class SquadDiscriminator(nn.Module):
def __init__(self, feature_size):
super(SquadDiscriminator, self).__init__()
self.bilinear = nn.Bilinear(feature_size, feature_size, 1)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, global_enc, local_enc):
global_enc = global_enc.unsqueeze(1)
global_enc = global_enc.expand(-1, local_enc.size(1), -1)
scores = self.bilinear(global_enc.contiguous(), local_enc.contiguous())
return scores
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'feature_size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, (
16, 4), (4, 1), 0), primals_3, reinterpret_tensor(primals_2, (
16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_3
buf2 = buf1
del buf1
buf3 = reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0)
del buf2
triton_poi_fused_add_1[grid(16)](buf3, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
return buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0)
class SquadDiscriminatorNew(nn.Module):
def __init__(self, feature_size):
super(SquadDiscriminatorNew, self).__init__()
self.bilinear = nn.Bilinear(feature_size, feature_size, 1)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, input_0, input_1):
primals_3 = self.bilinear.weight
primals_4 = self.bilinear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
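# Editor's addition: a minimal, hedged usage sketch, not part of the generated
# file. feature_size=4 and the input shapes mirror the helpers above; a CUDA
# device is assumed. The bilinear score is computed via aten._trilinear.
if __name__ == '__main__':
    model = SquadDiscriminatorNew(feature_size=4).cuda()
    scores = model(torch.rand(4, 4, device='cuda'), torch.rand(4, 4, 4,
        device='cuda'))
    print(scores.shape)  # expected: torch.Size([4, 4, 1])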
|
MiuLab/QAInfomax
|
SquadDiscriminator
| false
| 8,558
|
[
"MIT"
] | 19
|
0985bc1df68d21c93de1bd6038d69f9792a9f62a
|
https://github.com/MiuLab/QAInfomax/tree/0985bc1df68d21c93de1bd6038d69f9792a9f62a
|
IOU
|
import torch
class IOU(torch.nn.Module):
def __init__(self):
super(IOU, self).__init__()
def _iou(self, pred, target):
pred = torch.sigmoid(pred)
inter = (pred * target).sum(dim=(2, 3))
union = (pred + target).sum(dim=(2, 3)) - inter
iou = 1 - inter / union
return iou.mean()
def forward(self, pred, target):
return self._iou(pred, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mul_sigmoid_sum_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tmp1 * tmp2
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
@triton.jit
def triton_per_fused_div_mean_rsub_sub_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp1 - tmp0
tmp3 = tmp0 / tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp3
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = 16.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mul_sigmoid_sum_0[grid(16)](arg0_1, arg1_1,
buf0, buf1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_div_mean_rsub_sub_1[grid(1)](buf3, buf1, buf0, 1,
16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class IOUNew(torch.nn.Module):
def __init__(self):
super(IOUNew, self).__init__()
def _iou(self, pred, target):
pred = torch.sigmoid(pred)
inter = (pred * target).sum(dim=(2, 3))
union = (pred + target).sum(dim=(2, 3)) - inter
iou = 1 - inter / union
return iou.mean()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
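# Editor's addition: a minimal, hedged usage sketch, not part of the generated
# file. Shapes follow the assert_size_stride guards in call(); a CUDA device
# is assumed.
if __name__ == '__main__':
    model = IOUNew()
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    print(model(pred, target).item())  # scalar mean (1 - IoU) loss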
|
Mhaiyang/CVPR2021_PFNet
|
IOU
| false
| 8,559
|
[
"BSD-3-Clause"
] | 24
|
2c4cab0730e6a0619fad79092f0b34f71c3b56c4
|
https://github.com/Mhaiyang/CVPR2021_PFNet/tree/2c4cab0730e6a0619fad79092f0b34f71c3b56c4
|
GaussianGenerator
|
import torch
import numpy as np
import torch.nn as nn
class GaussianGenerator(nn.Module):
def __init__(self, dims):
super(GaussianGenerator, self).__init__()
self.z_dim = dims[0]
self.linear_var = nn.Parameter(1.0 * torch.ones([self.z_dim]))
self.bias = nn.Parameter(torch.zeros([self.z_dim]))
self.lmbda = 0.001
self.dist = None
def forward(self, z):
out = z * self.linear_var ** 2
out = out + self.lmbda * z + self.bias
return out
def log_density(self, x):
Sigma = self.linear_var ** 2 + self.lmbda
Sigma = Sigma ** 2
location = x - self.bias
quad = torch.einsum('nd,nd,d->n', location, location, 1.0 / Sigma)
quad = quad.unsqueeze(-1)
value = -0.5 * quad - 0.5 * torch.log(2.0 * np.pi * Sigma).sum()
return value
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dims': [4, 4]}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp3 = tmp0 * tmp2
tmp4 = 0.001
tmp5 = tmp0 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_0[grid(256)](primals_2, primals_1,
primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf0, primals_1, primals_2
class GaussianGeneratorNew(nn.Module):
def __init__(self, dims):
super(GaussianGeneratorNew, self).__init__()
self.z_dim = dims[0]
self.linear_var = nn.Parameter(1.0 * torch.ones([self.z_dim]))
self.bias = nn.Parameter(torch.zeros([self.z_dim]))
self.lmbda = 0.001
self.dist = None
def log_density(self, x):
Sigma = self.linear_var ** 2 + self.lmbda
Sigma = Sigma ** 2
location = x - self.bias
quad = torch.einsum('nd,nd,d->n', location, location, 1.0 / Sigma)
quad = quad.unsqueeze(-1)
value = -0.5 * quad - 0.5 * torch.log(2.0 * np.pi * Sigma).sum()
return value
def forward(self, input_0):
primals_1 = self.linear_var
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
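# Editor's addition: a minimal, hedged usage sketch, not part of the generated
# file. dims=[4, 4] mirrors get_init_inputs() above; only dims[0] (the latent
# size) is used by the module. A CUDA device is assumed.
if __name__ == '__main__':
    model = GaussianGeneratorNew(dims=[4, 4]).cuda()
    out = model(torch.rand(4, 4, 4, 4, device='cuda'))
    print(out.shape)  # expected: torch.Size([4, 4, 4, 4])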
|
MichaelArbel/GeneralizedEBM
|
GaussianGenerator
| false
| 8,560
|
[
"BSD-3-Clause"
] | 40
|
b2fb244bacef23a7347aecc0e8ff4863153f94f0
|
https://github.com/MichaelArbel/GeneralizedEBM/tree/b2fb244bacef23a7347aecc0e8ff4863153f94f0
|
ResBlock
|
import torch
import torch.nn as nn
class ResBlock(nn.Module):
def __init__(self, in_c):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(in_c, in_c, kernel_size=3, stride=1, padding
=1, bias=True)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(in_c, in_c, kernel_size=3, stride=1, padding
=1, bias=True)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
out = out + identity
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_c': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_add_convolution_1[grid(256)](buf3, primals_5,
primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_2, primals_4, buf1
class ResBlockNew(nn.Module):
def __init__(self, in_c):
super(ResBlockNew, self).__init__()
self.conv1 = nn.Conv2d(in_c, in_c, kernel_size=3, stride=1, padding
=1, bias=True)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(in_c, in_c, kernel_size=3, stride=1, padding
=1, bias=True)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
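# Editor's addition: a minimal, hedged usage sketch, not part of the generated
# file. in_c=4 and the (4, 4, 4, 4) input shape mirror the helpers above; a
# CUDA device is assumed.
if __name__ == '__main__':
    model = ResBlockNew(in_c=4).cuda()
    out = model(torch.rand(4, 4, 4, 4, device='cuda'))
    print(out.shape)  # expected: torch.Size([4, 4, 4, 4])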
|
MohitLamba94/LLPackNet
|
ResBlock
| false
| 8,561
|
[
"MIT"
] | 15
|
440e20ac48aed0beca5f473358ec85d24d477575
|
https://github.com/MohitLamba94/LLPackNet/tree/440e20ac48aed0beca5f473358ec85d24d477575
|
Summarize
|
import torch
import torch.nn as nn
class Summarize(nn.Module):
def __init__(self):
super(Summarize, self).__init__()
self.sigmoid = nn.Sigmoid()
def forward(self, vec):
return self.sigmoid(torch.mean(vec, dim=1))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tl.sigmoid(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sigmoid_0[grid(64)](arg0_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class SummarizeNew(nn.Module):
def __init__(self):
super(SummarizeNew, self).__init__()
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
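# Editor's addition: a minimal, hedged usage sketch, not part of the generated
# file. The mean over dim=1 and the sigmoid are fused into a single Triton
# kernel; the input shape follows the assert_size_stride guard in call() and
# a CUDA device is assumed.
if __name__ == '__main__':
    model = SummarizeNew()
    out = model(torch.rand(4, 4, 4, 4, device='cuda'))
    print(out.shape)  # expected: torch.Size([4, 4, 4])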
|
MiuLab/QAInfomax
|
Summarize
| false
| 8,562
|
[
"MIT"
] | 19
|
0985bc1df68d21c93de1bd6038d69f9792a9f62a
|
https://github.com/MiuLab/QAInfomax/tree/0985bc1df68d21c93de1bd6038d69f9792a9f62a
|
make_dense
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class make_dense(nn.Module):
def __init__(self, nChannels=64, growthRate=32, kernel_size=3):
super(make_dense, self).__init__()
self.conv = nn.Conv2d(nChannels, growthRate, kernel_size=
kernel_size, padding=(kernel_size - 1) // 2, bias=False)
def forward(self, x):
out = F.relu(self.conv(x))
out = torch.cat((x, out), 1)
return out
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 96
x0 = xindex % 4096
x2 = xindex // 393216
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 96, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 131072 * x2), tmp6,
other=0.0)
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(out_ptr0 + x0, tmp4, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_2, (4, 64, 64, 64), (262144, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 96, 64, 64), (393216, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(1572864)](primals_2, buf0, buf1,
1572864, XBLOCK=1024, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(524288)](buf0, buf2,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
return buf1, primals_1, primals_2, buf2
class make_denseNew(nn.Module):
def __init__(self, nChannels=64, growthRate=32, kernel_size=3):
super(make_denseNew, self).__init__()
self.conv = nn.Conv2d(nChannels, growthRate, kernel_size=
kernel_size, padding=(kernel_size - 1) // 2, bias=False)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
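# Editor's addition: a minimal, hedged usage sketch, not part of the generated
# file. The default nChannels=64 and growthRate=32 give a 64 -> 96 channel
# concatenation; the input shape mirrors get_inputs() above and a CUDA device
# is assumed.
if __name__ == '__main__':
    model = make_denseNew().cuda()
    out = model(torch.rand(4, 64, 64, 64, device='cuda'))
    print(out.shape)  # expected: torch.Size([4, 96, 64, 64])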
|
MohitLamba94/LLPackNet
|
make_dense
| false
| 8,563
|
[
"MIT"
] | 15
|
440e20ac48aed0beca5f473358ec85d24d477575
|
https://github.com/MohitLamba94/LLPackNet/tree/440e20ac48aed0beca5f473358ec85d24d477575
|
ScaledDotProductAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
raw_attn = attn
log_attn = F.log_softmax(attn, 2)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn, log_attn, raw_attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'temperature': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_div_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp0 * tmp15
tl.store(out_ptr0 + x2, tmp17, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
tl.store(out_ptr2 + x2, tmp18, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_div_0[grid(64)](buf0, buf1, buf4, buf6,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf4
return buf3, buf2, buf5, buf6
class ScaledDotProductAttentionNew(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1], output[2], output[3]
| MinkiJ/SnaTCHer | ScaledDotProductAttention | false | 8,564 | ["MIT"] | 12 | 335c42469f0a7ad72c5c3480c8effc8c293823e0 | https://github.com/MinkiJ/SnaTCHer/tree/335c42469f0a7ad72c5c3480c8effc8c293823e0 |
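A minimal parity sketch for the record above, assuming a CUDA device and that both classes plus get_inputs are in scope; in eval mode the dropout is inert, so the eager and fused paths should agree on all four outputs:

import torch

torch.manual_seed(0)
q, k, v = [t.cuda() for t in get_inputs()]
eager = ScaledDotProductAttention(temperature=4).cuda().eval()
fused = ScaledDotProductAttentionNew(temperature=4).cuda().eval()
with torch.no_grad():
    # Both return (context, attn, log_attn, raw_attn) in the same order.
    for a, b in zip(eager(q, k, v), fused(q, k, v)):
        assert torch.allclose(a, b, atol=1e-5)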
SafeLog
|
import torch
import torch.nn as nn
class SafeLog(nn.Module):
def __init__(self, eps=1e-06):
super(SafeLog, self).__init__()
self.eps = eps
def forward(self, X):
return torch.log(torch.clamp(X, min=self.eps))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_log_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tl_math.log(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_log_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SafeLogNew(nn.Module):
def __init__(self, eps=1e-06):
super(SafeLogNew, self).__init__()
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Mrswolf/brainda | SafeLog | false | 8,565 | ["MIT"] | 24 | cbd2fa6334d9e6243324dbaf086be4eb4047e801 | https://github.com/Mrswolf/brainda/tree/cbd2fa6334d9e6243324dbaf086be4eb4047e801 |
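A quick numeric illustration of why the clamp matters — a sketch using only the eager SafeLog above (no GPU required):

import torch

x = torch.tensor([0.0, 0.5])
print(torch.log(x))    # tensor([   -inf, -0.6931])
print(SafeLog()(x))    # tensor([-13.8155, -0.6931]); log(1e-06) replaces -inf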
ScaledTanh
|
import torch
from torch import Tensor
import torch.nn as nn
from torch import tanh
class ScaledTanh(nn.Module):
def __init__(self, factor):
super(ScaledTanh, self).__init__()
self.factor = factor
def forward(self, inputs: 'Tensor') ->Tensor:
return tanh(inputs) * self.factor
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'factor': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ScaledTanhNew(nn.Module):
def __init__(self, factor):
super(ScaledTanhNew, self).__init__()
self.factor = factor
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| MhmdSyd/celldetection | ScaledTanh | false | 8,566 | ["Apache-2.0"] | 26 | 93e706953dc32eb694345179d5dcca5cfd9ff41b | https://github.com/MhmdSyd/celldetection/tree/93e706953dc32eb694345179d5dcca5cfd9ff41b |
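A one-line property check for the record above, assuming the eager ScaledTanh is in scope — outputs are bounded by the factor:

import torch

y = ScaledTanh(factor=4)(torch.randn(10000))
assert (y.abs() <= 4.0).all()   # tanh(x) in (-1, 1), so tanh(x) * 4 in (-4, 4)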
MaxNormConstraintLinear
|
import torch
import torch.nn as nn
class MaxNormConstraintLinear(nn.Linear):
def __init__(self, *args, max_norm_value=1, norm_axis=0, **kwargs):
self.max_norm_value = max_norm_value
self.norm_axis = norm_axis
super().__init__(*args, **kwargs)
def forward(self, input):
self.weight.data = self._max_norm(self.weight.data)
return super().forward(input)
def _max_norm(self, w):
with torch.no_grad():
norms = torch.sqrt(torch.sum(torch.square(w), dim=self.
norm_axis, keepdim=True))
desired = torch.clamp(norms, 0, self.max_norm_value)
w *= desired / norms
return w
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 0.0
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = 1.0
tmp16 = triton_helpers.minimum(tmp14, tmp15)
tmp17 = tmp16 / tmp12
tmp18 = tmp0 * tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_mul_pow_sqrt_sum_0[grid(16)](primals_1,
buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_2
buf2 = torch.ops.aten.set_.source_Tensor(primals_1, buf0)
assert_size_stride(buf2, (4, 4), (4, 1))
del primals_1
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class MaxNormConstraintLinearNew(nn.Linear):
def __init__(self, *args, max_norm_value=1, norm_axis=0, **kwargs):
self.max_norm_value = max_norm_value
self.norm_axis = norm_axis
super().__init__(*args, **kwargs)
def _max_norm(self, w):
with torch.no_grad():
norms = torch.sqrt(torch.sum(torch.square(w), dim=self.
norm_axis, keepdim=True))
desired = torch.clamp(norms, 0, self.max_norm_value)
w *= desired / norms
return w
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Mrswolf/brainda | MaxNormConstraintLinear | false | 8,567 | ["MIT"] | 24 | cbd2fa6334d9e6243324dbaf086be4eb4047e801 | https://github.com/Mrswolf/brainda/tree/cbd2fa6334d9e6243324dbaf086be4eb4047e801 |
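A sketch of the max-norm constraint in action, assuming the eager class above is in scope: the forward pass renormalizes the weight in place, so every per-column norm (norm_axis=0) ends up at or below max_norm_value:

import torch

layer = MaxNormConstraintLinear(8, 4, max_norm_value=1.0)
_ = layer(torch.randn(2, 8))
col_norms = layer.weight.data.norm(dim=0)   # one norm per input feature
assert (col_norms <= 1.0 + 1e-6).all()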
CNN3dModel
|
import torch
class CNN3dModel(torch.nn.ModuleDict):
def __init__(self, D_in=1, D_out=1):
super(CNN3dModel, self).__init__()
self.conv3d = torch.nn.Conv3d(D_in, D_in * 2, kernel_size=2, stride
=2, padding=1)
self.conv3d2 = torch.nn.Conv3d(D_in * 2, D_in * 2, kernel_size=2,
stride=2, padding=1)
self.conv3d3 = torch.nn.Conv3d(D_in * 2, D_in * 4, kernel_size=2,
stride=2, padding=1)
self.pool = torch.nn.MaxPool3d(kernel_size=2, padding=1)
self.pool2 = torch.nn.MaxPool3d(kernel_size=2)
self.relu = torch.nn.ReLU()
self.linear = torch.nn.Linear(D_in * 4, D_in * 8)
self.linear2 = torch.nn.Linear(D_in * 8, D_out)
def forward(self, x):
x = x.float()
c1 = self.conv3d(x)
p1 = self.pool(c1)
c2 = self.conv3d2(self.relu(p1))
p2 = self.pool(c2)
c3 = self.conv3d3(self.relu(p2))
p3 = self.pool2(c3)
v1 = p3.view(p3.size(0), -1)
l1 = self.relu(self.linear(v1))
l2 = self.linear2(l1)
return l2
def get_inputs():
return [torch.rand([4, 1, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 287496
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 35937 % 2
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 39304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 5832
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 729 % 2
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 27 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64, 64), (262144, 262144, 4096,
64, 1))
assert_size_stride(primals_2, (2, 1, 2, 2, 2), (8, 8, 4, 2, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (2, 2, 2, 2, 2), (16, 8, 4, 2, 1))
assert_size_stride(primals_5, (2,), (1,))
assert_size_stride(primals_6, (4, 2, 2, 2, 2), (16, 8, 4, 2, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (8, 4), (4, 1))
assert_size_stride(primals_9, (8,), (1,))
assert_size_stride(primals_10, (1, 8), (8, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2,
2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 33, 33, 33), (71874, 35937, 1089,
33, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(287496)](buf1, primals_3,
287496, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [2, 2,
2], [2, 2, 2], [1, 1, 1])
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = buf3
del buf3
triton_poi_fused_relu_1[grid(39304)](buf5, 39304, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2, 2),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 2, 9, 9, 9), (1458, 729, 81, 9, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_2[grid(5832)](buf7, primals_5, 5832,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf8 = torch.ops.aten.max_pool3d_with_indices.default(buf7, [2, 2,
2], [2, 2, 2], [1, 1, 1])
buf9 = buf8[0]
buf10 = buf8[1]
del buf8
buf11 = buf9
del buf9
triton_poi_fused_relu_3[grid(1000)](buf11, 1000, XBLOCK=256,
num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2,
2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_4[grid(432)](buf13, primals_7, 432,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf14 = torch.ops.aten.max_pool3d_with_indices.default(buf13, [2, 2,
2], [2, 2, 2])
buf15 = buf14[0]
buf16 = buf14[1]
del buf14
buf17 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf15, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 8), (1, 4), 0), out=buf17)
buf18 = buf17
del buf17
triton_poi_fused_relu_5[grid(32)](buf18, primals_9, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_9
buf20 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_11, buf18, reinterpret_tensor(
primals_10, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf20)
del primals_11
return (buf20, primals_1, primals_2, primals_4, primals_6, buf1, buf4,
buf5, buf7, buf10, buf11, buf13, buf16, reinterpret_tensor(buf15, (
4, 4), (4, 1), 0), buf18, primals_10, primals_8)
class CNN3dModelNew(torch.nn.ModuleDict):
def __init__(self, D_in=1, D_out=1):
super(CNN3dModelNew, self).__init__()
self.conv3d = torch.nn.Conv3d(D_in, D_in * 2, kernel_size=2, stride
=2, padding=1)
self.conv3d2 = torch.nn.Conv3d(D_in * 2, D_in * 2, kernel_size=2,
stride=2, padding=1)
self.conv3d3 = torch.nn.Conv3d(D_in * 2, D_in * 4, kernel_size=2,
stride=2, padding=1)
self.pool = torch.nn.MaxPool3d(kernel_size=2, padding=1)
self.pool2 = torch.nn.MaxPool3d(kernel_size=2)
self.relu = torch.nn.ReLU()
self.linear = torch.nn.Linear(D_in * 4, D_in * 8)
self.linear2 = torch.nn.Linear(D_in * 8, D_out)
def forward(self, input_0):
primals_2 = self.conv3d.weight
primals_3 = self.conv3d.bias
primals_4 = self.conv3d2.weight
primals_5 = self.conv3d2.bias
primals_6 = self.conv3d3.weight
primals_7 = self.conv3d3.bias
primals_8 = self.linear.weight
primals_9 = self.linear.bias
primals_10 = self.linear2.weight
primals_11 = self.linear2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| MilesCranmer/Sapsan | CNN3dModel | false | 8,568 | ["BSD-3-Clause"] | 11 | 4d21954baf196ede2d4dafc765aed98a0cfca21b | https://github.com/MilesCranmer/Sapsan/tree/4d21954baf196ede2d4dafc765aed98a0cfca21b |
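The spatial bookkeeping for the bundled 64^3 input is easy to lose track of; this trace follows the assert_size_stride checks in call() above:

# x:       [4, 1, 64, 64, 64]
# conv3d   (k=2, s=2, p=1) -> [4, 2, 33, 33, 33]
# pool     (k=2, s=2, p=1) -> [4, 2, 17, 17, 17]
# conv3d2  (k=2, s=2, p=1) -> [4, 2,  9,  9,  9]
# pool     (k=2, s=2, p=1) -> [4, 2,  5,  5,  5]
# conv3d3  (k=2, s=2, p=1) -> [4, 4,  3,  3,  3]
# pool2    (k=2, s=2)      -> [4, 4,  1,  1,  1] -> view [4, 4] -> linear 4->8 -> linear 8->1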
LabelSmoothingCrossEntropy
|
import torch
import torch._C
import torch.serialization
from torch import nn
import torch.nn.functional as F
class LabelSmoothingCrossEntropy(nn.Module):
""" NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.1, loss_weight=1.0, loss_name='loss_ce'):
super(LabelSmoothingCrossEntropy, self).__init__()
assert smoothing < 1.0
self.smoothing = smoothing
self.confidence = 1.0 - smoothing
self.loss_weight = loss_weight
self._loss_name = loss_name
def forward(self, x: 'torch.Tensor', target: 'torch.Tensor', weight=
None, avg_factor=None, reduction_override=None, **kwargs
) ->torch.Tensor:
x = x.permute(1, 0, 2, 3).flatten(1).transpose(0, 1)
target = target.flatten(0)
logprobs = F.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = (self.confidence * nll_loss + self.smoothing * smooth_loss
) * self.loss_weight
return loss.mean()
@property
def loss_name(self):
"""Loss Name.
This function must be implemented and will return the name of this
loss function. This name will be used to combine different loss items
by simple sum operation. In addition, if you want this loss item to be
included into the backward graph, `loss_` must be the prefix of the
name.
Returns:
str: The name of this loss item.
"""
return self._loss_name
def get_inputs():
return [torch.rand([64, 4, 4, 4]), torch.ones([1024], dtype=torch.int64)]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch._C
import torch.serialization
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 1024
x1 = xindex // 1024
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16 * x1 + 64 * (x0 // 16) + x0 % 16), None)
tmp1 = tl.load(in_ptr0 + (64 * (x0 // 16) + x0 % 16), None,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + 64 * (x0 // 16) + x0 % 16), None,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + 64 * (x0 // 16) + x0 % 16), None,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + 64 * (x0 // 16) + x0 % 16), None,
eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1024 + x0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2048 + x0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3072 + x0), None, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, None)
@triton.jit
def triton_per_fused_add_mean_mul_neg_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp10 = tl.load(in_ptr1 + r0, None)
tmp11 = tl.load(in_ptr1 + (1024 + r0), None)
tmp13 = tl.load(in_ptr1 + (2048 + r0), None)
tmp15 = tl.load(in_ptr1 + (3072 + r0), None)
tmp1 = tl.full([RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (r0 + 1024 * tmp4), None)
tmp7 = -tmp6
tmp8 = 0.9
tmp9 = tmp7 * tmp8
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = 4.0
tmp18 = tmp16 / tmp17
tmp19 = -tmp18
tmp20 = 0.1
tmp21 = tmp19 * tmp20
tmp22 = tmp9 + tmp21
tmp23 = 1.0
tmp24 = tmp22 * tmp23
tmp25 = tl.broadcast_to(tmp24, [RBLOCK])
tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0))
tmp28 = 1024.0
tmp29 = tmp27 / tmp28
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (64, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (1024,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1024, 4), (1, 1024), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(4096)](arg0_1, buf0, 4096,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((1024, 4), (1, 1024), torch.float32)
triton_poi_fused__log_softmax_1[grid(4096)](buf0, buf1, 4096,
XBLOCK=256, num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_add_mean_mul_neg_2[grid(1)](buf3, arg1_1, buf1, 1,
1024, num_warps=8, num_stages=1)
del arg1_1
del buf1
return buf3,
class LabelSmoothingCrossEntropyNew(nn.Module):
""" NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.1, loss_weight=1.0, loss_name='loss_ce'):
super(LabelSmoothingCrossEntropyNew, self).__init__()
assert smoothing < 1.0
self.smoothing = smoothing
self.confidence = 1.0 - smoothing
self.loss_weight = loss_weight
self._loss_name = loss_name
@property
def loss_name(self):
"""Loss Name.
This function must be implemented and will return the name of this
loss function. This name will be used to combine different loss items
by simple sum operation. In addition, if you want this loss item to be
included into the backward graph, `loss_` must be the prefix of the
name.
Returns:
str: The name of this loss item.
"""
return self._loss_name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Molly6/segmentation_shengteng2021 | LabelSmoothingCrossEntropy | false | 8,569 | ["Apache-2.0"] | 21 | 33dfefa80193586f504069793d9e141944549e99 | https://github.com/Molly6/segmentation_shengteng2021/tree/33dfefa80193586f504069793d9e141944549e99 |
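For loss_weight=1 the loss above is (1 - eps) * NLL + eps * mean_k(-log p_k), which is what F.cross_entropy computes under label_smoothing=eps. A sketch of that equivalence on the bundled inputs, assuming both classes are in scope and a PyTorch new enough (>= 1.10) to support label_smoothing:

import torch
import torch.nn.functional as F

x, target = get_inputs()                                    # x: [N, C, H, W] with C=4 classes
logits = x.permute(1, 0, 2, 3).flatten(1).transpose(0, 1)   # [N*H*W, C], as in forward()
ref = F.cross_entropy(logits, target, label_smoothing=0.1)
got = LabelSmoothingCrossEntropy()(x, target)
assert torch.allclose(got, ref, atol=1e-6)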
ResidualBlock
|
import torch
from torch import nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
"""
Residual block from R2D3/IMPALA
Taken from [1,2]
"""
def __init__(self, num_channels, first_conv_weight_scale):
super().__init__()
self.conv1 = nn.Conv2d(num_channels, num_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.bias1 = nn.Parameter(torch.zeros([num_channels, 1, 1]))
self.bias2 = nn.Parameter(torch.zeros([num_channels, 1, 1]))
self.bias3 = nn.Parameter(torch.zeros([num_channels, 1, 1]))
self.bias4 = nn.Parameter(torch.zeros([num_channels, 1, 1]))
self.scale = nn.Parameter(torch.ones([num_channels, 1, 1]))
with torch.no_grad():
self.conv2.weight *= 0
self.conv1.weight *= first_conv_weight_scale
def forward(self, x):
x = F.relu(x, inplace=True)
original = x
x = x + self.bias1
x = self.conv1(x)
x = x + self.bias2
x = F.relu(x, inplace=True)
x = x + self.bias3
x = self.conv2(x)
x = x * self.scale
x = x + self.bias4
return original + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4, 'first_conv_weight_scale': 1.0}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = tmp4 <= tmp7
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mul_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp5 = tmp3 * tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp2 + tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
tl.store(out_ptr2 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_5, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_8, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_relu_0[grid(256)](primals_1, primals_2, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf1,
primals_4, primals_5, buf2, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_4
del primals_5
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf1
del buf1
triton_poi_fused_add_mul_relu_2[grid(256)](primals_1, buf3,
primals_7, primals_8, buf4, primals_1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
del primals_8
return buf4, primals_3, primals_6, primals_7, buf0, buf2, buf3, buf5
class ResidualBlockNew(nn.Module):
"""
Residual block from R2D3/IMPALA
Taken from [1,2]
"""
def __init__(self, num_channels, first_conv_weight_scale):
super().__init__()
self.conv1 = nn.Conv2d(num_channels, num_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.bias1 = nn.Parameter(torch.zeros([num_channels, 1, 1]))
self.bias2 = nn.Parameter(torch.zeros([num_channels, 1, 1]))
self.bias3 = nn.Parameter(torch.zeros([num_channels, 1, 1]))
self.bias4 = nn.Parameter(torch.zeros([num_channels, 1, 1]))
self.scale = nn.Parameter(torch.ones([num_channels, 1, 1]))
with torch.no_grad():
self.conv2.weight *= 0
self.conv1.weight *= first_conv_weight_scale
def forward(self, input_0):
primals_2 = self.bias1
primals_4 = self.bias2
primals_5 = self.bias3
primals_7 = self.bias4
primals_8 = self.scale
primals_3 = self.conv1.weight
primals_6 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| Miffyli/minecraft-bc-2020 | ResidualBlock | false | 8,570 | ["MIT"] | 11 | 94f8706e547474a2ed8cacd41bb20e59f672215f | https://github.com/Miffyli/minecraft-bc-2020/tree/94f8706e547474a2ed8cacd41bb20e59f672215f |
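Because conv2 is zero-initialized, a freshly constructed block is an identity on the ReLU of its input, up to the (zero-initialized) bias4 term — a sketch, assuming the eager class above is in scope:

import torch

block = ResidualBlock(num_channels=4, first_conv_weight_scale=1.0)
x = torch.randn(4, 4, 4, 4)
with torch.no_grad():
    out = block(x.clone())   # clone: forward() applies ReLU in place on its argument
    # conv2(...) is exactly zero, so (x * scale + bias4) reduces to bias4.
    assert torch.allclose(out, torch.relu(x) + block.bias4, atol=1e-6)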
Square
|
import torch
import torch.nn as nn
class Square(nn.Module):
def __init__(self):
super(Square, self).__init__()
def forward(self, X):
return torch.square(X)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SquareNew(nn.Module):
def __init__(self):
super(SquareNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Mrswolf/brainda | Square | false | 8,571 | ["MIT"] | 24 | cbd2fa6334d9e6243324dbaf086be4eb4047e801 | https://github.com/Mrswolf/brainda/tree/cbd2fa6334d9e6243324dbaf086be4eb4047e801 |
SpatialAttention
|
import torch
import torch.nn as nn
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
self.conv1 = nn.Conv1d(2, 1, kernel_size, padding=kernel_size // 2,
bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
def get_inputs():
return [torch.rand([4, 2, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64 % 2
x0 = xindex % 64
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 128 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (64 + x0 + 128 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = 2.0
tmp9 = tmp7 / tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp15 = tl.load(in_ptr0 + (x0 + 128 * x2), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr0 + (64 + x0 + 128 * x2), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp12, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp11, tmp19)
tl.store(out_ptr0 + x3, tmp20, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 64), (128, 64, 1))
assert_size_stride(primals_2, (1, 2, 7), (14, 7, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 64), (128, 64, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, buf0, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(3,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 64), (64, 64, 1))
buf2 = buf1
del buf1
triton_poi_fused_sigmoid_1[grid(256)](buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf2, primals_2, buf0, buf2
class SpatialAttentionNew(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttentionNew, self).__init__()
self.conv1 = nn.Conv1d(2, 1, kernel_size, padding=kernel_size // 2,
bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| Ming-er/NeuralNILM_Pytorch | SpatialAttention | false | 8,572 | ["MIT"] | 22 | 90123a3cf7d8dedc7f513ff784a45f178aa10a9d | https://github.com/Ming-er/NeuralNILM_Pytorch/tree/90123a3cf7d8dedc7f513ff784a45f178aa10a9d |
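A shape-and-range sketch for the record above (eager path, CPU is fine): the module collapses channels via mean and max, so the gate has a single channel, and the sigmoid keeps it in (0, 1):

import torch

sa = SpatialAttention(kernel_size=7)
m = sa(torch.rand(4, 2, 64))
assert m.shape == (4, 1, 64)
assert (m > 0).all() and (m < 1).all()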
weightedLoss
|
import torch
from torch import nn
class weightedLoss(nn.Module):
def __init__(self):
super().__init__()
self.thresholds = [0.5, 2, 5, 10, 30]
self.weights = [1, 1, 2, 5, 10, 30]
def forward(self, pred, label):
weights = torch.ones_like(pred) * 3
for i, threshold in enumerate(self.thresholds):
weights = weights + (self.weights[i + 1] - self.weights[i]) * (
label >= threshold).float()
mse = torch.sum(weights * (pred - label) ** 2, (1, 3, 4))
mae = torch.sum(weights * torch.abs(pred - label), (1, 3, 4))
return (torch.mean(mse) + torch.mean(mae)) * 5e-06
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_add_ge_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 >= tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = 0.0
tmp5 = tmp3 * tmp4
tmp6 = 3.0
tmp7 = tmp6 + tmp5
tmp8 = 2.0
tmp9 = tmp0 >= tmp8
tmp10 = tmp9.to(tl.float32)
tmp11 = 1.0
tmp12 = tmp10 * tmp11
tmp13 = tmp7 + tmp12
tmp14 = 5.0
tmp15 = tmp0 >= tmp14
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp16 * tmp6
tmp18 = tmp13 + tmp17
tmp19 = 10.0
tmp20 = tmp0 >= tmp19
tmp21 = tmp20.to(tl.float32)
tmp22 = tmp21 * tmp14
tmp23 = tmp18 + tmp22
tmp24 = 30.0
tmp25 = tmp0 >= tmp24
tmp26 = tmp25.to(tl.float32)
tmp27 = 20.0
tmp28 = tmp26 * tmp27
tmp29 = tmp23 + tmp28
tl.store(out_ptr0 + x2, tmp29, xmask)
@triton.jit
def triton_per_fused_abs_mul_pow_sub_sum_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex % 16
r3 = rindex // 16
x0 = xindex % 4
x1 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 64 * r3 + 256 * x1), xmask,
other=0.0)
tmp1 = tl.load(in_ptr1 + (r2 + 16 * x0 + 64 * r3 + 256 * x1), xmask,
other=0.0)
tmp2 = tl.load(in_ptr2 + (r2 + 16 * x0 + 64 * r3), xmask,
eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp5 = tmp0 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl_math.abs(tmp3)
tmp11 = tmp0 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + x4, tmp9, xmask)
tl.store(out_ptr1 + x4, tmp15, xmask)
@triton.jit
def triton_per_fused_add_mean_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp8 = 16.0
tmp9 = tmp3 / tmp8
tmp10 = tmp7 / tmp8
tmp11 = tmp9 + tmp10
tmp12 = 5e-06
tmp13 = tmp11 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_add_ge_mul_0[grid(1024)](arg1_1, buf0,
1024, XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_abs_mul_pow_sub_sum_1[grid(16)](buf0, arg0_1,
arg1_1, buf1, buf3, 16, 64, XBLOCK=8, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf5 = buf2
del buf2
triton_per_fused_add_mean_mul_2[grid(1)](buf5, buf1, buf3, 1, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del buf1
del buf3
return buf5,
class weightedLossNew(nn.Module):
def __init__(self):
super().__init__()
self.thresholds = [0.5, 2, 5, 10, 30]
self.weights = [1, 1, 2, 5, 10, 30]
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Mikubill/GAN-ConvLSTM | weightedLoss | false | 8,573 | ["MIT"] | 16 | 943525f62a3ab462a625c72534b3188cd583d839 | https://github.com/Mikubill/GAN-ConvLSTM/tree/943525f62a3ab462a625c72534b3188cd583d839 |
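The per-pixel weights above are piecewise constant in the label value: a base of 3 plus an increment of weights[i+1] - weights[i] at each threshold. Tabulated:

# label < 0.5        -> 3
# 0.5 <= label < 2   -> 3 + 0  = 3
# 2   <= label < 5   -> 3 + 1  = 4
# 5   <= label < 10  -> 4 + 3  = 7
# 10  <= label < 30  -> 7 + 5  = 12
# label >= 30        -> 12 + 20 = 32
# (the fused kernel bakes in the same increments: 0.0, 1.0, 3.0, 5.0, 20.0)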
Scaled_Dot_Product_Attention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Scaled_Dot_Product_Attention(nn.Module):
"""Scaled Dot-Product Attention """
def __init__(self):
super(Scaled_Dot_Product_Attention, self).__init__()
def forward(self, Q, K, V, scale=None):
"""
Args:
Q: [batch_size, len_Q, dim_Q]
K: [batch_size, len_K, dim_K]
V: [batch_size, len_V, dim_V]
            scale: scaling factor; the paper uses sqrt(dim_K)
Return:
            the context tensor after self-attention
"""
attention = torch.matmul(Q, K.permute(0, 2, 1))
if scale:
attention = attention * scale
attention = F.softmax(attention, dim=-1)
context = torch.matmul(attention, V)
return context
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
del buf2
return buf3,
class Scaled_Dot_Product_AttentionNew(nn.Module):
"""Scaled Dot-Product Attention """
def __init__(self):
super(Scaled_Dot_Product_AttentionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| NTDXYG/Text-Classify-based-pytorch | Scaled_Dot_Product_Attention | false | 8,574 | ["Apache-2.0"] | 20 | b12a264a0ea64b2f8b46fafd5383ef0a8025ef2f | https://github.com/NTDXYG/Text-Classify-based-pytorch/tree/b12a264a0ea64b2f8b46fafd5383ef0a8025ef2f |
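Note that Scaled_Dot_Product_AttentionNew above was traced with the default scale=None: call() never multiplies by a scale factor, and the compiled forward() no longer accepts one, so callers that rely on the optional scaling must keep the eager module.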
ResBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResBlock(nn.Module):
def __init__(self, in_channel, out_channel, ker_size, stri, pad):
super(ResBlock, self).__init__()
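        # Note: ker_size, stri and pad are accepted but never used; both convs below are hard-coded to kernel 3, stride 1, padding 1.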
self.conv1 = nn.Conv2d(in_channel, out_channel, 3, 1, 1)
self.conv2 = nn.Conv2d(out_channel, out_channel, 3, 1, 1)
def forward(self, x):
return self.conv2(F.relu(self.conv1(x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'ker_size': 4, 'stri':
4, 'pad': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1
class ResBlockNew(nn.Module):
def __init__(self, in_channel, out_channel, ker_size, stri, pad):
super(ResBlockNew, self).__init__()
self.conv1 = nn.Conv2d(in_channel, out_channel, 3, 1, 1)
self.conv2 = nn.Conv2d(out_channel, out_channel, 3, 1, 1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| NJUVISION/AWnet | ResBlock | false | 8,575 | ["MIT"] | 16 | f47a1692819a778b513b882d36ed727f7732d37b | https://github.com/NJUVISION/AWnet/tree/f47a1692819a778b513b882d36ed727f7732d37b |
AdaptiveInstanceNorm_H
|
import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
class AdaptiveInstanceNorm_H(nn.Module):
def __init__(self, in_channel, map_size):
super().__init__()
self.norm = nn.LayerNorm([map_size, map_size])
self.weight = nn.Parameter(1000.0 + torch.randn(1, in_channel, 1, 1))
self.beta = nn.Parameter(0.0 + torch.randn(1, in_channel, 1, 1))
def forward(self, input, style=0):
out = self.norm(input)
out = 0.01 * out + out.detach() * self.weight + self.beta
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'map_size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mul_native_layer_norm_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 0.01
tmp29 = tmp27 * tmp28
tmp31 = tmp27 * tmp30
tmp32 = tmp29 + tmp31
tmp34 = tmp32 + tmp33
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(in_out_ptr1 + (r1 + 16 * x0), tmp34, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = buf4
del buf4
get_raw_stream(0)
triton_per_fused_add_mul_native_layer_norm_0[grid(16)](buf3, buf5,
primals_3, primals_1, primals_2, primals_4, primals_5, buf0, 16,
16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_4
del primals_5
return buf5, primals_1, primals_2, primals_3, buf0, buf3
class AdaptiveInstanceNorm_HNew(nn.Module):
def __init__(self, in_channel, map_size):
super().__init__()
self.norm = nn.LayerNorm([map_size, map_size])
self.weight = nn.Parameter(1000.0 + torch.randn(1, in_channel, 1, 1))
self.beta = nn.Parameter(0.0 + torch.randn(1, in_channel, 1, 1))
def forward(self, input_0):
primals_4 = self.weight
primals_5 = self.beta
primals_1 = self.norm.weight
primals_2 = self.norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| MiaoyunZhao/GANTransferLimitedData | AdaptiveInstanceNorm_H | false | 8,576 | ["MIT"] | 41 | 5545bc37a1d7d4f28a9c3588aaa12a616bbddd88 | https://github.com/MiaoyunZhao/GANTransferLimitedData/tree/5545bc37a1d7d4f28a9c3588aaa12a616bbddd88 |
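The expression 0.01 * out + out.detach() * self.weight in the record above is worth a second look: gradients reach the LayerNorm only through the small 0.01 branch, while self.weight (initialized near 1000) scales a detached copy, so the normalized activations dominate the output magnitude without a correspondingly large backward signal through the normalization.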
Position_wise_Feed_Forward
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Position_wise_Feed_Forward(nn.Module):
def __init__(self, dim_model, hidden, dropout=0.0):
super(Position_wise_Feed_Forward, self).__init__()
self.fc1 = nn.Linear(dim_model, hidden)
self.fc2 = nn.Linear(hidden, dim_model)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(dim_model)
def forward(self, x):
out = self.fc1(x)
out = F.relu(out)
out = self.fc2(out)
out = self.dropout(out)
out = out + x
out = self.layer_norm(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_model': 4, 'hidden': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(64)](buf2, primals_3,
buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(256)](buf2, primals_3,
buf3, buf4, primals_6, primals_7, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
del buf4
del primals_7
return buf5, primals_3, primals_6, reinterpret_tensor(buf1, (64, 4), (4,
1), 0), buf2, primals_4, buf6
class Position_wise_Feed_ForwardNew(nn.Module):
def __init__(self, dim_model, hidden, dropout=0.0):
super(Position_wise_Feed_ForwardNew, self).__init__()
self.fc1 = nn.Linear(dim_model, hidden)
self.fc2 = nn.Linear(hidden, dim_model)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(dim_model)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
NTDXYG/Text-Classify-based-pytorch
|
Position_wise_Feed_Forward
| false
| 8,577
|
[
"Apache-2.0"
] | 20
|
b12a264a0ea64b2f8b46fafd5383ef0a8025ef2f
|
https://github.com/NTDXYG/Text-Classify-based-pytorch/tree/b12a264a0ea64b2f8b46fafd5383ef0a8025ef2f
|
CopyChannels
|
import torch
class CopyChannels(torch.nn.Module):
def __init__(self, multiple=3, dim=1):
super(CopyChannels, self).__init__()
self.multiple = multiple
self.dim = dim
def forward(self, x):
return torch.cat([x for _ in range(self.multiple)], dim=self.dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
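if __name__ == '__main__':
    # Editor's illustrative sketch (not from the original repo): the default
    # multiple=3 along dim=1 triples the channel count, e.g. for turning a
    # single-channel feature map into a 3-channel one.
    assert CopyChannels()(torch.rand(4, 4, 4, 4)).shape == (4, 12, 4, 4)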
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex // 192
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4, 4), (192, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(768)](arg0_1, buf0, 768, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 12, 4, 4), (192, 16, 4, 1), 0),
class CopyChannelsNew(torch.nn.Module):
def __init__(self, multiple=3, dim=1):
super(CopyChannelsNew, self).__init__()
self.multiple = multiple
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
NehzUx/autodl
|
CopyChannels
| false
| 8,578
|
[
"Apache-2.0"
] | 25
|
c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9
|
https://github.com/NehzUx/autodl/tree/c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9
|
BBoxTransform
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class BBoxTransform(nn.Module):
def forward(self, anchors, regression):
"""
decode_box_outputs adapted from https://github.com/google/automl/blob/master/efficientdet/anchors.py
Args:
anchors: [batchsize, boxes, (y1, x1, y2, x2)]
regression: [batchsize, boxes, (dy, dx, dh, dw)]
Returns:
[batchsize, boxes, (x1, y1, x2, y2)]
"""
y_centers_a = (anchors[..., 0] + anchors[..., 2]) / 2
x_centers_a = (anchors[..., 1] + anchors[..., 3]) / 2
ha = anchors[..., 2] - anchors[..., 0]
wa = anchors[..., 3] - anchors[..., 1]
w = regression[..., 3].exp() * wa
h = regression[..., 2].exp() * ha
y_centers = regression[..., 0] * ha + y_centers_a
x_centers = regression[..., 1] * wa + x_centers_a
ymin = y_centers - h / 2.0
xmin = x_centers - w / 2.0
ymax = y_centers + h / 2.0
xmax = x_centers + w / 2.0
return torch.stack([xmin, ymin, xmax, ymax], dim=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
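if __name__ == '__main__':
    # Editor's illustrative sketch (not from the original repo): with zero
    # regression deltas, exp(0) = 1 and the center shifts are 0, so decoding
    # returns the anchor itself, reordered from (y1, x1, y2, x2) to
    # (x1, y1, x2, y2).
    anchors = torch.tensor([[[1.0, 2.0, 5.0, 6.0]]])  # y1=1, x1=2, y2=5, x2=6
    boxes = BBoxTransform()(anchors, torch.zeros_like(anchors))
    assert torch.allclose(boxes, torch.tensor([[[2.0, 1.0, 6.0, 5.0]]]))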
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 - tmp7
tmp9 = tmp5 * tmp8
tmp10 = tmp7 + tmp6
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp14 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp15 * tmp8
tmp17 = tmp16 * tmp11
tmp18 = tmp13 - tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tmp22 = tl.full([1], 8, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (4 * (-4 + x0) + 16 * x1), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp26 = tl.load(in_ptr1 + (2 + 4 * (-4 + x0) + 16 * x1), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr1 + (4 * (-4 + x0) + 16 * x1), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 - tmp27
tmp29 = tmp25 * tmp28
tmp30 = tmp27 + tmp26
tmp31 = tmp30 * tmp11
tmp32 = tmp29 + tmp31
tmp33 = tl.load(in_ptr0 + (2 + 4 * (-4 + x0) + 16 * x1), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp34 * tmp28
tmp36 = tmp35 * tmp11
tmp37 = tmp32 - tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp24, tmp37, tmp38)
tmp40 = tmp0 >= tmp22
tmp41 = tl.full([1], 12, tl.int64)
tmp42 = tmp0 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tl.load(in_ptr0 + (1 + 4 * (-8 + x0) + 16 * x1), tmp43 & xmask,
eviction_policy='evict_last', other=0.0)
tmp45 = tl.load(in_ptr1 + (3 + 4 * (-8 + x0) + 16 * x1), tmp43 & xmask,
eviction_policy='evict_last', other=0.0)
tmp46 = tl.load(in_ptr1 + (1 + 4 * (-8 + x0) + 16 * x1), tmp43 & xmask,
eviction_policy='evict_last', other=0.0)
tmp47 = tmp45 - tmp46
tmp48 = tmp44 * tmp47
tmp49 = tmp46 + tmp45
tmp50 = tmp49 * tmp11
tmp51 = tmp48 + tmp50
tmp52 = tl.load(in_ptr0 + (3 + 4 * (-8 + x0) + 16 * x1), tmp43 & xmask,
eviction_policy='evict_last', other=0.0)
tmp53 = tl_math.exp(tmp52)
tmp54 = tmp53 * tmp47
tmp55 = tmp54 * tmp11
tmp56 = tmp51 + tmp55
tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
tmp58 = tl.where(tmp43, tmp56, tmp57)
tmp59 = tmp0 >= tmp41
tl.full([1], 16, tl.int64)
tmp62 = tl.load(in_ptr0 + (4 * (-12 + x0) + 16 * x1), tmp59 & xmask,
eviction_policy='evict_last', other=0.0)
tmp63 = tl.load(in_ptr1 + (2 + 4 * (-12 + x0) + 16 * x1), tmp59 & xmask,
eviction_policy='evict_last', other=0.0)
tmp64 = tl.load(in_ptr1 + (4 * (-12 + x0) + 16 * x1), tmp59 & xmask,
eviction_policy='evict_last', other=0.0)
tmp65 = tmp63 - tmp64
tmp66 = tmp62 * tmp65
tmp67 = tmp64 + tmp63
tmp68 = tmp67 * tmp11
tmp69 = tmp66 + tmp68
tmp70 = tl.load(in_ptr0 + (2 + 4 * (-12 + x0) + 16 * x1), tmp59 & xmask,
eviction_policy='evict_last', other=0.0)
tmp71 = tl_math.exp(tmp70)
tmp72 = tmp71 * tmp65
tmp73 = tmp72 * tmp11
tmp74 = tmp69 + tmp73
tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
tmp76 = tl.where(tmp59, tmp74, tmp75)
tmp77 = tl.where(tmp43, tmp58, tmp76)
tmp78 = tl.where(tmp24, tmp39, tmp77)
tmp79 = tl.where(tmp4, tmp20, tmp78)
tl.store(out_ptr0 + x2, tmp79, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(256)](arg1_1, arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class BBoxTransformNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
NHERI-SimCenter/BRAILS
|
BBoxTransform
| false
| 8,579
|
[
"BSD-3-Clause"
] | 22
|
ec17bcd000b15cb8c2933728fe2fd1fb190cd852
|
https://github.com/NHERI-SimCenter/BRAILS/tree/ec17bcd000b15cb8c2933728fe2fd1fb190cd852
|
BinaryCrossEntropyLabelSmooth
|
import torch
class BinaryCrossEntropyLabelSmooth(torch.nn.BCEWithLogitsLoss):
def __init__(self, num_classes, epsilon=0.1, weight=None, size_average=
None, reduce=None, reduction='mean', pos_weight=None):
super(BinaryCrossEntropyLabelSmooth, self).__init__(weight,
size_average, reduce, reduction, pos_weight)
self.num_classes = num_classes
self.epsilon = epsilon
def forward(self, input, target):
target = (1 - self.epsilon) * target + self.epsilon
return super(BinaryCrossEntropyLabelSmooth, self).forward(input, target
)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_classes': 4}]
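if __name__ == '__main__':
    # Editor's illustrative sketch (not from the original repo): with
    # epsilon=0.1, hard targets are smoothed to 0.9*t + 0.1, i.e. 0 -> 0.1 and
    # 1 -> 1.0, before BCE-with-logits is applied.
    crit = BinaryCrossEntropyLabelSmooth(num_classes=4, epsilon=0.1)
    loss = crit(torch.randn(2, 4),
                torch.tensor([[0.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0, 0.0]]))
    assert loss.dim() == 0  # scalar, mean reduction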
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mul_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp7 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.9
tmp2 = tmp0 * tmp1
tmp3 = 0.1
tmp4 = tmp2 + tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp8 = tmp6 * tmp7
tmp9 = 0.0
tmp10 = triton_helpers.minimum(tmp9, tmp7)
tmp11 = tl_math.abs(tmp7)
tmp12 = -tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = libdevice.log1p(tmp13)
tmp15 = tmp10 - tmp14
tmp16 = tmp8 - tmp15
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 256.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mul_0[grid(1)](
buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BinaryCrossEntropyLabelSmoothNew(torch.nn.BCEWithLogitsLoss):
def __init__(self, num_classes, epsilon=0.1, weight=None, size_average=
None, reduce=None, reduction='mean', pos_weight=None):
super(BinaryCrossEntropyLabelSmoothNew, self).__init__(weight,
size_average, reduce, reduction, pos_weight)
self.num_classes = num_classes
self.epsilon = epsilon
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
NehzUx/autodl
|
BinaryCrossEntropyLabelSmooth
| false
| 8,580
|
[
"Apache-2.0"
] | 25
|
c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9
|
https://github.com/NehzUx/autodl/tree/c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9
|
Conv2dStaticSamePadding
|
import math
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.transforms import functional as F  # NOTE: shadows the torch.nn import above, so F.pad below resolves to torchvision's pad (left, top, right, bottom order)
class Conv2dStaticSamePadding(nn.Module):
"""
created by Zylo117
The real keras/tensorflow conv2d with same padding
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
bias=True, groups=1, dilation=1, **kwargs):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, bias=bias, groups=groups)
self.stride = self.conv.stride
self.kernel_size = self.conv.kernel_size
self.dilation = self.conv.dilation
if isinstance(self.stride, int):
self.stride = [self.stride] * 2
elif len(self.stride) == 1:
self.stride = [self.stride[0]] * 2
if isinstance(self.kernel_size, int):
self.kernel_size = [self.kernel_size] * 2
elif len(self.kernel_size) == 1:
self.kernel_size = [self.kernel_size[0]] * 2
def forward(self, x):
h, w = x.shape[-2:]
extra_h = (math.ceil(w / self.stride[1]) - 1) * self.stride[1
] - w + self.kernel_size[1]
extra_v = (math.ceil(h / self.stride[0]) - 1) * self.stride[0
] - h + self.kernel_size[0]
left = extra_h // 2
right = extra_h - left
top = extra_v // 2
bottom = extra_v - top
x = F.pad(x, [left, right, top, bottom])
x = self.conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
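if __name__ == '__main__':
    # Editor's illustrative sketch (not from the original repo). Because the
    # torchvision import above shadows torch.nn.functional, F.pad interprets
    # the list as (left, top, right, bottom); the compiled kernel below
    # reproduces exactly that: a 4x4 input is padded to 8x6 and a 4x4 kernel
    # then yields a 5x3 output.
    conv = Conv2dStaticSamePadding(4, 4, kernel_size=4)
    print(conv(torch.rand(4, 4, 4, 4)).shape)  # torch.Size([4, 4, 5, 3])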
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 8
x0 = xindex % 6
x2 = xindex // 48
x4 = xindex
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-9 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 15 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 6), (192, 48, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(768)](primals_1, buf0, 768,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 3), (60, 15, 3, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(240)](buf2, primals_3, 240,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class Conv2dStaticSamePaddingNew(nn.Module):
"""
created by Zylo117
The real keras/tensorflow conv2d with same padding
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
bias=True, groups=1, dilation=1, **kwargs):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, bias=bias, groups=groups)
self.stride = self.conv.stride
self.kernel_size = self.conv.kernel_size
self.dilation = self.conv.dilation
if isinstance(self.stride, int):
self.stride = [self.stride] * 2
elif len(self.stride) == 1:
self.stride = [self.stride[0]] * 2
if isinstance(self.kernel_size, int):
self.kernel_size = [self.kernel_size] * 2
elif len(self.kernel_size) == 1:
self.kernel_size = [self.kernel_size[0]] * 2
def forward(self, input_0):
primals_1 = self.conv.weight
primals_3 = self.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
NaCl-Ocean/Anchor_free_detection_rotation
|
Conv2dStaticSamePadding
| false
| 8,581
|
[
"MIT"
] | 12
|
358d9f5df1beabc7a05a352d2cfa2283b17825a9
|
https://github.com/NaCl-Ocean/Anchor_free_detection_rotation/tree/358d9f5df1beabc7a05a352d2cfa2283b17825a9
|
TestTimeIN
|
import torch
import torch.nn as nn
import torch.optim
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
class TestTimeIN(nn.BatchNorm2d):
def __init__(self, num_features: 'int', eps: 'float'=1e-05, momentum:
'float'=1, affine: 'bool'=True, track_running_stats: 'bool'=True):
super().__init__(num_features, eps, momentum, affine,
track_running_stats)
def forward(self, target_input):
target_input.numel() / target_input.size(1)  # no-op: the result is unused
with torch.no_grad():
target_instance_var = target_input.var([2, 3], unbiased=False)[
:, :, None, None]
target_instance_mean = target_input.mean([2, 3])[:, :, None, None]
weight = self.weight[None, :, None, None]
bias = self.bias[None, :, None, None]
target_input = weight * (target_input - target_instance_mean
) / torch.sqrt(target_instance_var + self.eps) + bias
target_input = torch.clamp(target_input, max=1)
return target_input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
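if __name__ == '__main__':
    # Editor's illustrative sketch (not from the original repo): per-sample,
    # per-channel (instance) statistics are used at test time, and the output
    # is clamped from above at 1.
    norm = TestTimeIN(num_features=4)
    out = norm(torch.rand(4, 4, 4, 4) * 10)
    assert out.max().item() <= 1.0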
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.optim
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp19 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp20 = 16.0
tmp21 = tmp4 / tmp20
tmp22 = tmp0 - tmp21
tmp23 = tmp19 * tmp22
tmp24 = tmp18 / tmp20
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tmp23 / tmp27
tmp30 = tmp28 + tmp29
tmp31 = 1.0
tmp32 = triton_helpers.minimum(tmp30, tmp31)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp32, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4,), (1,))
assert_size_stride(arg2_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_clamp_div_mean_mul_sqrt_sub_var_0[grid(16)](arg0_1
, arg1_1, arg2_1, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1
)
del arg0_1
del arg1_1
del arg2_1
return buf4,
class TestTimeINNew(nn.BatchNorm2d):
def __init__(self, num_features: 'int', eps: 'float'=1e-05, momentum:
'float'=1, affine: 'bool'=True, track_running_stats: 'bool'=True):
super().__init__(num_features, eps, momentum, affine,
track_running_stats)
def forward(self, input_0):
arg1_1 = self.weight
arg2_1 = self.bias
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
MosyMosy/Pytorch_ImaneNet_With_wandb
|
TestTimeIN
| false
| 8,582
|
[
"MIT"
] | 30
|
b7b6e245e29ec342212025b8164e5053d4197fa1
|
https://github.com/MosyMosy/Pytorch_ImaneNet_With_wandb/tree/b7b6e245e29ec342212025b8164e5053d4197fa1
|
MaxNormConstraintConv2d
|
import torch
import torch.nn as nn
class MaxNormConstraintConv2d(nn.Conv2d):
def __init__(self, *args, max_norm_value=1, norm_axis=2, **kwargs):
self.max_norm_value = max_norm_value
self.norm_axis = norm_axis
super().__init__(*args, **kwargs)
def forward(self, input):
self.weight.data = self._max_norm(self.weight.data)
return super().forward(input)
def _max_norm(self, w):
with torch.no_grad():
norms = torch.sqrt(torch.sum(torch.square(w), dim=self.
norm_axis, keepdim=True))
desired = torch.clamp(norms, 0, self.max_norm_value)
w *= desired / norms
return w
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
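if __name__ == '__main__':
    # Editor's illustrative sketch (not from the original repo): every forward
    # pass renormalizes the kernel so its L2 norm along norm_axis is at most
    # max_norm_value.
    conv = MaxNormConstraintConv2d(4, 4, 4, max_norm_value=1, norm_axis=2)
    conv(torch.rand(4, 4, 4, 4))
    norms = torch.sqrt(torch.sum(conv.weight.data ** 2, dim=2))
    assert norms.max().item() <= 1.0 + 1e-6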
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 0.0
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = 1.0
tmp16 = triton_helpers.minimum(tmp14, tmp15)
tmp17 = tmp16 / tmp12
tmp18 = tmp0 * tmp17
tl.store(out_ptr0 + x3, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_mul_pow_sqrt_sum_0[grid(256)](primals_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_convolution_1[grid(16)](buf1, primals_2, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf3 = torch.ops.aten.set_.source_Tensor(primals_1, buf0)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
del buf1
del primals_1
return buf2, primals_3, buf0
class MaxNormConstraintConv2dNew(nn.Conv2d):
def __init__(self, *args, max_norm_value=1, norm_axis=2, **kwargs):
self.max_norm_value = max_norm_value
self.norm_axis = norm_axis
super().__init__(*args, **kwargs)
def _max_norm(self, w):
with torch.no_grad():
norms = torch.sqrt(torch.sum(torch.square(w), dim=self.
norm_axis, keepdim=True))
desired = torch.clamp(norms, 0, self.max_norm_value)
w *= desired / norms
return w
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Mrswolf/brainda
|
MaxNormConstraintConv2d
| false
| 8,583
|
[
"MIT"
] | 24
|
cbd2fa6334d9e6243324dbaf086be4eb4047e801
|
https://github.com/Mrswolf/brainda/tree/cbd2fa6334d9e6243324dbaf086be4eb4047e801
|
FeedForwardBlock
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
class PositionwiseFeedForward(nn.Module):
def __init__(self, config):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(config.d_model, config.d_ff)
self.w_2 = nn.Linear(config.d_ff, config.d_model)
self.dropout = nn.Dropout(p=config.dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class FeedForwardBlock(nn.Module):
def __init__(self, config):
super(FeedForwardBlock, self).__init__()
self.norm = nn.LayerNorm(config.d_model)
self.feed_forward = PositionwiseFeedForward(config)
self.dropout = nn.Dropout(p=config.dropout)
def forward(self, x):
x_ = self.norm(x)
x_ = self.feed_forward(x_)
return self.dropout(x_) + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(d_model=4, d_ff=4, dropout=0.5)}]
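if __name__ == '__main__':
    # Editor's illustrative sketch (not from the original repo); relies on the
    # _mock_config helper imported above. The pre-norm residual block keeps the
    # output shape equal to the input shape.
    block = FeedForwardBlock(_mock_config(d_model=4, d_ff=4, dropout=0.5))
    block.eval()  # disable dropout for a deterministic check
    assert block(torch.rand(4, 4, 4, 4)).shape == (4, 4, 4, 4)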
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(256)](buf4,
primals_5, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), reinterpret_tensor(buf4, (64, 4), (4, 1), 0
), primals_6, buf7, primals_4
class PositionwiseFeedForward(nn.Module):
def __init__(self, config):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(config.d_model, config.d_ff)
self.w_2 = nn.Linear(config.d_ff, config.d_model)
self.dropout = nn.Dropout(p=config.dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class FeedForwardBlockNew(nn.Module):
def __init__(self, config):
super(FeedForwardBlockNew, self).__init__()
self.norm = nn.LayerNorm(config.d_model)
self.feed_forward = PositionwiseFeedForward(config)
self.dropout = nn.Dropout(p=config.dropout)
def forward(self, input_0):
primals_1 = self.norm.weight
primals_2 = self.norm.bias
primals_4 = self.feed_forward.w_1.weight
primals_5 = self.feed_forward.w_1.bias
primals_6 = self.feed_forward.w_2.weight
primals_7 = self.feed_forward.w_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
MSU-MLSys-Lab/CATE
|
FeedForwardBlock
| false
| 8,584
|
[
"Apache-2.0"
] | 15
|
654c393d7df888d2c3f3b90f9e6752faa061157e
|
https://github.com/MSU-MLSys-Lab/CATE/tree/654c393d7df888d2c3f3b90f9e6752faa061157e
|
SmoothL1loss_with_weight
|
import torch
from torch import nn
class SmoothL1loss_with_weight(nn.Module):
def __init__(self):
super(SmoothL1loss_with_weight, self).__init__()
def forward(self, pred, targets, weights):
assert pred.shape[0] == targets.shape[0] == weights.shape[0]
loss = nn.SmoothL1Loss(reduction='none')(pred, targets)
loss = loss.sum(dim=-1) * weights
loss = loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
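if __name__ == '__main__':
    # Editor's illustrative sketch (not from the original repo): identical
    # predictions and targets give zero per-element loss, so the weighted sum
    # is zero for any weights.
    x = torch.rand(4, 4, 4, 4)
    assert SmoothL1loss_with_weight()(x, x, torch.rand(4, 4, 4, 4)).item() == 0.0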
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_smooth_l1_loss_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp35 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = tmp3 * tmp3
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp8 * tmp4
tmp10 = tmp3 - tmp7
tmp11 = tl.where(tmp5, tmp9, tmp10)
tmp14 = tmp12 - tmp13
tmp15 = tl_math.abs(tmp14)
tmp16 = tmp15 < tmp4
tmp17 = tmp15 * tmp15
tmp18 = tmp17 * tmp7
tmp19 = tmp18 * tmp4
tmp20 = tmp15 - tmp7
tmp21 = tl.where(tmp16, tmp19, tmp20)
tmp22 = tmp11 + tmp21
tmp25 = tmp23 - tmp24
tmp26 = tl_math.abs(tmp25)
tmp27 = tmp26 < tmp4
tmp28 = tmp26 * tmp26
tmp29 = tmp28 * tmp7
tmp30 = tmp29 * tmp4
tmp31 = tmp26 - tmp7
tmp32 = tl.where(tmp27, tmp30, tmp31)
tmp33 = tmp22 + tmp32
tmp36 = tmp34 - tmp35
tmp37 = tl_math.abs(tmp36)
tmp38 = tmp37 < tmp4
tmp39 = tmp37 * tmp37
tmp40 = tmp39 * tmp7
tmp41 = tmp40 * tmp4
tmp42 = tmp37 - tmp7
tmp43 = tl.where(tmp38, tmp41, tmp42)
tmp44 = tmp33 + tmp43
tl.store(out_ptr0 + x0, tmp44, xmask)
@triton.jit
def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + r2, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_smooth_l1_loss_sum_0[grid(64)](arg0_1, arg1_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_mul_sum_1[grid(1)](buf0, arg2_1, buf1, 1, 256,
num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf1,
class SmoothL1loss_with_weightNew(nn.Module):
def __init__(self):
super(SmoothL1loss_with_weightNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
NaCl-Ocean/Anchor_free_detection_rotation
|
SmoothL1loss_with_weight
| false
| 8,585
|
[
"MIT"
] | 12
|
358d9f5df1beabc7a05a352d2cfa2283b17825a9
|
https://github.com/NaCl-Ocean/Anchor_free_detection_rotation/tree/358d9f5df1beabc7a05a352d2cfa2283b17825a9
|
SoftHistogram
|
import torch
class SoftHistogram(torch.nn.Module):
"""
Motivated by https://discuss.pytorch.org/t/differentiable-torch-histc/25865/3
"""
def __init__(self, bins, min_bin_edge, max_bin_edge, sigma):
super(SoftHistogram, self).__init__()
self.sigma = sigma
self.delta = float(max_bin_edge - min_bin_edge) / float(bins)
self.centers = float(min_bin_edge) + self.delta * (torch.arange(
bins).float() + 0.5)
self.centers = torch.nn.Parameter(self.centers, requires_grad=False)
def forward(self, x):
x = torch.unsqueeze(x, 0) - torch.unsqueeze(self.centers, 1)
x = torch.sigmoid(self.sigma * (x + self.delta / 2)) - torch.sigmoid(
self.sigma * (x - self.delta / 2))
x = x.sum(dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'bins': 4, 'min_bin_edge': 4, 'max_bin_edge': 4, 'sigma': 4}]
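if __name__ == '__main__':
    # Editor's illustrative sketch (not from the original repo): unlike
    # torch.histc, the sigmoid-based soft counts are differentiable.
    hist = SoftHistogram(bins=8, min_bin_edge=0.0, max_bin_edge=1.0, sigma=100)
    x = torch.rand(100, requires_grad=True)
    hist(x).sum().backward()
    assert x.grad is not None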
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sub_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (64 + x3), xmask)
tmp22 = tl.load(in_ptr0 + (128 + x3), xmask)
tmp32 = tl.load(in_ptr0 + (192 + x3), xmask)
tmp2 = tmp0 - tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tmp5 = 4.0
tmp6 = tmp4 * tmp5
tmp7 = tl.sigmoid(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tmp8 * tmp5
tmp10 = tl.sigmoid(tmp9)
tmp11 = tmp7 - tmp10
tmp13 = tmp12 - tmp1
tmp14 = tmp13 + tmp3
tmp15 = tmp14 * tmp5
tmp16 = tl.sigmoid(tmp15)
tmp17 = tmp13 - tmp3
tmp18 = tmp17 * tmp5
tmp19 = tl.sigmoid(tmp18)
tmp20 = tmp16 - tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 - tmp1
tmp24 = tmp23 + tmp3
tmp25 = tmp24 * tmp5
tmp26 = tl.sigmoid(tmp25)
tmp27 = tmp23 - tmp3
tmp28 = tmp27 * tmp5
tmp29 = tl.sigmoid(tmp28)
tmp30 = tmp26 - tmp29
tmp31 = tmp21 + tmp30
tmp33 = tmp32 - tmp1
tmp34 = tmp33 + tmp3
tmp35 = tmp34 * tmp5
tmp36 = tl.sigmoid(tmp35)
tmp37 = tmp33 - tmp3
tmp38 = tmp37 * tmp5
tmp39 = tl.sigmoid(tmp38)
tmp40 = tmp36 - tmp39
tmp41 = tmp31 + tmp40
tl.store(out_ptr0 + x3, tmp41, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sub_sum_0[grid(64)](arg0_1, arg1_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class SoftHistogramNew(torch.nn.Module):
"""
Motivated by https://discuss.pytorch.org/t/differentiable-torch-histc/25865/3
"""
def __init__(self, bins, min_bin_edge, max_bin_edge, sigma):
super(SoftHistogramNew, self).__init__()
self.sigma = sigma
self.delta = float(max_bin_edge - min_bin_edge) / float(bins)
self.centers = float(min_bin_edge) + self.delta * (torch.arange(
bins).float() + 0.5)
self.centers = torch.nn.Parameter(self.centers, requires_grad=False)
def forward(self, input_0):
arg1_1 = self.centers
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
|
NiallJeffrey/DeepMass
|
SoftHistogram
| false
| 8,586
|
[
"MIT"
] | 13
|
6bf11bd08082562161a2f91cd40dc57abba12396
|
https://github.com/NiallJeffrey/DeepMass/tree/6bf11bd08082562161a2f91cd40dc57abba12396
|
FocalLoss
|
import torch
import torch.utils.data
import torch
import torch._utils
import torch.nn as nn
class FocalLoss(nn.Module):
def __init__(self, gamma=0, eps=1e-07):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.eps = eps
self.ce = torch.nn.CrossEntropyLoss()
def forward(self, input, target):
logp = self.ce(input, target)
p = torch.exp(-logp)
loss = (1 - p) ** self.gamma * logp
return loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
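if __name__ == '__main__':
    # Editor's illustrative sketch (not from the original repo): with the
    # default gamma=0 the modulating factor (1 - p)**gamma equals 1, so the
    # focal loss reduces to plain cross-entropy.
    logits, labels = torch.randn(8, 4), torch.randint(0, 4, (8,))
    assert torch.allclose(FocalLoss(gamma=0)(logits, labels),
                          torch.nn.CrossEntropyLoss()(logits, labels))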
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch
import torch._utils
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = -tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = 1.0
tmp24 - tmp23  # dead code: (1 - p) is discarded because gamma == 0 makes the focal factor 1
tmp26 = tmp24 * tmp21
tmp27 = tmp26 / tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp27, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1[grid
(1)](buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf2,
class FocalLossNew(nn.Module):
def __init__(self, gamma=0, eps=1e-07):
super(FocalLossNew, self).__init__()
self.gamma = gamma
self.eps = eps
self.ce = torch.nn.CrossEntropyLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Mukosame/AODA
|
FocalLoss
| false
| 8,587
|
[
"BSD-3-Clause"
] | 43
|
c187e5ff0a6502a9166da37a213ee259afa60903
|
https://github.com/Mukosame/AODA/tree/c187e5ff0a6502a9166da37a213ee259afa60903
|
ConvEncoder
|
import torch
from torch import nn
import torch.nn.functional as F
class ConvEncoder(nn.Module):
def __init__(self, input_dim=512, output_dim=512, kernel_size=1,
init_scale=1.0, no_weight_init=False):
super(ConvEncoder, self).__init__()
self.conv = nn.Conv1d(input_dim, output_dim, kernel_size=kernel_size)
if not no_weight_init:
for layer in (self.conv,):
nn.init.orthogonal_(layer.weight, init_scale)
with torch.no_grad():
layer.bias.zero_()
def forward(self, x):
_B, _D, _L = x.size()
x = self.conv(x)
x = F.relu(x)
return x.flatten(1)
def get_inputs():
return [torch.rand([4, 512, 64])]
def get_init_inputs():
return [[], {}]
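if __name__ == '__main__':
    # Editor's illustrative sketch (not from the original repo): a
    # kernel_size=1 Conv1d keeps the sequence length, and flatten(1) yields
    # (batch, output_dim * length).
    enc = ConvEncoder()
    assert enc(torch.rand(4, 512, 64)).shape == (4, 512 * 64)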
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, None)
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 512, 64), (32768, 64, 1))
assert_size_stride(primals_2, (512, 512, 1), (512, 1, 1))
assert_size_stride(primals_3, (512,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 512, 64), (32768, 64, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 512, 64), (32768, 64, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(131072)](
buf1, primals_3, buf2, 131072, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_3
return reinterpret_tensor(buf1, (4, 32768), (32768, 1), 0
), primals_1, primals_2, buf2
class ConvEncoderNew(nn.Module):
def __init__(self, input_dim=512, output_dim=512, kernel_size=1,
init_scale=1.0, no_weight_init=False):
super(ConvEncoderNew, self).__init__()
self.conv = nn.Conv1d(input_dim, output_dim, kernel_size=kernel_size)
if not no_weight_init:
for layer in (self.conv,):
nn.init.orthogonal_(layer.weight, init_scale)
with torch.no_grad():
layer.bias.zero_()
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
KH-Kyle/rmp_nav
|
ConvEncoder
| false
| 8,588
|
[
"MIT"
] | 30
|
d598fe70664a4cdc0e9b9dd4b52e84aa3de1b551
|
https://github.com/KH-Kyle/rmp_nav/tree/d598fe70664a4cdc0e9b9dd4b52e84aa3de1b551
|