| entry_point (string, 1-65 chars) | original_triton_python_code (string, 208-619k chars) | optimised_triton_code (string, 1.15k-275k chars) | repo_name (string, 7-115 chars) | module_name (string, 1-65 chars) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (list, 1-6 entries) | stars (int64, 0-19.8k) | sha (string, 40 chars) | repo_link (string, 72-180 chars) |
|---|---|---|---|---|---|---|---|---|---|---|
Attention
|
import math
import torch
from torch import nn
class Attention(nn.Module):
"""A generic attention module for a decoder in seq2seq"""
def __init__(self, dim, use_tanh=False, C=10):
super(Attention, self).__init__()
self.use_tanh = use_tanh
self.project_query = nn.Linear(dim, dim)
self.project_ref = nn.Conv1d(dim, dim, 1, 1)
self.C = C
self.tanh = nn.Tanh()
self.v = nn.Parameter(torch.FloatTensor(dim))
self.v.data.uniform_(-(1.0 / math.sqrt(dim)), 1.0 / math.sqrt(dim))
def forward(self, query, ref):
"""
Args:
query: is the hidden state of the decoder at the current
time step. batch x dim
ref: the set of hidden states from the encoder.
sourceL x batch x hidden_dim
"""
ref = ref.permute(1, 2, 0)
q = self.project_query(query).unsqueeze(2)
e = self.project_ref(ref)
expanded_q = q.repeat(1, 1, e.size(2))
v_view = self.v.unsqueeze(0).expand(expanded_q.size(0), len(self.v)
).unsqueeze(1)
u = torch.bmm(v_view, self.tanh(expanded_q + e)).squeeze(1)
if self.use_tanh:
logits = self.C * self.tanh(u)
else:
logits = u
return e, logits
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_convolution_repeat_tanh_1(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 4 % 4
x3 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp2
tmp5 = libdevice.tanh(tmp4)
tl.store(in_out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr0 + x4, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf1, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_5, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = buf2
del buf2
buf4 = buf1
del buf1
triton_poi_fused_add_convolution_repeat_tanh_1[grid(64)](buf3,
primals_6, buf0, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_6
buf5 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0)
del buf0
extern_kernels.bmm(reinterpret_tensor(primals_7, (4, 1, 4), (0, 0,
1), 0), buf4, out=buf5)
return buf3, reinterpret_tensor(buf5, (4, 4), (4, 1), 0
), primals_4, primals_5, primals_7, reinterpret_tensor(primals_1, (
4, 4, 4), (4, 1, 16), 0), buf4
class AttentionNew(nn.Module):
"""A generic attention module for a decoder in seq2seq"""
def __init__(self, dim, use_tanh=False, C=10):
super(AttentionNew, self).__init__()
self.use_tanh = use_tanh
self.project_query = nn.Linear(dim, dim)
self.project_ref = nn.Conv1d(dim, dim, 1, 1)
self.C = C
self.tanh = nn.Tanh()
self.v = nn.Parameter(torch.FloatTensor(dim))
self.v.data.uniform_(-(1.0 / math.sqrt(dim)), 1.0 / math.sqrt(dim))
def forward(self, input_0, input_1):
primals_3 = self.v
primals_2 = self.project_query.weight
primals_6 = self.project_query.bias
primals_5 = self.project_ref.weight
primals_7 = self.project_ref.bias
primals_4 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
|
iamstevepaul/MRTA-Attention
|
Attention
| false
| 10,230
|
[
"MIT"
] | 0
|
fc177440f7354212c41ad02ef76fdda43cc0aa57
|
https://github.com/iamstevepaul/MRTA-Attention/tree/fc177440f7354212c41ad02ef76fdda43cc0aa57
|
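A row like the one above can be exercised end to end by loading the eager module's weights into the generated wrapper and comparing outputs. A minimal sketch, assuming both classes and `get_inputs` from this row are in scope and a CUDA device is available (the generated `call()` allocates CUDA buffers directly); it prints the maximum deviations rather than asserting, since bitwise agreement depends on the wrapper's parameter mapping and fusion order:

```python
import torch

torch.manual_seed(0)
eager = Attention(dim=4).cuda()
generated = AttentionNew(dim=4).cuda()
generated.load_state_dict(eager.state_dict())  # identical parameter names

query, ref = [t.cuda() for t in get_inputs()]  # batch x dim, sourceL x batch x dim
e_ref, logits_ref = eager(query, ref)
e_gen, logits_gen = generated(query, ref)
print((e_ref - e_gen).abs().max().item(),
      (logits_ref - logits_gen).abs().max().item())
```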
AugCNN
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def apply_init_(modules):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
class AugCNN(nn.Module):
"""
Convolutional Neural Network used as Augmentation
"""
def __init__(self):
super(AugCNN, self).__init__()
self.aug = Conv2d_tf(3, 3, kernel_size=3)
apply_init_(self.modules())
self.train()
def forward(self, obs):
return self.aug(obs)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(49152)](buf1, primals_3, 49152,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_3
return buf1, primals_1, primals_2
def apply_init_(modules):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
class AugCNNNew(nn.Module):
"""
Convolutional Neural Network used as Augmentation
"""
def __init__(self):
super(AugCNNNew, self).__init__()
self.aug = Conv2d_tf(3, 3, kernel_size=3)
apply_init_(self.modules())
self.train()
def forward(self, input_0):
primals_2 = self.aug.weight
primals_3 = self.aug.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
jajajag/auto-drac
|
AugCNN
| false
| 10,231
|
[
"MIT"
] | 0
|
2241f9f5f10a4d863a8b9d198da1d39e5feb59a0
|
https://github.com/jajajag/auto-drac/tree/2241f9f5f10a4d863a8b9d198da1d39e5feb59a0
|
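The `padding=(1, 1)` passed to `extern_kernels.convolution` in the generated `call()` is exactly what `Conv2d_tf`'s SAME arithmetic yields for this row's shapes. The computation, worked through on CPU:

```python
# SAME-padding arithmetic from Conv2d_tf._compute_padding for the
# 64x64 input, 3x3 kernel, stride 1, dilation 1 used in this row:
input_size, filter_size, stride, dilation = 64, 3, 1, 1
effective_filter_size = (filter_size - 1) * dilation + 1    # 3
out_size = (input_size + stride - 1) // stride              # 64
total_padding = max(0, (out_size - 1) * stride
                    + effective_filter_size - input_size)   # 2
additional_padding = int(total_padding % 2 != 0)            # 0 -> no F.pad call
print(additional_padding, total_padding // 2)               # 0 1 -> padding=(1, 1)
```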
MeanAct
|
import torch
import torch.nn as nn
class MeanAct(nn.Module):
def __init__(self):
super(MeanAct, self).__init__()
def forward(self, x):
return torch.clamp(torch.exp(x), min=1e-05, max=1000000.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = 1e-05
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = 1000000.0
tmp5 = triton_helpers.minimum(tmp3, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_exp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MeanActNew(nn.Module):
def __init__(self):
super(MeanActNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
jdasam/scDCC
|
MeanAct
| false
| 10,232
|
[
"Apache-2.0"
] | 0
|
8ebaed766db5ad56021983ebc13e9a60b6c7b453
|
https://github.com/jdasam/scDCC/tree/8ebaed766db5ad56021983ebc13e9a60b6c7b453
|
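The single fused kernel above expresses `torch.clamp` as a `maximum` followed by a `minimum`, which is the identity inductor relies on here. A quick CPU check:

```python
import torch

x = torch.randn(16)
fused = torch.minimum(torch.maximum(torch.exp(x), torch.tensor(1e-05)),
                      torch.tensor(1000000.0))
assert torch.allclose(fused, torch.clamp(torch.exp(x), min=1e-05, max=1000000.0))
```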
BatchDense
|
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class BatchDense(nn.Module):
def __init__(self, batch, in_features, out_features, bias_init=None):
super(BatchDense, self).__init__()
self.batch = batch
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(batch, in_features, out_features))
self.bias = Parameter(torch.Tensor(batch, 1, out_features))
self.reset_parameters(bias_init)
def reset_parameters(self, bias_init=None):
stdv = math.sqrt(6.0 / (self.in_features + self.out_features))
self.weight.data.uniform_(-stdv, stdv)
if bias_init is not None:
self.bias.data = torch.from_numpy(bias_init)
else:
self.bias.data.fill_(0)
def forward(self, x):
        x.size()  # no-op in the source; the result is never used
x = x.view(x.size(0), self.batch, -1)
out = x.transpose(0, 1).contiguous()
out = torch.baddbmm(self.bias, out, self.weight)
out = out.transpose(0, 1).contiguous()
out = out.view(x.size(0), -1)
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'batch': 4, 'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_transpose_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf0, primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(64)](primals_2, buf1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0)
del buf1
triton_poi_fused_transpose_2[grid(64)](buf0, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
return reinterpret_tensor(buf2, (4, 16), (16, 1), 0), buf3
class BatchDenseNew(nn.Module):
def __init__(self, batch, in_features, out_features, bias_init=None):
super(BatchDenseNew, self).__init__()
self.batch = batch
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(batch, in_features, out_features))
self.bias = Parameter(torch.Tensor(batch, 1, out_features))
self.reset_parameters(bias_init)
def reset_parameters(self, bias_init=None):
stdv = math.sqrt(6.0 / (self.in_features + self.out_features))
self.weight.data.uniform_(-stdv, stdv)
if bias_init is not None:
self.bias.data = torch.from_numpy(bias_init)
else:
self.bias.data.fill_(0)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
iloncka/neurotrees
|
BatchDense
| false
| 10,233
|
[
"MIT"
] | 0
|
ddb52dc0e7ac1cf67a426b401ba06149807e03ec
|
https://github.com/iloncka/neurotrees/tree/ddb52dc0e7ac1cf67a426b401ba06149807e03ec
|
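The graph splits `torch.baddbmm` into an extern `bmm` plus the fused bias add in `triton_poi_fused_clone_1`. The identity it relies on, checked on CPU with this row's shapes:

```python
import torch

bias = torch.rand(4, 1, 4)  # (batch, 1, out_features), broadcast over rows
x_t = torch.rand(4, 4, 4)   # transposed input batch
w = torch.rand(4, 4, 4)     # (batch, in_features, out_features)
assert torch.allclose(torch.baddbmm(bias, x_t, w), bias + torch.bmm(x_t, w))
```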
VAE
|
import torch
import torch.utils.data
from torch import nn
from torch.nn import functional as F
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.input_linear = nn.Linear(4297, 2000)
self.enc_middle = nn.Linear(2000, 100)
self.enc_1 = nn.Linear(100, 5)
self.enc_2 = nn.Linear(100, 5)
self.dec_0 = nn.Linear(5, 100)
self.dec_middle = nn.Linear(100, 2000)
self.output_linear = nn.Linear(2000, 4297)
def encode(self, x):
h1 = F.relu(self.input_linear(x))
h2 = F.relu(self.enc_middle(h1))
return self.enc_1(h2), self.enc_2(h2)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
def decode(self, z):
h3 = F.relu(self.dec_0(z))
h4 = F.relu(self.dec_middle(h3))
return torch.sigmoid(self.output_linear(h4))
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 4297))
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
def get_inputs():
return [torch.rand([4, 4297])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 8000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = xindex // 2000
tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_exp_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp0 * tmp4
tmp7 = tmp5 + tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 17188
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4297
x1 = xindex // 4297
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4320 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 4297), (4297, 1))
assert_size_stride(primals_2, (2000, 4297), (4297, 1))
assert_size_stride(primals_3, (2000,), (1,))
assert_size_stride(primals_4, (100, 2000), (2000, 1))
assert_size_stride(primals_5, (100,), (1,))
assert_size_stride(primals_6, (5, 100), (100, 1))
assert_size_stride(primals_7, (5,), (1,))
assert_size_stride(primals_8, (5, 100), (100, 1))
assert_size_stride(primals_9, (5,), (1,))
assert_size_stride(primals_10, (100, 5), (5, 1))
assert_size_stride(primals_11, (100,), (1,))
assert_size_stride(primals_12, (2000, 100), (100, 1))
assert_size_stride(primals_13, (2000,), (1,))
assert_size_stride(primals_14, (4297, 2000), (2000, 1))
assert_size_stride(primals_15, (4297,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2000), (2016, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4297,
2000), (1, 4297), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(8000)](buf1, primals_3, 8000, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (2000, 100),
(1, 2000), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(400)](buf3, primals_5, 400, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(100, 5), (1, 100), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8,
(100, 5), (1, 100), 0), alpha=1, beta=1, out=buf5)
del primals_9
buf6 = torch.ops.aten.randn.default([4, 5], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
triton_poi_fused_add_exp_mul_2[grid(20)](buf7, buf5, buf4, buf8, 20,
XBLOCK=32, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (5, 100), (1,
5), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_relu_1[grid(400)](buf10, primals_11, 400, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_11
buf11 = empty_strided_cuda((4, 2000), (2016, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(primals_12, (100, 2000),
(1, 100), 0), out=buf11)
buf12 = buf11
del buf11
triton_poi_fused_relu_0[grid(8000)](buf12, primals_13, 8000, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_13
buf13 = empty_strided_cuda((4, 4297), (4320, 1), torch.float32)
extern_kernels.mm(buf12, reinterpret_tensor(primals_14, (2000, 4297
), (1, 2000), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4297), (4297, 1), torch.float32)
triton_poi_fused_sigmoid_3[grid(17188)](buf13, primals_15, buf14,
17188, XBLOCK=256, num_warps=4, num_stages=1)
del buf13
del primals_15
return (buf14, buf4, buf5, primals_1, buf1, buf3, buf5, buf7, buf8,
buf10, buf12, buf14, primals_14, primals_12, primals_10, primals_8,
primals_6, primals_4)
class VAENew(nn.Module):
def __init__(self):
super(VAENew, self).__init__()
self.input_linear = nn.Linear(4297, 2000)
self.enc_middle = nn.Linear(2000, 100)
self.enc_1 = nn.Linear(100, 5)
self.enc_2 = nn.Linear(100, 5)
self.dec_0 = nn.Linear(5, 100)
self.dec_middle = nn.Linear(100, 2000)
self.output_linear = nn.Linear(2000, 4297)
def encode(self, x):
h1 = F.relu(self.input_linear(x))
h2 = F.relu(self.enc_middle(h1))
return self.enc_1(h2), self.enc_2(h2)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
def decode(self, z):
h3 = F.relu(self.dec_0(z))
h4 = F.relu(self.dec_middle(h3))
return torch.sigmoid(self.output_linear(h4))
def forward(self, input_0):
primals_2 = self.input_linear.weight
primals_3 = self.input_linear.bias
primals_4 = self.enc_middle.weight
primals_5 = self.enc_middle.bias
primals_6 = self.enc_1.weight
primals_7 = self.enc_1.bias
primals_8 = self.enc_2.weight
primals_9 = self.enc_2.bias
primals_10 = self.dec_0.weight
primals_11 = self.dec_0.bias
primals_12 = self.dec_middle.weight
primals_13 = self.dec_middle.bias
primals_14 = self.output_linear.weight
primals_15 = self.output_linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0], output[1], output[2]
|
helenaandres/adversarial-generation-of-gene-expression-data
|
VAE
| false
| 10,234
|
[
"MIT"
] | 0
|
9a10f0c364b7daa789ae75ab5b51ed5c7cbcbeb1
|
https://github.com/helenaandres/adversarial-generation-of-gene-expression-data/tree/9a10f0c364b7daa789ae75ab5b51ed5c7cbcbeb1
|
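`triton_poi_fused_add_exp_mul_2` fuses the whole reparameterization step into one elementwise pass, and the `(2016, 1)` and `(4320, 1)` buffer strides appear to be inductor's alignment padding of the 2000- and 4297-wide rows. A CPU check of the fused formula against the eager method:

```python
import torch

mu, logvar, eps = torch.randn(4, 5), torch.randn(4, 5), torch.randn(4, 5)
z_fused = eps * torch.exp(0.5 * logvar) + mu         # the kernel's tmp7
z_eager = eps.mul(torch.exp(0.5 * logvar)).add_(mu)  # VAE.reparameterize
assert torch.allclose(z_fused, z_eager)
```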
DispAct
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DispAct(nn.Module):
def __init__(self):
super(DispAct, self).__init__()
def forward(self, x):
return torch.clamp(F.softplus(x), min=0.0001, max=10000.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 0.0001
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 10000.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_softplus_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class DispActNew(nn.Module):
def __init__(self):
super(DispActNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
jdasam/scDCC
|
DispAct
| false
| 10,235
|
[
"Apache-2.0"
] | 0
|
8ebaed766db5ad56021983ebc13e9a60b6c7b453
|
https://github.com/jdasam/scDCC/tree/8ebaed766db5ad56021983ebc13e9a60b6c7b453
|
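The kernel implements softplus in its numerically stable form, returning `x` directly once `x > 20` so that `exp(x)` never overflows; this matches `F.softplus`'s default threshold. CPU check:

```python
import torch
import torch.nn.functional as F

x = torch.tensor([-5.0, 0.0, 20.0, 50.0])
stable = torch.where(x > 20.0, x, torch.log1p(torch.exp(x)))
assert torch.allclose(stable, F.softplus(x))  # same threshold=20 rule
```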
KLDLoss
|
import torch
import torch.nn as nn
import torch.utils.data
class KLDLoss(nn.Module):
def forward(self, mu, logvar):
return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 - tmp4
tmp6 = tl_math.exp(tmp0)
tmp7 = tmp5 - tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = -0.5
tmp12 = tmp10 * tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_exp_mul_pow_sub_sum_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class KLDLossNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
izhorvath/MetGAN
|
KLDLoss
| false
| 10,236
|
[
"BSD-3-Clause"
] | 0
|
aca85fb3306d2515a65c8d525cd78e1147ba7e1b
|
https://github.com/izhorvath/MetGAN/tree/aca85fb3306d2515a65c8d525cd78e1147ba7e1b
|
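The whole loss collapses into one reduction kernel. What it sums is the closed-form KL divergence between N(mu, sigma^2) and N(0, 1); the standard per-element rearrangement, checked on CPU:

```python
import torch

mu, logvar = torch.randn(4, 4, 4, 4), torch.randn(4, 4, 4, 4)
kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
per_element = 0.5 * (mu.pow(2) + logvar.exp() - logvar - 1)
assert torch.allclose(kld, per_element.sum())
```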
PerturbationModule
|
import torch
import torch.utils.data
import torch
import torch.nn as nn
class PerturbationModule(nn.Module):
def __init__(self, T):
super(PerturbationModule, self).__init__()
self.T = T
self.training = False
self.conv_block = None
def forward(self, x):
if not self.training:
x = x + self.T * torch.normal(torch.zeros_like(x), 1.0)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'T': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_zeros_like_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_like_0[grid(256)](buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf1 = torch.ops.aten.normal.Tensor_float(buf0)
del buf0
buf2 = buf1
del buf1
buf3 = buf2
del buf2
triton_poi_fused_add_mul_1[grid(256)](buf3, arg0_1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf3,
class PerturbationModuleNew(nn.Module):
def __init__(self, T):
super(PerturbationModuleNew, self).__init__()
self.T = T
self.training = False
self.conv_block = None
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
jeffkinnison/pytorch-CycleGAN-and-pix2pix
|
PerturbationModule
| false
| 10,237
|
[
"BSD-3-Clause"
] | 0
|
e47041fa4ffa80ad5948d2d1125ec94c34c5947d
|
https://github.com/jeffkinnison/pytorch-CycleGAN-and-pix2pix/tree/e47041fa4ffa80ad5948d2d1125ec94c34c5947d
|
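Note that `T = 4` from `get_init_inputs` is baked into `triton_poi_fused_add_mul_1` as the literal `4.0`, so the compiled artifact is specialized to that value. The eager class runs on CPU, which makes a seeded check easy (a sketch, assuming the `PerturbationModule` class above is in scope):

```python
import torch

torch.manual_seed(0)
out_module = PerturbationModule(T=4)(torch.zeros(4, 4, 4, 4))
torch.manual_seed(0)
out_manual = 4.0 * torch.normal(torch.zeros(4, 4, 4, 4), 1.0)
assert torch.allclose(out_module, out_manual)
```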
CIoU
|
import torch
from torch import nn
class CIoU(nn.Module):
def __init__(self):
super(CIoU, self).__init__()
def forward(self, inputs, targets):
size = len(inputs)
uL_truth = targets[:, 0:2]
lR_truth = targets[:, 2:4]
uL_pred = inputs[:, 0:2]
lR_pred = inputs[:, 2:4]
truth_cen = torch.div(torch.add(uL_truth, lR_truth), 2)
pred_cen = torch.div(torch.add(uL_pred, lR_pred), 2)
uL_truth_x = uL_truth[:, 0]
uL_truth_y = uL_truth[:, 1]
lR_truth_x = lR_truth[:, 0]
lR_truth_y = lR_truth[:, 1]
uL_pred_x = uL_pred[:, 0]
uL_pred_y = uL_pred[:, 1]
lR_pred_x = lR_pred[:, 0]
lR_pred_y = lR_pred[:, 1]
truth_cen_x = truth_cen[:, 0]
truth_cen_y = truth_cen[:, 1]
pred_cen_x = pred_cen[:, 0]
pred_cen_y = pred_cen[:, 1]
p = torch.sqrt((pred_cen_x - truth_cen_x) ** 2 + (pred_cen_y -
truth_cen_y) ** 2)
enc_X = torch.reshape(torch.minimum(uL_truth, uL_pred), (size, 2))
enc_Y = torch.reshape(torch.maximum(lR_truth, lR_pred), (size, 2))
bounding_box = torch.reshape(torch.cat((enc_X, enc_Y), 1), (size, 4))
bb_uL_x = bounding_box[:, 0]
bb_uL_y = bounding_box[:, 1]
bb_lR_x = bounding_box[:, 2]
bb_lR_y = bounding_box[:, 3]
        (bb_lR_x - bb_uL_x) * (bb_lR_y - bb_uL_y)  # dead code: enclosing-box area, never used
C = torch.sqrt((bb_lR_x - bb_uL_x) ** 2 + (bb_lR_y - bb_uL_y) ** 2)
X = torch.where(torch.gt(uL_pred_x, lR_truth_x) | torch.gt(
uL_truth_x, lR_pred_x), 0, 1) * (torch.minimum(lR_truth_x,
lR_pred_x) - torch.maximum(uL_truth_x, uL_pred_x))
Y = torch.where(torch.gt(uL_pred_y, lR_truth_y) | torch.gt(
uL_truth_y, lR_pred_y), 0, 1) * (torch.minimum(lR_truth_y,
lR_pred_y) - torch.maximum(uL_truth_y, uL_pred_y))
i_area = X * Y
rec_1 = (lR_truth_x - uL_truth_x) * (lR_truth_y - uL_truth_y)
rec_2 = (lR_pred_x - uL_pred_x) * (lR_pred_y - uL_pred_y)
total_area = rec_1 + rec_2 - i_area
IoU = i_area / total_area
DIoU = 1 - IoU + p ** 2 / C ** 2
return torch.mean(DIoU)
"""#my own calculation
first = 1 - (i_area/BBA) # this will approach 0 when i_area == bbA
second = torch.where(p<5, first, torch.mul(p, first))
return torch.mean(second)"""
"""pred_W = lR_pred_x - uL_pred_x
pred_H = torch.where((lR_pred_y - uL_pred_y)!=0, 1, 0)*(lR_pred_y - uL_pred_y)
truth_W = lR_truth_x - uL_truth_x
truth_H = torch.where((lR_truth_y - uL_truth_y)!=0, 1, 0)*(lR_truth_y - uL_truth_y)
V = (4/(np.pi**2))*((torch.atan(torch.div(truth_W, truth_H)) - torch.atan(torch.div(pred_W, pred_H)))**2)
#alpha_1 = torch.div(V, ((1 - IoU)+V))
#print("alpha 1: ", alpha_1)
alpha = torch.where(torch.lt(IoU, 0.5), 0, 1)*torch.div(V, ((1-IoU)+V))
#print(torch.where(torch.lt(IoU, 0.5), 0, 1))
CIoU = 1 - (IoU - (((p**2)/(C**2)) + alpha*V))
return CIoU"""
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_bitwise_or_div_gt_maximum_mean_minimum_mul_pow_rsub_scalar_tensor_sqrt_sub_where_0(
in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp67 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp68 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp70 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp71 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp80 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp81 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp83 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp84 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp0 = tl.full([1, 1], 2, tl.int64)
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp3 = tmp0 < tmp0
tmp4 = tl.load(in_ptr0 + tl.broadcast_to(4 * r0 + 2, [XBLOCK, RBLOCK]),
tmp3, eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr1 + tl.broadcast_to(4 * r0 + 2, [XBLOCK, RBLOCK]),
tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp3, tmp6, tmp7)
tmp9 = tmp0 >= tmp0
tl.full([1, 1], 4, tl.int64)
tmp12 = tl.load(in_ptr0 + tl.broadcast_to(2 + 4 * r0 + 0, [XBLOCK,
RBLOCK]), tmp9, eviction_policy='evict_last', other=0.0)
tmp13 = tl.load(in_ptr1 + tl.broadcast_to(2 + 4 * r0 + 0, [XBLOCK,
RBLOCK]), tmp9, eviction_policy='evict_last', other=0.0)
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tl.where(tmp3, tmp8, tmp16)
tmp19 = tmp1 < tmp0
tmp20 = tl.load(in_ptr0 + tl.broadcast_to(4 * r0 + 0, [XBLOCK, RBLOCK]),
tmp19, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr1 + tl.broadcast_to(4 * r0 + 0, [XBLOCK, RBLOCK]),
tmp19, eviction_policy='evict_last', other=0.0)
tmp22 = triton_helpers.minimum(tmp20, tmp21)
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp19, tmp22, tmp23)
tmp25 = tmp1 >= tmp0
tmp27 = tl.load(in_ptr0 + tl.broadcast_to(2 + 4 * r0 + -2, [XBLOCK,
RBLOCK]), tmp25, eviction_policy='evict_last', other=0.0)
tmp28 = tl.load(in_ptr1 + tl.broadcast_to(2 + 4 * r0 + -2, [XBLOCK,
RBLOCK]), tmp25, eviction_policy='evict_last', other=0.0)
tmp29 = triton_helpers.maximum(tmp27, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp25, tmp29, tmp30)
tmp32 = tl.where(tmp19, tmp24, tmp31)
tmp33 = tmp17 - tmp32
tmp34 = tl.full([1, 1], 3, tl.int64)
tmp36 = tmp34 < tmp0
tmp37 = tl.load(in_ptr0 + tl.broadcast_to(4 * r0 + 3, [XBLOCK, RBLOCK]),
tmp36, eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr1 + tl.broadcast_to(4 * r0 + 3, [XBLOCK, RBLOCK]),
tmp36, eviction_policy='evict_last', other=0.0)
tmp39 = triton_helpers.minimum(tmp37, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp36, tmp39, tmp40)
tmp42 = tmp34 >= tmp0
tmp44 = tl.load(in_ptr0 + tl.broadcast_to(2 + 4 * r0 + 1, [XBLOCK,
RBLOCK]), tmp42, eviction_policy='evict_last', other=0.0)
tmp45 = tl.load(in_ptr1 + tl.broadcast_to(2 + 4 * r0 + 1, [XBLOCK,
RBLOCK]), tmp42, eviction_policy='evict_last', other=0.0)
tmp46 = triton_helpers.maximum(tmp44, tmp45)
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp42, tmp46, tmp47)
tmp49 = tl.where(tmp36, tmp41, tmp48)
tmp50 = tl.full([1, 1], 1, tl.int64)
tmp52 = tmp50 < tmp0
tmp53 = tl.load(in_ptr0 + tl.broadcast_to(4 * r0 + 1, [XBLOCK, RBLOCK]),
tmp52, eviction_policy='evict_last', other=0.0)
tmp54 = tl.load(in_ptr1 + tl.broadcast_to(4 * r0 + 1, [XBLOCK, RBLOCK]),
tmp52, eviction_policy='evict_last', other=0.0)
tmp55 = triton_helpers.minimum(tmp53, tmp54)
tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype)
tmp57 = tl.where(tmp52, tmp55, tmp56)
tmp58 = tmp50 >= tmp0
tmp60 = tl.load(in_ptr0 + tl.broadcast_to(2 + 4 * r0 + -1, [XBLOCK,
RBLOCK]), tmp58, eviction_policy='evict_last', other=0.0)
tmp61 = tl.load(in_ptr1 + tl.broadcast_to(2 + 4 * r0 + -1, [XBLOCK,
RBLOCK]), tmp58, eviction_policy='evict_last', other=0.0)
tmp62 = triton_helpers.maximum(tmp60, tmp61)
tmp63 = tl.full(tmp62.shape, 0.0, tmp62.dtype)
tmp64 = tl.where(tmp58, tmp62, tmp63)
tmp65 = tl.where(tmp52, tmp57, tmp64)
tmp66 = tmp49 - tmp65
tmp69 = tmp67 > tmp68
tmp72 = tmp70 > tmp71
tmp73 = tmp69 | tmp72
tmp74 = tl.where(tmp73, tmp1, tmp50)
tmp75 = tmp74.to(tl.float32)
tmp76 = triton_helpers.minimum(tmp68, tmp71)
tmp77 = triton_helpers.maximum(tmp70, tmp67)
tmp78 = tmp76 - tmp77
tmp79 = tmp75 * tmp78
tmp82 = tmp80 > tmp81
tmp85 = tmp83 > tmp84
tmp86 = tmp82 | tmp85
tmp87 = tl.where(tmp86, tmp1, tmp50)
tmp88 = tmp87.to(tl.float32)
tmp89 = triton_helpers.minimum(tmp81, tmp84)
tmp90 = triton_helpers.maximum(tmp83, tmp80)
tmp91 = tmp89 - tmp90
tmp92 = tmp88 * tmp91
tmp93 = tmp79 * tmp92
tmp94 = tmp68 - tmp70
tmp95 = tmp81 - tmp83
tmp96 = tmp94 * tmp95
tmp97 = tmp71 - tmp67
tmp98 = tmp84 - tmp80
tmp99 = tmp97 * tmp98
tmp100 = tmp96 + tmp99
tmp101 = tmp100 - tmp93
tmp102 = tmp67 + tmp71
tmp103 = 0.5
tmp104 = tmp102 * tmp103
tmp105 = tmp70 + tmp68
tmp106 = tmp105 * tmp103
tmp107 = tmp104 - tmp106
tmp108 = tmp107 * tmp107
tmp109 = tmp80 + tmp84
tmp110 = tmp109 * tmp103
tmp111 = tmp83 + tmp81
tmp112 = tmp111 * tmp103
tmp113 = tmp110 - tmp112
tmp114 = tmp113 * tmp113
tmp115 = tmp108 + tmp114
tmp116 = libdevice.sqrt(tmp115)
tmp117 = tmp116 * tmp116
tmp118 = tmp33 * tmp33
tmp119 = tmp66 * tmp66
tmp120 = tmp118 + tmp119
tmp121 = libdevice.sqrt(tmp120)
tmp122 = tmp121 * tmp121
tmp123 = tmp117 / tmp122
tmp124 = tmp93 / tmp101
tmp125 = 1.0
tmp126 = tmp125 - tmp124
tmp127 = tmp126 + tmp123
tmp128 = tl.broadcast_to(tmp127, [XBLOCK, RBLOCK])
tmp130 = tl.sum(tmp128, 1)[:, None]
tmp131 = 4.0
tmp132 = tmp130 / tmp131
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp132, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5
del buf5
get_raw_stream(0)
triton_per_fused_add_bitwise_or_div_gt_maximum_mean_minimum_mul_pow_rsub_scalar_tensor_sqrt_sub_where_0[
grid(1)](buf6, arg1_1, arg0_1, 1, 4, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf6,
class CIoUNew(nn.Module):
def __init__(self):
super(CIoUNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
jcscheufele/CS545_Final
|
CIoU
| false
| 10,238
|
[
"MIT"
] | 0
|
d86858408a9a0aab82b5d2b7e12847023d939e2e
|
https://github.com/jcscheufele/CS545_Final/tree/d86858408a9a0aab82b5d2b7e12847023d939e2e
|
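Despite the class name, the quantity this module returns is the DIoU loss, `1 - IoU + p^2 / C^2` (the aspect-ratio term survives only in the commented-out block). A hand-checked example on one `(x1, y1, x2, y2)` box pair, runnable on CPU with the eager class above on a recent PyTorch:

```python
import math
import torch

pred = torch.tensor([[0.0, 0.0, 2.0, 2.0]])   # centre (1, 1)
truth = torch.tensor([[1.0, 1.0, 3.0, 3.0]])  # centre (2, 2)
# intersection 1, union 7, centre distance^2 = 2, enclosing diagonal^2 = 18
loss = CIoU()(pred, truth).item()
assert math.isclose(loss, 1 - 1/7 + 2/18, rel_tol=1e-5)
```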
BiaffineScorer
|
import torch
import torch.nn as nn
class BiaffineScorer(nn.Module):
def __init__(self, input1_size, input2_size, output_size):
super().__init__()
self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1,
output_size)
self.W_bilin.weight.data.zero_()
self.W_bilin.bias.data.zero_()
def forward(self, input1, input2):
input1 = torch.cat([input1, input1.new_ones(*input1.size()[:-1], 1)
], len(input1.size()) - 1)
input2 = torch.cat([input2, input2.new_ones(*input2.size()[:-1], 1)
], len(input2.size()) - 1)
return self.W_bilin(input1, input2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input1_size': 4, 'input2_size': 4, 'output_size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 5, 5), (25, 5, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(320)](primals_1, buf0, 320, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(320)](primals_2, buf1, 320, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, (
64, 5), (5, 1), 0), primals_3, reinterpret_tensor(buf1, (64, 5),
(5, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_3
buf3 = buf2
del buf2
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_add_1[grid(256)](buf4, primals_4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_4
return buf4, reinterpret_tensor(buf0, (64, 5), (5, 1), 0
), reinterpret_tensor(buf1, (64, 5), (5, 1), 0)
class BiaffineScorerNew(nn.Module):
def __init__(self, input1_size, input2_size, output_size):
super().__init__()
self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1,
output_size)
self.W_bilin.weight.data.zero_()
self.W_bilin.bias.data.zero_()
def forward(self, input_0, input_1):
primals_3 = self.W_bilin.weight
primals_4 = self.W_bilin.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
giorgianb/stanza
|
BiaffineScorer
| false
| 10,239
|
[
"Apache-2.0"
] | 0
|
e1ff1ab73c228739fea3ef5c012a9f1042bef2e3
|
https://github.com/giorgianb/stanza/tree/e1ff1ab73c228739fea3ef5c012a9f1042bef2e3
|
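Appending a ones column to each input (the job of `triton_poi_fused_cat_0`) is what turns the pure bilinear form into a full affine scorer: `[x, 1]^T W [y, 1]` expands to the bilinear term plus both linear terms plus a constant. A CPU check of that expansion for a single output unit:

```python
import torch

W = torch.rand(5, 5)
x, y = torch.rand(4), torch.rand(4)
xp = torch.cat([x, torch.ones(1)])
yp = torch.cat([y, torch.ones(1)])
full = xp @ W @ yp
parts = x @ W[:4, :4] @ y + W[4, :4] @ y + x @ W[:4, 4] + W[4, 4]
assert torch.allclose(full, parts)
```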
Actor
|
import torch
import torch.nn.functional as F
from torch import nn
class Actor(nn.Module):
"""
Policy Network (state --> action)
"""
def __init__(self, state_size: 'int', action_size: 'int', hidden_size:
'int'=256):
super().__init__()
self.fc1 = nn.Linear(state_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, action_size)
def forward(self, state):
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
act = torch.tanh(self.out(x))
return act
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 256), (256, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
primals_5, buf6, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_6, (256, 4), (1, 256), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_tanh_1[grid(256)](buf5, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
), buf5, primals_6, buf6, primals_4, buf7
class ActorNew(nn.Module):
"""
Policy Network (state --> action)
"""
def __init__(self, state_size: 'int', action_size: 'int', hidden_size:
'int'=256):
super().__init__()
self.fc1 = nn.Linear(state_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, action_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.out.weight
primals_7 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
jadenvc/puppersim
|
Actor
| false
| 10,240
|
[
"Apache-2.0"
] | 0
|
1b3f3e3fc0515d5d6101622e0d729c779debfd32
|
https://github.com/jadenvc/puppersim/tree/1b3f3e3fc0515d5d6101622e0d729c779debfd32
|
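The name `triton_poi_fused_relu_threshold_backward_0` reflects that the kernel emits two outputs: the ReLU activations and the `(activation <= 0)` mask (`buf6`/`buf7`) that `threshold_backward` reuses to zero gradients. The eager equivalent of that mask:

```python
import torch

pre = torch.randn(8, 256, requires_grad=True)
act = torch.relu(pre)
mask = act <= 0                      # the kernel's second output
grad_out = torch.randn_like(act)
act.backward(grad_out)
assert torch.equal(pre.grad, grad_out.masked_fill(mask, 0.0))
```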
BboxHead
|
import torch
import torch.nn as nn
from itertools import product as product
class BboxHead(nn.Module):
def __init__(self, inchannels=512, num_anchors=2):
super(BboxHead, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(
1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
out = out.permute(0, 2, 3, 1).contiguous()
return out.view(out.shape[0], -1, 4)
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 8, 64, 64), (32768, 1, 512, 8))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 8), (32768, 512, 8, 1), 0)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 8192, 4), (32768, 4, 1), 0)
del buf2
triton_poi_fused_clone_view_1[grid(131072)](buf3, primals_2, 131072,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_1, buf0
class BboxHeadNew(nn.Module):
def __init__(self, inchannels=512, num_anchors=2):
super(BboxHeadNew, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(
1, 1), stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1x1.weight
primals_2 = self.conv1x1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
huigs/retinaface-pytorch
|
BboxHead
| false
| 10,241
|
[
"MIT"
] | 0
|
0d7551d5863d172c2122bdd8d2d58be36e1b10fd
|
https://github.com/huigs/retinaface-pytorch/tree/0d7551d5863d172c2122bdd8d2d58be36e1b10fd
|
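`triton_poi_fused_0` repacks the NCHW input into channels-last order, which is why the later `permute(0, 2, 3, 1).contiguous().view(...)` costs only `reinterpret_tensor` calls. The same layout, produced in eager PyTorch:

```python
import torch

x = torch.rand(4, 512, 64, 64)
x_cl = x.contiguous(memory_format=torch.channels_last)
print(x_cl.stride())  # (2097152, 1, 32768, 512), the strides asserted for buf0
```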
SDFNetwork
|
import torch
import numpy as np
import torch.nn as nn
def get_embedder(multires, input_dims=3):
embed_kwargs = {'include_input': True, 'input_dims': input_dims,
'max_freq_log2': multires - 1, 'num_freqs': multires,
'log_sampling': True, 'periodic_fns': [torch.sin, torch.cos]}
embedder_obj = Embedder(**embed_kwargs)
def embed(x, eo=embedder_obj):
return eo.embed(x)
return embed, embedder_obj.out_dim
class Embedder:
def __init__(self, **kwargs):
self.kwargs = kwargs
self.create_embedding_fn()
def create_embedding_fn(self):
embed_fns = []
d = self.kwargs['input_dims']
out_dim = 0
if self.kwargs['include_input']:
embed_fns.append(lambda x: x)
out_dim += d
max_freq = self.kwargs['max_freq_log2']
N_freqs = self.kwargs['num_freqs']
if self.kwargs['log_sampling']:
freq_bands = 2.0 ** torch.linspace(0.0, max_freq, N_freqs)
else:
freq_bands = torch.linspace(2.0 ** 0.0, 2.0 ** max_freq, N_freqs)
for freq in freq_bands:
for p_fn in self.kwargs['periodic_fns']:
embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq)
)
out_dim += d
self.embed_fns = embed_fns
self.out_dim = out_dim
def embed(self, inputs):
return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
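# Editor's sketch (not part of the original record): with include_input=True
# the embedding width is input_dims * (1 + 2 * num_freqs); e.g. multires=4,
# input_dims=3 gives 3 * (1 + 2 * 4) = 27 output features.
_embed, _embed_dim = get_embedder(4, input_dims=3)
assert _embed_dim == 27
assert _embed(torch.zeros(2, 3)).shape == (2, 27)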
class SDFNetwork(nn.Module):
def __init__(self, d_in, d_out, d_hidden, n_layers, skip_in=(4,),
multires=0, bias=0.5, scale=1, geometric_init=True, weight_norm=
True, inside_outside=False):
super(SDFNetwork, self).__init__()
dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out]
self.embed_fn_fine = None
if multires > 0:
embed_fn, input_ch = get_embedder(multires, input_dims=d_in)
self.embed_fn_fine = embed_fn
dims[0] = input_ch
self.num_layers = len(dims)
self.skip_in = skip_in
self.scale = scale
for l in range(0, self.num_layers - 1):
if l + 1 in self.skip_in:
out_dim = dims[l + 1] - dims[0]
else:
out_dim = dims[l + 1]
lin = nn.Linear(dims[l], out_dim)
if geometric_init:
if l == self.num_layers - 2:
if not inside_outside:
torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.
pi) / np.sqrt(dims[l]), std=0.0001)
torch.nn.init.constant_(lin.bias, -bias)
else:
torch.nn.init.normal_(lin.weight, mean=-np.sqrt(np.
pi) / np.sqrt(dims[l]), std=0.0001)
torch.nn.init.constant_(lin.bias, bias)
elif multires > 0 and l == 0:
torch.nn.init.constant_(lin.bias, 0.0)
torch.nn.init.constant_(lin.weight[:, 3:], 0.0)
torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2
) / np.sqrt(out_dim))
elif multires > 0 and l in self.skip_in:
torch.nn.init.constant_(lin.bias, 0.0)
torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.
sqrt(out_dim))
torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3):], 0.0
)
else:
torch.nn.init.constant_(lin.bias, 0.0)
torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.
sqrt(out_dim))
if weight_norm:
lin = nn.utils.weight_norm(lin)
setattr(self, 'lin' + str(l), lin)
self.activation = nn.Softplus(beta=100)
def forward(self, inputs):
inputs = inputs * self.scale
if self.embed_fn_fine is not None:
inputs = self.embed_fn_fine(inputs)
x = inputs
for l in range(0, self.num_layers - 1):
lin = getattr(self, 'lin' + str(l))
if l in self.skip_in:
x = torch.cat([x, inputs], 1) / np.sqrt(2)
x = lin(x)
if l < self.num_layers - 2:
x = self.activation(x)
return torch.cat([x[:, :1] / self.scale, x[:, 1:]], dim=-1)
def sdf(self, x):
return self.forward(x)[:, :1]
def sdf_hidden_appearance(self, x):
return self.forward(x)
def gradient(self, x):
x.requires_grad_(True)
y = self.sdf(x)
d_output = torch.ones_like(y, requires_grad=False, device=y.device)
        gradients = torch.autograd.grad(outputs=y, inputs=x, grad_outputs=
            d_output, create_graph=True, retain_graph=True,
            only_inputs=True)[0]
return gradients.unsqueeze(1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_in': 4, 'd_out': 4, 'd_hidden': 4, 'n_layers': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_softplus_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 100.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = 0.01
tmp8 = tmp6 * tmp7
tmp9 = tl.where(tmp4, tmp0, tmp8)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp13 = tl.load(in_ptr0 + (1 + 4 * x1 + (-1 + x0)), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 1), (1, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(4)](primals_3, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__weight_norm_interface_2[grid(16)](primals_3,
primals_2, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(buf2, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_4
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_softplus_3[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(4)](primals_6, buf5,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__weight_norm_interface_2[grid(16)](primals_6,
primals_5, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(buf6, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf7)
del primals_7
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_cat_4[grid(16)](buf7, buf8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf7
return (buf8, buf2, buf6, primals_2, primals_3, primals_5, primals_6,
buf0, buf1, buf3, buf4, buf5, buf6)
def get_embedder(multires, input_dims=3):
embed_kwargs = {'include_input': True, 'input_dims': input_dims,
'max_freq_log2': multires - 1, 'num_freqs': multires,
'log_sampling': True, 'periodic_fns': [torch.sin, torch.cos]}
embedder_obj = Embedder(**embed_kwargs)
def embed(x, eo=embedder_obj):
return eo.embed(x)
return embed, embedder_obj.out_dim
class Embedder:
def __init__(self, **kwargs):
self.kwargs = kwargs
self.create_embedding_fn()
def create_embedding_fn(self):
embed_fns = []
d = self.kwargs['input_dims']
out_dim = 0
if self.kwargs['include_input']:
embed_fns.append(lambda x: x)
out_dim += d
max_freq = self.kwargs['max_freq_log2']
N_freqs = self.kwargs['num_freqs']
if self.kwargs['log_sampling']:
freq_bands = 2.0 ** torch.linspace(0.0, max_freq, N_freqs)
else:
freq_bands = torch.linspace(2.0 ** 0.0, 2.0 ** max_freq, N_freqs)
for freq in freq_bands:
for p_fn in self.kwargs['periodic_fns']:
embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq)
)
out_dim += d
self.embed_fns = embed_fns
self.out_dim = out_dim
def embed(self, inputs):
return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
class SDFNetworkNew(nn.Module):
def __init__(self, d_in, d_out, d_hidden, n_layers, skip_in=(4,),
multires=0, bias=0.5, scale=1, geometric_init=True, weight_norm=
True, inside_outside=False):
super(SDFNetworkNew, self).__init__()
dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out]
self.embed_fn_fine = None
if multires > 0:
embed_fn, input_ch = get_embedder(multires, input_dims=d_in)
self.embed_fn_fine = embed_fn
dims[0] = input_ch
self.num_layers = len(dims)
self.skip_in = skip_in
self.scale = scale
for l in range(0, self.num_layers - 1):
if l + 1 in self.skip_in:
out_dim = dims[l + 1] - dims[0]
else:
out_dim = dims[l + 1]
lin = nn.Linear(dims[l], out_dim)
if geometric_init:
if l == self.num_layers - 2:
if not inside_outside:
torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.
pi) / np.sqrt(dims[l]), std=0.0001)
torch.nn.init.constant_(lin.bias, -bias)
else:
torch.nn.init.normal_(lin.weight, mean=-np.sqrt(np.
pi) / np.sqrt(dims[l]), std=0.0001)
torch.nn.init.constant_(lin.bias, bias)
elif multires > 0 and l == 0:
torch.nn.init.constant_(lin.bias, 0.0)
torch.nn.init.constant_(lin.weight[:, 3:], 0.0)
torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2
) / np.sqrt(out_dim))
elif multires > 0 and l in self.skip_in:
torch.nn.init.constant_(lin.bias, 0.0)
torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.
sqrt(out_dim))
torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3):], 0.0
)
else:
torch.nn.init.constant_(lin.bias, 0.0)
torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.
sqrt(out_dim))
if weight_norm:
lin = nn.utils.weight_norm(lin)
setattr(self, 'lin' + str(l), lin)
self.activation = nn.Softplus(beta=100)
def sdf(self, x):
return self.forward(x)[:, :1]
def sdf_hidden_appearance(self, x):
return self.forward(x)
def gradient(self, x):
x.requires_grad_(True)
y = self.sdf(x)
d_output = torch.ones_like(y, requires_grad=False, device=y.device)
        gradients = torch.autograd.grad(outputs=y, inputs=x, grad_outputs=
            d_output, create_graph=True, retain_graph=True,
            only_inputs=True)[0]
return gradients.unsqueeze(1)
    def forward(self, input_0):
        primals_1 = input_0
        primals_2 = self.lin0.weight_g
        primals_3 = self.lin0.weight_v
        primals_4 = self.lin0.bias
        primals_5 = self.lin1.weight_g
        primals_6 = self.lin1.weight_v
        primals_7 = self.lin1.bias
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
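# Editor's sketch (not part of the original record): with n_layers=1 the
# compiled call() path covers exactly lin0 and lin1. Note the forward() above
# feeds the scaled input as primals_1 and the weight_norm parameters as
# primals_2/3 and primals_5/6, matching what the kernels expect. Assumes CUDA.
if torch.cuda.is_available():
    sdf = SDFNetworkNew(d_in=4, d_out=4, d_hidden=4, n_layers=1).cuda()
    pts = torch.rand(4, 4, device='cuda')
    out = sdf(pts)  # (4, 4); the first column is the SDF value
    assert out.shape == (4, 4)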
|
hzwangjl/NeuS
|
SDFNetwork
| false
| 10,242
|
[
"MIT"
] | 0
|
f1b89176ec18e19b3848d787416dab9a1ce5300b
|
https://github.com/hzwangjl/NeuS/tree/f1b89176ec18e19b3848d787416dab9a1ce5300b
|
Critic
|
import torch
import torch.nn.functional as F
from torch import nn
class Critic(nn.Module):
"""
Value Network (state + action --> value)
"""
def __init__(self, state_size: 'int', action_size: 'int', hidden_size:
'int'=256):
super().__init__()
self.fc1 = nn.Linear(state_size + action_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, 1)
def forward(self, state, action):
x = torch.cat((state, action), dim=1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
val = self.out(x)
return val
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (256, 8), (8, 1))
assert_size_stride(primals_4, (256,), (1,))
assert_size_stride(primals_5, (256, 256), (256, 1))
assert_size_stride(primals_6, (256,), (1,))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 256), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(1024)](buf2, primals_4, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (256, 256), (
1, 256), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_1[grid(1024)](buf4, primals_6, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(256, 1), (1, 256), 0), alpha=1, beta=1, out=buf6)
del primals_8
return buf6, buf0, buf2, buf4, primals_7, primals_5
class CriticNew(nn.Module):
"""
Value Network (state + action --> value)
"""
def __init__(self, state_size: 'int', action_size: 'int', hidden_size:
'int'=256):
super().__init__()
self.fc1 = nn.Linear(state_size + action_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, 1)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_7 = self.out.weight
primals_8 = self.out.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
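# Editor's sketch (not part of the original record): the fused critic is a
# drop-in for Critic, mapping a (state, action) pair to one scalar value per
# sample. Assumes a CUDA device.
if torch.cuda.is_available():
    critic = CriticNew(state_size=4, action_size=4).cuda()
    s = torch.rand(4, 4, device='cuda')
    a = torch.rand(4, 4, device='cuda')
    assert critic(s, a).shape == (4, 1)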
|
jadenvc/puppersim
|
Critic
| false
| 10,243
|
[
"Apache-2.0"
] | 0
|
1b3f3e3fc0515d5d6101622e0d729c779debfd32
|
https://github.com/jadenvc/puppersim/tree/1b3f3e3fc0515d5d6101622e0d729c779debfd32
|
LandmarkHead
|
import torch
import torch.nn as nn
from itertools import product as product
class LandmarkHead(nn.Module):
def __init__(self, inchannels=512, num_anchors=2):
super(LandmarkHead, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=
(1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
out = out.permute(0, 2, 3, 1).contiguous()
return out.view(out.shape[0], -1, 10)
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (20, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (20,), (1,))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 20, 64, 64), (81920, 1, 1280, 20))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 20), (81920, 1280, 20,
1), 0)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 8192, 10), (81920, 10, 1), 0)
del buf2
triton_poi_fused_clone_view_1[grid(327680)](buf3, primals_2, 327680,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_1, buf0
class LandmarkHeadNew(nn.Module):
def __init__(self, inchannels=512, num_anchors=2):
super(LandmarkHeadNew, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=
(1, 1), stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1x1.weight
primals_2 = self.conv1x1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
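# Editor's sketch (not part of the original record): each spatial location
# contributes num_anchors=2 predictions of 10 landmark coordinates (e.g. 5
# facial points x 2 coords). Assumes a CUDA device.
if torch.cuda.is_available():
    head = LandmarkHeadNew(inchannels=512, num_anchors=2).cuda()
    x = torch.rand(4, 512, 64, 64, device='cuda')
    assert head(x).shape == (4, 64 * 64 * 2, 10)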
|
huigs/retinaface-pytorch
|
LandmarkHead
| false
| 10,244
|
[
"MIT"
] | 0
|
0d7551d5863d172c2122bdd8d2d58be36e1b10fd
|
https://github.com/huigs/retinaface-pytorch/tree/0d7551d5863d172c2122bdd8d2d58be36e1b10fd
|
SpatialSELayer1d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SpatialSELayer1d(nn.Module):
def __init__(self, num_channels):
"""
        :param num_channels: Number of input channels
"""
super(SpatialSELayer1d, self).__init__()
self.conv = nn.Conv1d(num_channels, 1, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_tensor, weights=None):
"""
:param weights: weights for few shot learning
:param input_tensor: X, shape = (batch_size, num_channels, W)
:return: output_tensor
"""
batch_size, channel, a = input_tensor.size()
if weights is not None:
weights = torch.mean(weights, dim=0)
weights = weights.view(1, channel, 1)
            out = F.conv1d(input_tensor, weights)  # 3D (N, C, W) input requires conv1d; conv2d here was a bug
else:
out = self.conv(input_tensor)
squeeze_tensor = self.sigmoid(out)
return input_tensor * squeeze_tensor.view(batch_size, 1, a)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(64)](primals_1, buf1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf2, primals_1, primals_2, buf1
class SpatialSELayer1dNew(nn.Module):
def __init__(self, num_channels):
"""
        :param num_channels: Number of input channels
"""
super(SpatialSELayer1dNew, self).__init__()
self.conv = nn.Conv1d(num_channels, 1, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
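# Editor's sketch (not part of the original record): spatial squeeze-and-
# excitation preserves the input shape; the gate is a per-position sigmoid in
# (0, 1), so for non-negative inputs the output never exceeds the input.
# Assumes a CUDA device.
if torch.cuda.is_available():
    se = SpatialSELayer1dNew(num_channels=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    y = se(x)
    assert y.shape == x.shape and bool((y <= x + 1e-6).all())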
|
ioanvl/1d_squeeze_excitation
|
SpatialSELayer1d
| false
| 10,245
|
[
"MIT"
] | 0
|
f422dc4b8e7de6239a6fb7d1688048db5053e733
|
https://github.com/ioanvl/1d_squeeze_excitation/tree/f422dc4b8e7de6239a6fb7d1688048db5053e733
|
ClassHead
|
import torch
import torch.nn as nn
from itertools import product as product
class ClassHead(nn.Module):
def __init__(self, inchannels=512, num_anchors=2):
super(ClassHead, self).__init__()
self.num_anchors = num_anchors
self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2,
kernel_size=(1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
out = out.permute(0, 2, 3, 1).contiguous()
return out.view(out.shape[0], -1, 2)
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 64, 64), (16384, 1, 256, 4))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 4), (16384, 256, 4, 1), 0)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 8192, 2), (16384, 2, 1), 0)
del buf2
triton_poi_fused_clone_view_1[grid(65536)](buf3, primals_2, 65536,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_1, buf0
class ClassHeadNew(nn.Module):
def __init__(self, inchannels=512, num_anchors=2):
super(ClassHeadNew, self).__init__()
self.num_anchors = num_anchors
self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2,
kernel_size=(1, 1), stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1x1.weight
primals_2 = self.conv1x1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
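# Editor's sketch (not part of the original record): the class head emits 2
# logits per anchor; the layout mirrors LandmarkHead with the last dimension
# swapped to 2. Assumes a CUDA device.
if torch.cuda.is_available():
    head = ClassHeadNew(inchannels=512, num_anchors=2).cuda()
    x = torch.rand(4, 512, 64, 64, device='cuda')
    assert head(x).shape == (4, 64 * 64 * 2, 2)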
|
huigs/retinaface-pytorch
|
ClassHead
| false
| 10,246
|
[
"MIT"
] | 0
|
0d7551d5863d172c2122bdd8d2d58be36e1b10fd
|
https://github.com/huigs/retinaface-pytorch/tree/0d7551d5863d172c2122bdd8d2d58be36e1b10fd
|
CNormalized_Linear
|
import math
import torch
import torch as th
class CNormalized_Linear(th.nn.Module):
"""Linear layer with column-wise normalized input matrix."""
def __init__(self, in_features, out_features, bias=False):
"""Initialize the layer."""
super(CNormalized_Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = th.nn.Parameter(th.Tensor(out_features, in_features))
if bias:
self.bias = th.nn.Parameter(th.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
"""Reset the parameters."""
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
"""Feed-forward through the network."""
return th.nn.functional.linear(input, self.weight.div(self.weight.
pow(2).sum(0).sqrt()))
def __repr__(self):
"""For print purposes."""
return self.__class__.__name__ + '(' + 'in_features=' + str(self.
in_features) + ', out_features=' + str(self.out_features
) + ', bias=' + str(self.bias is not None) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch as th
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_pow_sqrt_sum_0[grid(16)](primals_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1)
del buf0
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0)
class CNormalized_LinearNew(th.nn.Module):
"""Linear layer with column-wise normalized input matrix."""
def __init__(self, in_features, out_features, bias=False):
"""Initialize the layer."""
super(CNormalized_LinearNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = th.nn.Parameter(th.Tensor(out_features, in_features))
if bias:
self.bias = th.nn.Parameter(th.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
"""Reset the parameters."""
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def __repr__(self):
"""For print purposes."""
return self.__class__.__name__ + '(' + 'in_features=' + str(self.
in_features) + ', out_features=' + str(self.out_features
) + ', bias=' + str(self.bias is not None) + ')'
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
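# Editor's sketch (not part of the original record): the fused kernel divides
# each weight column by its L2 norm before the matmul, so every column of the
# effective weight has unit norm. Assumes a CUDA device.
if torch.cuda.is_available():
    lin = CNormalized_LinearNew(in_features=4, out_features=4).cuda()
    w = lin.weight.div(lin.weight.pow(2).sum(0).sqrt())
    torch.testing.assert_close(w.pow(2).sum(0), torch.ones(4, device='cuda'))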
|
edgarvardanyan/CausalDiscoveryToolbox
|
CNormalized_Linear
| false
| 10,247
|
[
"MIT"
] | 0
|
5497a400440b49a3af14a0c7512bcdd307c9285d
|
https://github.com/edgarvardanyan/CausalDiscoveryToolbox/tree/5497a400440b49a3af14a0c7512bcdd307c9285d
|
ChannelSELayer1d
|
import torch
import torch.nn as nn
class ChannelSELayer1d(nn.Module):
def __init__(self, num_channels, reduction_ratio=4):
"""
        :param num_channels: Number of input channels
        :param reduction_ratio: Factor by which num_channels is reduced
"""
super(ChannelSELayer1d, self).__init__()
num_channels_reduced = num_channels // reduction_ratio
self.reduction_ratio = reduction_ratio
self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
self.activ_1 = nn.ReLU()
self.activ_2 = nn.Sigmoid()
def forward(self, input_tensor):
"""
:param input_tensor: X, shape = (batch_size, num_channels, H)
:return: output tensor
"""
batch_size, num_channels, _H = input_tensor.size()
squeeze_tensor = input_tensor.view(batch_size, num_channels, -1).mean(
dim=2)
fc_out_1 = self.activ_1(self.fc1(squeeze_tensor))
fc_out_2 = self.activ_2(self.fc2(fc_out_1))
return input_tensor * fc_out_2.view(batch_size, num_channels, 1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 1), (1, 4
), 0), out=buf1)
del primals_2
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(4)](buf2, primals_3, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(1, 4), (1, 1), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_2[grid(64)](primals_1, buf3, buf4, 64, XBLOCK=
64, num_warps=1, num_stages=1)
return buf4, primals_1, buf0, buf2, buf3, primals_4
class ChannelSELayer1dNew(nn.Module):
def __init__(self, num_channels, reduction_ratio=4):
"""
        :param num_channels: Number of input channels
        :param reduction_ratio: Factor by which num_channels is reduced
"""
super(ChannelSELayer1dNew, self).__init__()
num_channels_reduced = num_channels // reduction_ratio
self.reduction_ratio = reduction_ratio
self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
self.activ_1 = nn.ReLU()
self.activ_2 = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
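# Editor's sketch (not part of the original record): with reduction_ratio=4
# and num_channels=4 the bottleneck shrinks to a single unit; the output keeps
# the input shape, rescaled per channel. Assumes a CUDA device.
if torch.cuda.is_available():
    se = ChannelSELayer1dNew(num_channels=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    assert se(x).shape == (4, 4, 4)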
|
ioanvl/1d_squeeze_excitation
|
ChannelSELayer1d
| false
| 10,248
|
[
"MIT"
] | 0
|
f422dc4b8e7de6239a6fb7d1688048db5053e733
|
https://github.com/ioanvl/1d_squeeze_excitation/tree/f422dc4b8e7de6239a6fb7d1688048db5053e733
|
Linear3D
|
import math
import torch
import torch as th
from torch.nn import Parameter
def functional_linear3d(input, weight, bias=None):
"""
    Apply a channel-wise linear transformation to the incoming data:
    :math:`y = xA + b`.
    Shape:
        - Input: :math:`(N, channels, in\\_features)`
        - Weight: :math:`(channels, in\\_features, out\\_features)`
        - Bias: :math:`(channels, out\\_features)`
        - Output: :math:`(N, channels, out\\_features)`
"""
output = input.transpose(0, 1).matmul(weight)
if bias is not None:
output += bias.unsqueeze(1)
return output.transpose(0, 1)
class Linear3D(th.nn.Module):
"""Applies a linear transformation to the incoming data: :math:`y = Ax + b`.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to False, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of
additional dimensions
- Output: :math:`(N, *, out\\_features)` where all but the last dimension
are the same shape as the input.
    Attributes:
        weight: the learnable weights of the module of shape
            `(channels x in_features x out_features)`
        bias: the learnable bias of the module of shape
            `(channels x out_features)`
    Examples::
        >>> m = Linear3D(3, 20, 30)
        >>> input = torch.randn(128, 3, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 3, 30])
"""
def __init__(self, channels, in_features, out_features, batch_size=-1,
bias=True, noise=False):
super(Linear3D, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.channels = channels
if noise:
self.in_features += 1
self.weight = Parameter(th.Tensor(channels, self.in_features,
out_features))
if bias:
self.bias = Parameter(th.Tensor(channels, out_features))
else:
self.register_parameter('bias', None)
if noise:
self.register_buffer('noise', th.Tensor(batch_size, channels, 1))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj_matrix=None, permutation_matrix=None):
input_ = [input]
if input.dim() == 2:
if permutation_matrix is not None:
input_.append(input.unsqueeze(1).expand([input.shape[0],
self.channels, permutation_matrix.shape[1]]))
elif hasattr(self, 'noise'):
input_.append(input.unsqueeze(1).expand([input.shape[0],
self.channels, self.in_features - 1]))
else:
input_.append(input.unsqueeze(1).expand([input.shape[0],
self.channels, self.in_features]))
if adj_matrix is not None and permutation_matrix is not None:
input_.append((input_[-1].transpose(0, 1) @ (adj_matrix.t().
unsqueeze(2) * permutation_matrix)).transpose(0, 1))
elif adj_matrix is not None:
input_.append(input_[-1] * adj_matrix.t().unsqueeze(0))
elif permutation_matrix is not None:
input_.append((input_[-1].transpose(0, 1) @ permutation_matrix).t()
)
if hasattr(self, 'noise'):
self.noise.normal_()
input_.append(th.cat([input_[-1], self.noise], 2))
return functional_linear3d(input_[-1], self.weight, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def apply_filter(self, permutation_matrix):
transpose_weight = self.weight.transpose(1, 2) @ permutation_matrix
self.weight = Parameter(transpose_weight.transpose(1, 2))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch as th
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16 % 4
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x2 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](primals_2, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_2[grid(256)](buf3, primals_3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
return reinterpret_tensor(buf3, (4, 4, 4, 4), (16, 64, 4, 1), 0
), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0)
def functional_linear3d(input, weight, bias=None):
"""
    Apply a channel-wise linear transformation to the incoming data:
    :math:`y = xA + b`.
    Shape:
        - Input: :math:`(N, channels, in\\_features)`
        - Weight: :math:`(channels, in\\_features, out\\_features)`
        - Bias: :math:`(channels, out\\_features)`
        - Output: :math:`(N, channels, out\\_features)`
"""
output = input.transpose(0, 1).matmul(weight)
if bias is not None:
output += bias.unsqueeze(1)
return output.transpose(0, 1)
class Linear3DNew(th.nn.Module):
"""Applies a linear transformation to the incoming data: :math:`y = Ax + b`.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to False, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of
additional dimensions
- Output: :math:`(N, *, out\\_features)` where all but the last dimension
are the same shape as the input.
    Attributes:
        weight: the learnable weights of the module of shape
            `(channels x in_features x out_features)`
        bias: the learnable bias of the module of shape
            `(channels x out_features)`
    Examples::
        >>> m = Linear3D(3, 20, 30)
        >>> input = torch.randn(128, 3, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 3, 30])
"""
def __init__(self, channels, in_features, out_features, batch_size=-1,
bias=True, noise=False):
super(Linear3DNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.channels = channels
if noise:
self.in_features += 1
self.weight = Parameter(th.Tensor(channels, self.in_features,
out_features))
if bias:
self.bias = Parameter(th.Tensor(channels, out_features))
else:
self.register_parameter('bias', None)
if noise:
self.register_buffer('noise', th.Tensor(batch_size, channels, 1))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def apply_filter(self, permutation_matrix):
transpose_weight = self.weight.transpose(1, 2) @ permutation_matrix
self.weight = Parameter(transpose_weight.transpose(1, 2))
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
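# Editor's sketch (not part of the original record): the compiled path covers
# only the plain forward(input) case; the adjacency, permutation and noise
# branches of the eager Linear3D are not traced here. Assumes a CUDA device
# and the (4, 4, 4, 4) input shape the kernels were specialized for.
if torch.cuda.is_available():
    lin = Linear3DNew(channels=4, in_features=4, out_features=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert lin(x).shape == (4, 4, 4, 4)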
|
edgarvardanyan/CausalDiscoveryToolbox
|
Linear3D
| false
| 10,249
|
[
"MIT"
] | 0
|
5497a400440b49a3af14a0c7512bcdd307c9285d
|
https://github.com/edgarvardanyan/CausalDiscoveryToolbox/tree/5497a400440b49a3af14a0c7512bcdd307c9285d
|
GCNLayer
|
import torch
import torch.nn as nn
class GCNLayer(nn.Module):
def __init__(self, in_ft, out_ft, bias=True):
super(GCNLayer, self).__init__()
self.fc = nn.Linear(in_ft, out_ft, bias=False)
self.act = nn.PReLU()
if bias:
self.bias = nn.Parameter(torch.FloatTensor(out_ft))
self.bias.data.fill_(0.0)
else:
self.register_parameter('bias', None)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, feat, adj):
feat = self.fc(feat)
out = torch.bmm(adj, feat)
if self.bias is not None:
out += self.bias
return self.act(out)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_ft': 4, 'out_ft': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__prelu_kernel_add_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), (
16, 4, 1), 0), out=buf1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused__prelu_kernel_add_0[grid(64)](buf1, primals_4,
primals_5, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf2, primals_4, primals_5, reinterpret_tensor(primals_2, (16, 4
), (4, 1), 0), buf1, reinterpret_tensor(primals_3, (4, 4, 4), (16,
1, 4), 0)
class GCNLayerNew(nn.Module):
def __init__(self, in_ft, out_ft, bias=True):
super(GCNLayerNew, self).__init__()
self.fc = nn.Linear(in_ft, out_ft, bias=False)
self.act = nn.PReLU()
if bias:
self.bias = nn.Parameter(torch.FloatTensor(out_ft))
self.bias.data.fill_(0.0)
else:
self.register_parameter('bias', None)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, input_0, input_1):
primals_4 = self.bias
primals_1 = self.fc.weight
primals_5 = self.act.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
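# Editor's sketch (not part of the original record): one GCN layer computes
# adj @ (feat @ W) + bias followed by PReLU; the fused kernel folds the bias
# add and activation into a single pass. Random tensors here only exercise
# shapes; a real adjacency matrix would normally be normalized. Assumes CUDA.
if torch.cuda.is_available():
    gcn = GCNLayerNew(in_ft=4, out_ft=4).cuda()
    feat = torch.rand(4, 4, 4, device='cuda')
    adj = torch.rand(4, 4, 4, device='cuda')
    assert gcn(feat, adj).shape == (4, 4, 4)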
|
jaynee156/GNN-thesis
|
GCNLayer
| false
| 10,250
|
[
"MIT"
] | 0
|
fe8a731698dedb6cf76f7130658a646664a79b09
|
https://github.com/jaynee156/GNN-thesis/tree/fe8a731698dedb6cf76f7130658a646664a79b09
|
Net
|
import torch
import torch.utils.data
import torch.utils.data.distributed
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = x.view(x.shape[0], 1, 28, 28)
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 1, 28, 28])]
def get_init_inputs():
return [[], {}]
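# Editor's sketch (not part of the original record): the eager reference maps
# 28x28 inputs to 10 log-probabilities per sample (log_softmax over classes),
# and runs on CPU as-is.
_net = Net().eval()
with torch.no_grad():
    _logp = _net(torch.rand(4, 1, 28, 28))
assert _logp.shape == (4, 10)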
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch.utils.data.distributed
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 23040
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 576 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 5760
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x3 = xindex // 12
x2 = xindex // 1440
x4 = xindex % 1440
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x3), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x4 + 1536 * x2), tmp15, xmask)
tl.store(out_ptr1 + x5, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 5120
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp18, xmask)
tl.store(out_ptr2 + x2, tmp20, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_5(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_3, (10,), (1,))
assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (50, 320), (320, 1))
assert_size_stride(primals_7, (50,), (1,))
assert_size_stride(primals_8, (10, 50), (50, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 24, 24), (5760, 576, 24, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(23040)](buf1, primals_3, 23040,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10, 12, 12), (1536, 144, 12, 1),
torch.int8)
buf3 = empty_strided_cuda((4, 10, 12, 12), (1440, 144, 12, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(5760)](buf1,
buf2, buf3, 5760, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 20, 8, 8), (1280, 64, 8, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(5120)](buf5, primals_5, 5120,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.int8)
buf7 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32
)
buf14 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.bool)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[grid
(1280)](buf5, buf6, buf7, buf14, 1280, XBLOCK=128, num_warps=4,
num_stages=1)
buf8 = empty_strided_cuda((4, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 320), (320, 1), 0),
reinterpret_tensor(primals_6, (320, 50), (1, 320), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(200)](buf9, primals_7, 200, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(50, 10), (1, 50), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf13 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_5[grid(4)](buf10, buf13, 4, 10,
XBLOCK=1, num_warps=2, num_stages=1)
del buf10
return (buf13, primals_1, primals_2, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 320), (320, 1), 0), buf9, buf13,
primals_8, primals_6, buf14)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
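# Hedged smoke test (added for illustration, not part of the dataset entry):
# exercising the compiled NetNew on an MNIST-shaped batch. Assumes a
# CUDA-capable PyTorch build; the input shape follows the
# assert_size_stride guard on primals_1 in call() above.
if torch.cuda.is_available():
    model = NetNew().cuda()
    x = torch.rand(4, 1, 28, 28, device='cuda')
    log_probs = model(x)  # log-softmax over the 10 digit classes
    assert log_probs.shape == (4, 10)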
|
iquintero/sagemaker-pytorch-container
|
Net
| false
| 10,251
|
[
"Apache-2.0"
] | 0
|
70f64c87e549ae833d7f2ef2f15f01542ff5678e
|
https://github.com/iquintero/sagemaker-pytorch-container/tree/70f64c87e549ae833d7f2ef2f15f01542ff5678e
|
ValueFunction
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ValueFunction(nn.Module):
"""fully connected 200x200 hidden layers"""
def __init__(self, state_dim):
super(ValueFunction, self).__init__()
self.fc1 = nn.Linear(state_dim, 200)
self.fc2 = nn.Linear(200, 200)
self.out = nn.Linear(200, 1)
def forward(self, x):
"""return: scalar value"""
x = F.leaky_relu(self.fc1(x), 0.2)
x = F.leaky_relu(self.fc2(x), 0.2)
return self.out(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4}]
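# Hedged usage sketch (added for illustration): the linear layers act on the
# last dimension, so any (*, state_dim) batch maps to a (*, 1) value tensor.
vf = ValueFunction(state_dim=4)
values = vf(torch.rand(4, 4, 4, 4))  # same input shape as get_inputs()
assert values.shape == (4, 4, 4, 1)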
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (200, 4), (4, 1))
assert_size_stride(primals_2, (200,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (200, 200), (200, 1))
assert_size_stride(primals_5, (200,), (1,))
assert_size_stride(primals_6, (1, 200), (200, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(12800)](buf0, primals_2, buf1,
buf2, 12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf3 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf2, (64, 200), (200, 1), 0),
reinterpret_tensor(primals_4, (200, 200), (1, 200), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.float32)
triton_poi_fused_leaky_relu_0[grid(12800)](buf3, primals_5, buf4,
buf5, 12800, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_5
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 200),
(200, 1), 0), reinterpret_tensor(primals_6, (200, 1), (1, 200),
0), alpha=1, beta=1, out=buf7)
del primals_7
return reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 200), (200, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 200), (200, 1), 0
), primals_6, primals_4
class ValueFunctionNew(nn.Module):
"""fully connected 200x200 hidden layers"""
def __init__(self, state_dim):
super(ValueFunctionNew, self).__init__()
self.fc1 = nn.Linear(state_dim, 200)
self.fc2 = nn.Linear(200, 200)
self.out = nn.Linear(200, 1)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.out.weight
primals_7 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
himanshusahni/task-biased-url
|
ValueFunction
| false
| 10,252
|
[
"MIT"
] | 0
|
28e4ec318d46d84065b6e197fa9f4100bd4a4c34
|
https://github.com/himanshusahni/task-biased-url/tree/28e4ec318d46d84065b6e197fa9f4100bd4a4c34
|
CrossAttention
|
import torch
from torch import nn
class MultiHeadAttention(nn.Module):
"""
Multi head attention for Perceiver https://arxiv.org/pdf/2103.03206.pdf.
Args:
num_q_channels (`int`):
Number of q channels.
num_kv_channels (`int`):
Number of k or v channels. k has the same channels as v.
num_heads (`int`):
Number of parallel attention heads.
        dropout (`float`):
Dropout probability.
"""
def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
num_heads: 'int', dropout: 'float'):
super().__init__()
self.attention = nn.MultiheadAttention(embed_dim=num_q_channels,
num_heads=num_heads, kdim=num_kv_channels, vdim=num_kv_channels,
dropout=dropout, batch_first=True)
def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
"""
Forward function.
Args:
x_q (`Tensor`):
Query embeddings.
x_kv (`Tensor`):
Key embeddings. Key equals value.
            pad_mask (`Tensor`, optional):
                Padding mask.
            attn_mask (`Tensor`, optional):
                Attention mask.
"""
return self.attention(x_q, x_kv, x_kv, key_padding_mask=pad_mask,
attn_mask=attn_mask)[0]
class CrossAttention(nn.Module):
"""
Cross attention for Perceiver https://arxiv.org/pdf/2103.03206.pdf.
Args:
num_q_channels (`int`):
Number of q channels.
num_kv_channels (`int`):
Number of k or v channels. k has the same channels as v.
num_heads (`int`):
Number of parallel attention heads.
        dropout (`float`):
Dropout probability.
"""
def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
num_heads: 'int', dropout: 'float'):
super().__init__()
self.q_norm = nn.LayerNorm(num_q_channels)
self.kv_norm = nn.LayerNorm(num_kv_channels)
self.attention = MultiHeadAttention(num_q_channels=num_q_channels,
num_kv_channels=num_kv_channels, num_heads=num_heads, dropout=
dropout)
def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
x_q = self.q_norm(x_q)
x_kv = self.kv_norm(x_kv)
return self.attention(x_q, x_kv, pad_mask=pad_mask, attn_mask=attn_mask
)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_q_channels': 4, 'num_kv_channels': 4, 'num_heads': 4,
'dropout': 0.5}]
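# Hedged usage sketch (added for illustration): cross attention lets query
# latents attend over key/value inputs that may have a different channel
# width; the output keeps the query shape.
attn = CrossAttention(num_q_channels=4, num_kv_channels=4, num_heads=4,
    dropout=0.5)
x_q, x_kv = torch.rand(4, 4), torch.rand(4, 4)  # unbatched (seq, dim) inputs
out = attn(x_q, x_kv)
assert out.shape == x_q.shape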
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (12, 4), (4, 1))
assert_size_stride(primals_8, (12,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_3, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_6, buf2, buf3,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_3, buf0,
buf1, primals_1, primals_2, buf4, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4
), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_6, buf2,
buf3, primals_4, primals_5, buf6, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf2
del buf3
del primals_4
del primals_5
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_8, (4,), (1,), 4),
buf6, reinterpret_tensor(primals_7, (4, 4), (1, 4), 16), alpha=
1, beta=1, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_8, (4,), (1,), 8),
buf6, reinterpret_tensor(primals_7, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf8)
buf9 = reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 16), 0)
del buf5
triton_poi_fused_mul_2[grid(16)](buf9, primals_8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_8
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf7, (4, 1, 4), (1, 1,
4), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf10, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = buf10
del buf10
triton_poi_fused__softmax_4[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf11
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf12, reinterpret_tensor(buf8, (4, 4, 1), (1, 4,
1), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf13, buf14, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf13, (4, 4), (4, 1), 0)
del buf13
extern_kernels.addmm(primals_10, reinterpret_tensor(buf14, (4, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf15)
del primals_10
return buf15, primals_3, primals_6, buf4, buf6, buf12, reinterpret_tensor(
buf14, (4, 4), (4, 1), 0), primals_9, reinterpret_tensor(buf8, (4,
1, 4), (1, 1, 4), 0), reinterpret_tensor(buf9, (4, 1, 4), (1, 1, 4), 0
), reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 1), 0
), reinterpret_tensor(primals_7, (4, 4), (4, 1), 32
), reinterpret_tensor(primals_7, (4, 4), (4, 1), 16
), reinterpret_tensor(primals_7, (4, 4), (4, 1), 0)
class MultiHeadAttention(nn.Module):
"""
Multi head attention for Perceiver https://arxiv.org/pdf/2103.03206.pdf.
Args:
num_q_channels (`int`):
Number of q channels.
num_kv_channels (`int`):
Number of k or v channels. k has the same channels as v.
num_heads (`int`):
Number of parallel attention heads.
        dropout (`float`):
Dropout probability.
"""
def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
num_heads: 'int', dropout: 'float'):
super().__init__()
self.attention = nn.MultiheadAttention(embed_dim=num_q_channels,
num_heads=num_heads, kdim=num_kv_channels, vdim=num_kv_channels,
dropout=dropout, batch_first=True)
def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
"""
Forward function.
Args:
x_q (`Tensor`):
Query embeddings.
x_kv (`Tensor`):
Key embeddings. Key equals value.
            pad_mask (`Tensor`, optional):
                Padding mask.
            attn_mask (`Tensor`, optional):
                Attention mask.
"""
return self.attention(x_q, x_kv, x_kv, key_padding_mask=pad_mask,
attn_mask=attn_mask)[0]
class CrossAttentionNew(nn.Module):
"""
Cross attention for Perceiver https://arxiv.org/pdf/2103.03206.pdf.
Args:
num_q_channels (`int`):
Number of q channels.
num_kv_channels (`int`):
Number of k or v channels. k has the same channels as v.
num_heads (`int`):
Number of parallel attention heads.
        dropout (`float`):
Dropout probability.
"""
def __init__(self, num_q_channels: 'int', num_kv_channels: 'int',
num_heads: 'int', dropout: 'float'):
super().__init__()
self.q_norm = nn.LayerNorm(num_q_channels)
self.kv_norm = nn.LayerNorm(num_kv_channels)
self.attention = MultiHeadAttention(num_q_channels=num_q_channels,
num_kv_channels=num_kv_channels, num_heads=num_heads, dropout=
dropout)
def forward(self, input_0, input_1):
primals_1 = self.q_norm.weight
primals_2 = self.q_norm.bias
primals_4 = self.kv_norm.weight
primals_5 = self.kv_norm.bias
primals_7 = self.attention.attention.in_proj_weight
primals_8 = self.attention.attention.in_proj_bias
        primals_9 = self.attention.attention.out_proj.weight
        primals_10 = self.attention.attention.out_proj.bias
        primals_3 = input_0
        primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
jennyli-z/towhee
|
CrossAttention
| false
| 10,253
|
[
"Apache-2.0"
] | 0
|
55c55fd961229575b75eae269b55090c839f8dcd
|
https://github.com/jennyli-z/towhee/tree/55c55fd961229575b75eae269b55090c839f8dcd
|
DenseBlock
|
import torch
from torch import nn
from torch.nn import functional as F
class CausalConv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2):
super(CausalConv1d, self).__init__()
self.padding = dilation
self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size,
padding=self.padding, dilation=dilation)
def forward(self, minibatch):
return self.causal_conv(minibatch)[:, :, :-self.padding]
class DenseBlock(nn.Module):
def __init__(self, in_channels, filters, dilation=2):
super(DenseBlock, self).__init__()
self.causal_conv1 = CausalConv1d(in_channels, filters, dilation=
dilation)
self.causal_conv2 = CausalConv1d(in_channels, filters, dilation=
dilation)
def forward(self, minibatch):
        tanh = torch.tanh(self.causal_conv1(minibatch))
        sig = torch.sigmoid(self.causal_conv2(minibatch))
out = torch.cat([minibatch, tanh * sig], dim=1)
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'filters': 4}]
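# Hedged worked example (added for illustration): DenseBlock concatenates a
# tanh*sigmoid gated branch onto its input along the channel axis, so the
# output carries in_channels + filters channels at the original length.
block = DenseBlock(in_channels=4, filters=4)
y = block(torch.rand(4, 4, 4))
assert y.shape == (4, 8, 4)  # 4 input channels + 4 gated channels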
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 6 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 6 * (-4 + x1) + 24 * x2), tmp6 & xmask,
other=0.0)
tmp10 = libdevice.tanh(tmp9)
tmp11 = tl.load(in_ptr2 + (x0 + 6 * (-4 + x1) + 24 * x2), tmp6 & xmask,
other=0.0)
tmp12 = tl.sigmoid(tmp11)
tmp13 = tmp10 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp6, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 2), (8, 2, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(2,), dilation=(2,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 6), (24, 6, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(96)](buf1, primals_2, 96,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,),
padding=(2,), dilation=(2,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 6), (24, 6, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_0[grid(96)](buf3, primals_5, 96,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
triton_poi_fused_cat_1[grid(128)](primals_3, buf1, buf3, buf4, 128,
XBLOCK=128, num_warps=4, num_stages=1)
return buf4, primals_1, primals_3, primals_4, buf1, buf3
class CausalConv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2):
super(CausalConv1d, self).__init__()
self.padding = dilation
self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size,
padding=self.padding, dilation=dilation)
def forward(self, minibatch):
return self.causal_conv(minibatch)[:, :, :-self.padding]
class DenseBlockNew(nn.Module):
def __init__(self, in_channels, filters, dilation=2):
super(DenseBlockNew, self).__init__()
self.causal_conv1 = CausalConv1d(in_channels, filters, dilation=
dilation)
self.causal_conv2 = CausalConv1d(in_channels, filters, dilation=
dilation)
def forward(self, input_0):
primals_1 = self.causal_conv1.causal_conv.weight
primals_2 = self.causal_conv1.causal_conv.bias
primals_4 = self.causal_conv2.causal_conv.weight
primals_5 = self.causal_conv2.causal_conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
heyitsmine/FewRel
|
DenseBlock
| false
| 10,254
|
[
"MIT"
] | 0
|
2a2b8ae471298d9eb3557796a085c23b21982fb2
|
https://github.com/heyitsmine/FewRel/tree/2a2b8ae471298d9eb3557796a085c23b21982fb2
|
CausalConv1d
|
import torch
from torch import nn
class CausalConv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2):
super(CausalConv1d, self).__init__()
self.padding = dilation
self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size,
padding=self.padding, dilation=dilation)
def forward(self, minibatch):
return self.causal_conv(minibatch)[:, :, :-self.padding]
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
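# Hedged worked example (added for illustration): with kernel_size=2,
# stride=1 and padding=dilation=d, Conv1d yields
# L_out = L_in + 2d - d*(k - 1) - 1 + 1 = L_in + d steps, and slicing off
# the final `padding` steps restores L_in while keeping the kernel causal.
conv = CausalConv1d(in_channels=4, out_channels=4)  # k=2, d=2
y = conv(torch.rand(4, 4, 4))
assert y.shape == (4, 4, 4)  # 6 raw conv steps, last 2 trimmed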
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 6 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(2,), dilation=(2,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 6), (24, 6, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(96)](buf1, primals_2, 96,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 4, 4), (24, 6, 1), 0
), primals_1, primals_3
class CausalConv1dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2):
super(CausalConv1dNew, self).__init__()
self.padding = dilation
self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size,
padding=self.padding, dilation=dilation)
def forward(self, input_0):
primals_1 = self.causal_conv.weight
primals_2 = self.causal_conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
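# Hedged equivalence check (added for illustration): assuming the reference
# CausalConv1d above and this compiled CausalConv1dNew are in scope together,
# copying the weights should make the two paths agree on CUDA.
if torch.cuda.is_available():
    eager, compiled = CausalConv1d(4, 4).cuda(), CausalConv1dNew(4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), compiled(x), atol=1e-6)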
|
heyitsmine/FewRel
|
CausalConv1d
| false
| 10,255
|
[
"MIT"
] | 0
|
2a2b8ae471298d9eb3557796a085c23b21982fb2
|
https://github.com/heyitsmine/FewRel/tree/2a2b8ae471298d9eb3557796a085c23b21982fb2
|
QValueFunction
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class QValueFunction(nn.Module):
"""fully connected 200x200 hidden layers"""
def __init__(self, state_dim, action_dim):
super(QValueFunction, self).__init__()
self.fc1 = nn.Linear(state_dim + action_dim, 200)
self.fc2 = nn.Linear(200, 200)
self.out = nn.Linear(200, 1)
def forward(self, s, a):
"""return: scalar value"""
x = torch.cat((s, a), dim=1)
x = F.leaky_relu(self.fc1(x), 0.2)
x = F.leaky_relu(self.fc2(x), 0.2)
return self.out(x)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
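# Hedged usage sketch (added for illustration): the critic concatenates the
# state and action batches along dim=1 and returns one Q-value per row.
qf = QValueFunction(state_dim=4, action_dim=4)
q = qf(torch.rand(4, 4), torch.rand(4, 4))
assert q.shape == (4, 1)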
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (200, 8), (8, 1))
assert_size_stride(primals_4, (200,), (1,))
assert_size_stride(primals_5, (200, 200), (200, 1))
assert_size_stride(primals_6, (200,), (1,))
assert_size_stride(primals_7, (1, 200), (200, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 200), (200, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 200), (1,
8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 200), (200, 1), torch.bool)
buf3 = empty_strided_cuda((4, 200), (200, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(800)](buf1, primals_4, buf2,
buf3, 800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf4 = buf1
del buf1
extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (200, 200), (
1, 200), 0), out=buf4)
buf5 = empty_strided_cuda((4, 200), (200, 1), torch.bool)
buf6 = empty_strided_cuda((4, 200), (200, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(800)](buf4, primals_6, buf5,
buf6, 800, XBLOCK=256, num_warps=4, num_stages=1)
del buf4
del primals_6
buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf6, reinterpret_tensor(primals_7,
(200, 1), (1, 200), 0), alpha=1, beta=1, out=buf8)
del primals_8
return buf8, buf0, buf2, buf3, buf5, buf6, primals_7, primals_5
class QValueFunctionNew(nn.Module):
"""fully connected 200x200 hidden layers"""
def __init__(self, state_dim, action_dim):
super(QValueFunctionNew, self).__init__()
self.fc1 = nn.Linear(state_dim + action_dim, 200)
self.fc2 = nn.Linear(200, 200)
self.out = nn.Linear(200, 1)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_7 = self.out.weight
primals_8 = self.out.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
himanshusahni/task-biased-url
|
QValueFunction
| false
| 10,256
|
[
"MIT"
] | 0
|
28e4ec318d46d84065b6e197fa9f4100bd4a4c34
|
https://github.com/himanshusahni/task-biased-url/tree/28e4ec318d46d84065b6e197fa9f4100bd4a4c34
|
Gate
|
import torch
import torch.nn as nn
class Gate(nn.Module):
def __init__(self, input_dim):
super(Gate, self).__init__()
self.linear = nn.Linear(input_dim * 4, 1, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x, y):
z = torch.cat([x, y, x * y, x - y], dim=2)
return self.sigmoid(self.linear(z))
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
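# Hedged usage sketch (added for illustration): the gate scores each position
# from the classic fusion features [x, y, x*y, x - y] and squashes the score
# to (0, 1), a common recipe for blending two aligned sequence encodings.
gate = Gate(input_dim=4)
g = gate(torch.rand(4, 4, 4), torch.rand(4, 4, 4))
assert g.shape == (4, 4, 1)  # one gate value per (batch, position)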
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp14, tmp17, tmp18)
tmp20 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp23 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr1 + (4 * x1 + (-12 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 - tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp20, tmp25, tmp26)
tmp28 = tl.where(tmp14, tmp19, tmp27)
tmp29 = tl.where(tmp9, tmp10, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 16), (16, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_3, (16, 1), (1, 16), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 1), 0)
del buf1
triton_poi_fused_sigmoid_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
return buf2, reinterpret_tensor(buf0, (16, 16), (16, 1), 0), buf2
class GateNew(nn.Module):
def __init__(self, input_dim):
super(GateNew, self).__init__()
self.linear = nn.Linear(input_dim * 4, 1, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0, input_1):
primals_3 = self.linear.weight
primals_4 = self.linear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
hgrhgy/NumSeq2SQL
|
Gate
| false
| 10,257
|
[
"MIT"
] | 0
|
6f22fdf108736f979afa2dbd3af14aa9ad4718aa
|
https://github.com/hgrhgy/NumSeq2SQL/tree/6f22fdf108736f979afa2dbd3af14aa9ad4718aa
|
ChannelSpatialSELayer1d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ChannelSELayer1d(nn.Module):
def __init__(self, num_channels, reduction_ratio=4):
"""
:param num_channels: No of input channels
        :param reduction_ratio: By how much the num_channels should be reduced
"""
super(ChannelSELayer1d, self).__init__()
num_channels_reduced = num_channels // reduction_ratio
self.reduction_ratio = reduction_ratio
self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
self.activ_1 = nn.ReLU()
self.activ_2 = nn.Sigmoid()
def forward(self, input_tensor):
"""
:param input_tensor: X, shape = (batch_size, num_channels, H)
:return: output tensor
"""
batch_size, num_channels, _H = input_tensor.size()
squeeze_tensor = input_tensor.view(batch_size, num_channels, -1).mean(
dim=2)
fc_out_1 = self.activ_1(self.fc1(squeeze_tensor))
fc_out_2 = self.activ_2(self.fc2(fc_out_1))
return input_tensor * fc_out_2.view(batch_size, num_channels, 1)
class SpatialSELayer1d(nn.Module):
def __init__(self, num_channels):
"""
:param num_channels: No of input channels
"""
super(SpatialSELayer1d, self).__init__()
self.conv = nn.Conv1d(num_channels, 1, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_tensor, weights=None):
"""
:param weights: weights for few shot learning
:param input_tensor: X, shape = (batch_size, num_channels, W)
:return: output_tensor
"""
batch_size, channel, a = input_tensor.size()
if weights is not None:
weights = torch.mean(weights, dim=0)
weights = weights.view(1, channel, 1)
            out = F.conv1d(input_tensor, weights)
else:
out = self.conv(input_tensor)
squeeze_tensor = self.sigmoid(out)
return input_tensor * squeeze_tensor.view(batch_size, 1, a)
class ChannelSpatialSELayer1d(nn.Module):
def __init__(self, num_channels, reduction_ratio=2):
"""
:param num_channels: No of input channels
        :param reduction_ratio: By how much the num_channels should be reduced
"""
super(ChannelSpatialSELayer1d, self).__init__()
self.cSE = ChannelSELayer1d(num_channels, reduction_ratio)
self.sSE = SpatialSELayer1d(num_channels)
def forward(self, input_tensor):
"""
:param input_tensor: X, shape = (batch_size, num_channels, W)
:return: output_tensor
"""
return torch.max(self.cSE(input_tensor), self.sSE(input_tensor))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
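# Hedged usage sketch (added for illustration): the layer recalibrates the
# input twice, per channel (cSE) and per position (sSE), then keeps the
# element-wise maximum of the two excited tensors.
layer = ChannelSpatialSELayer1d(num_channels=4)
x = torch.rand(4, 4, 4)
assert layer(x).shape == x.shape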
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_maximum_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 4
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tl.sigmoid(tmp4)
tmp6 = tmp0 * tmp5
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tl.store(out_ptr0 + x3, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 2), (1, 4
), 0), out=buf1)
del primals_2
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(8)](buf2, primals_3, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(2, 4), (1, 2), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = extern_kernels.convolution(primals_1, primals_6, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 4), (4, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(16)](buf5, primals_7, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_maximum_mul_sigmoid_3[grid(64)](primals_1, buf3,
buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf6, primals_1, primals_6, buf0, buf2, buf3, buf5, primals_4
class ChannelSELayer1d(nn.Module):
def __init__(self, num_channels, reduction_ratio=4):
"""
:param num_channels: No of input channels
        :param reduction_ratio: By how much the num_channels should be reduced
"""
super(ChannelSELayer1d, self).__init__()
num_channels_reduced = num_channels // reduction_ratio
self.reduction_ratio = reduction_ratio
self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
self.activ_1 = nn.ReLU()
self.activ_2 = nn.Sigmoid()
def forward(self, input_tensor):
"""
:param input_tensor: X, shape = (batch_size, num_channels, H)
:return: output tensor
"""
batch_size, num_channels, _H = input_tensor.size()
squeeze_tensor = input_tensor.view(batch_size, num_channels, -1).mean(
dim=2)
fc_out_1 = self.activ_1(self.fc1(squeeze_tensor))
fc_out_2 = self.activ_2(self.fc2(fc_out_1))
return input_tensor * fc_out_2.view(batch_size, num_channels, 1)
class SpatialSELayer1d(nn.Module):
def __init__(self, num_channels):
"""
:param num_channels: No of input channels
"""
super(SpatialSELayer1d, self).__init__()
self.conv = nn.Conv1d(num_channels, 1, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_tensor, weights=None):
"""
:param weights: weights for few shot learning
:param input_tensor: X, shape = (batch_size, num_channels, W)
:return: output_tensor
"""
batch_size, channel, a = input_tensor.size()
if weights is not None:
weights = torch.mean(weights, dim=0)
weights = weights.view(1, channel, 1)
            out = F.conv1d(input_tensor, weights)
else:
out = self.conv(input_tensor)
squeeze_tensor = self.sigmoid(out)
return input_tensor * squeeze_tensor.view(batch_size, 1, a)
class ChannelSpatialSELayer1dNew(nn.Module):
def __init__(self, num_channels, reduction_ratio=2):
"""
:param num_channels: No of input channels
        :param reduction_ratio: By how much the num_channels should be reduced
"""
super(ChannelSpatialSELayer1dNew, self).__init__()
self.cSE = ChannelSELayer1d(num_channels, reduction_ratio)
self.sSE = SpatialSELayer1d(num_channels)
def forward(self, input_0):
primals_2 = self.cSE.fc1.weight
primals_3 = self.cSE.fc1.bias
primals_4 = self.cSE.fc2.weight
primals_5 = self.cSE.fc2.bias
primals_6 = self.sSE.conv.weight
primals_7 = self.sSE.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
ioanvl/1d_squeeze_excitation
|
ChannelSpatialSELayer1d
| false
| 10,258
|
[
"MIT"
] | 0
|
f422dc4b8e7de6239a6fb7d1688048db5053e733
|
https://github.com/ioanvl/1d_squeeze_excitation/tree/f422dc4b8e7de6239a6fb7d1688048db5053e733
|
GaussianPolicyFunction
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class GaussianPolicyFunction(nn.Module):
"""fully connected 200x200 hidden layers"""
def __init__(self, state_dim, action_dim):
super(GaussianPolicyFunction, self).__init__()
self.fc1 = nn.Linear(state_dim, 200)
self.fc2 = nn.Linear(200, 200)
self.mu_out = nn.Linear(200, action_dim)
self.sigma_out = nn.Linear(200, action_dim)
def forward(self, x):
"""return: action between [-1,1]"""
x = F.leaky_relu(self.fc1(x), 0.2)
x = F.leaky_relu(self.fc2(x), 0.2)
return torch.tanh(self.mu_out(x)), F.softplus(self.sigma_out(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
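# Hedged usage sketch (added for illustration): drawing a reparameterised
# action from the diagonal Gaussian defined by the two heads. The sampling
# step is an assumption about intended use; the module only returns
# (mu, sigma).
policy = GaussianPolicyFunction(state_dim=4, action_dim=4)
mu, sigma = policy(torch.rand(4, 4))
action = mu + sigma * torch.randn_like(sigma)  # reparameterisation trick
assert action.shape == (4, 4)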
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_softplus_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (200, 4), (4, 1))
assert_size_stride(primals_2, (200,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (200, 200), (200, 1))
assert_size_stride(primals_5, (200,), (1,))
assert_size_stride(primals_6, (4, 200), (200, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 200), (200, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(12800)](buf0, primals_2, buf1,
buf2, 12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf3 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf2, (64, 200), (200, 1), 0),
reinterpret_tensor(primals_4, (200, 200), (1, 200), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.float32)
triton_poi_fused_leaky_relu_0[grid(12800)](buf3, primals_5, buf4,
buf5, 12800, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_5
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 200), (200, 1), 0),
reinterpret_tensor(primals_6, (200, 4), (1, 200), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_tanh_1[grid(256)](buf7, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 200),
(200, 1), 0), reinterpret_tensor(primals_8, (200, 4), (1, 200),
0), alpha=1, beta=1, out=buf8)
del primals_9
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_softplus_2[grid(256)](buf8, buf9, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf7, buf9, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 200), (200, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 200), (200, 1), 0
), buf7, buf8, primals_8, primals_6, primals_4
class GaussianPolicyFunctionNew(nn.Module):
"""fully connected 200x200 hidden layers"""
def __init__(self, state_dim, action_dim):
super(GaussianPolicyFunctionNew, self).__init__()
self.fc1 = nn.Linear(state_dim, 200)
self.fc2 = nn.Linear(200, 200)
self.mu_out = nn.Linear(200, action_dim)
self.sigma_out = nn.Linear(200, action_dim)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.mu_out.weight
primals_7 = self.mu_out.bias
primals_8 = self.sigma_out.weight
primals_9 = self.sigma_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
|
himanshusahni/task-biased-url
|
GaussianPolicyFunction
| false
| 10,259
|
[
"MIT"
] | 0
|
28e4ec318d46d84065b6e197fa9f4100bd4a4c34
|
https://github.com/himanshusahni/task-biased-url/tree/28e4ec318d46d84065b6e197fa9f4100bd4a4c34
|
SkillDiscriminator
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SkillDiscriminator(nn.Module):
"""fully connected 200x200 layers for inferring q(z|s)"""
def __init__(self, state_dim, nb_skills):
super(SkillDiscriminator, self).__init__()
self.fc1 = nn.Linear(state_dim, 200)
self.fc2 = nn.Linear(200, 200)
self.out = nn.Linear(200, nb_skills)
def forward(self, x):
"""return: scalar value"""
x = F.leaky_relu(self.fc1(x), 0.2)
x = F.leaky_relu(self.fc2(x), 0.2)
logits = self.out(x)
return logits, nn.LogSoftmax(dim=1)(logits)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'nb_skills': 4}]
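# Hedged usage sketch (added for illustration): in DIAYN-style training the
# intrinsic reward is read off log q(z|s); using the returned log-softmax
# that way is an assumption about intended use.
disc = SkillDiscriminator(state_dim=4, nb_skills=4)
logits, log_probs = disc(torch.rand(8, 4))
assert logits.shape == log_probs.shape == (8, 4)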
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (200, 4), (4, 1))
assert_size_stride(primals_2, (200,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (200, 200), (200, 1))
assert_size_stride(primals_5, (200,), (1,))
assert_size_stride(primals_6, (4, 200), (200, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(12800)](buf0, primals_2, buf1,
buf2, 12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf3 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf2, (64, 200), (200, 1), 0),
reinterpret_tensor(primals_4, (200, 200), (1, 200), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.float32)
triton_poi_fused_leaky_relu_0[grid(12800)](buf3, primals_5, buf4,
buf5, 12800, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_5
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 200),
(200, 1), 0), reinterpret_tensor(primals_6, (200, 4), (1, 200),
0), alpha=1, beta=1, out=buf6)
del primals_7
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf6, buf7, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_2[grid(256)](buf7, buf8, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf7
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 200), (200, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 200), (200, 1), 0
), buf8, primals_6, primals_4
class SkillDiscriminatorNew(nn.Module):
"""fully connected 200x200 layers for inferring q(z|s)"""
def __init__(self, state_dim, nb_skills):
super(SkillDiscriminatorNew, self).__init__()
self.fc1 = nn.Linear(state_dim, 200)
self.fc2 = nn.Linear(200, 200)
self.out = nn.Linear(200, nb_skills)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.out.weight
primals_7 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
|
himanshusahni/task-biased-url
|
SkillDiscriminator
| false
| 10,260
|
[
"MIT"
] | 0
|
28e4ec318d46d84065b6e197fa9f4100bd4a4c34
|
https://github.com/himanshusahni/task-biased-url/tree/28e4ec318d46d84065b6e197fa9f4100bd4a4c34
|
OutConv
|
import torch
import torch.nn as nn
class OutConv(nn.Module):
def __init__(self, inChannels, outChannels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(inChannels, outChannels, kernel_size=1)
self.tanh = nn.Tanh()
def forward(self, input_):
return self.tanh(self.conv(input_))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inChannels': 4, 'outChannels': 4}]
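# A minimal usage sketch, not part of the original repo; it assumes only the
# OutConv class above. The 1x1 convolution mixes channels per pixel and the
# tanh bounds every output value to [-1, 1] without changing spatial shape.
if __name__ == '__main__':
    layer = OutConv(4, 4)
    out = layer(torch.rand([4, 4, 4, 4]))
    assert out.shape == (4, 4, 4, 4)
    assert out.abs().max().item() <= 1.0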
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_tanh_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_tanh_0[grid(256)](buf1, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf1
class OutConvNew(nn.Module):
def __init__(self, inChannels, outChannels):
super(OutConvNew, self).__init__()
self.conv = nn.Conv2d(inChannels, outChannels, kernel_size=1)
self.tanh = nn.Tanh()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
iabd/Dereverbify
|
OutConv
| false
| 10,261
|
[
"MIT"
] | 0
|
e0c2e40c6813cf5528c3e0a1d697085444fb23b2
|
https://github.com/iabd/Dereverbify/tree/e0c2e40c6813cf5528c3e0a1d697085444fb23b2
|
DiscretePolicyFunction
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DiscretePolicyFunction(nn.Module):
"""fully connected 200x200 hidden layers"""
def __init__(self, state_dim, action_dim):
super(DiscretePolicyFunction, self).__init__()
self.fc1 = nn.Linear(state_dim, 200)
self.fc2 = nn.Linear(200, 200)
self.out = nn.Linear(200, action_dim)
def forward(self, x):
"""return: action between [-1,1]"""
x = F.leaky_relu(self.fc1(x), 0.2)
x = F.leaky_relu(self.fc2(x), 0.2)
return F.softmax(self.out(x), 0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
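# A minimal usage sketch, not part of the original repo; it assumes only the
# class above. Note the softmax is over dim 0 (the leading dimension), so
# the outputs normalize across that axis rather than across actions.
if __name__ == '__main__':
    policy = DiscretePolicyFunction(state_dim=4, action_dim=4)
    probs = policy(torch.rand([4, 4, 4, 4]))
    assert torch.allclose(probs.sum(dim=0), torch.ones(4, 4, 4))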
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (200, 4), (4, 1))
assert_size_stride(primals_2, (200,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (200, 200), (200, 1))
assert_size_stride(primals_5, (200,), (1,))
assert_size_stride(primals_6, (4, 200), (200, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(12800)](buf0, primals_2, buf1,
buf2, 12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf3 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf2, (64, 200), (200, 1), 0),
reinterpret_tensor(primals_4, (200, 200), (1, 200), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.float32)
triton_poi_fused_leaky_relu_0[grid(12800)](buf3, primals_5, buf4,
buf5, 12800, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_5
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 200),
(200, 1), 0), reinterpret_tensor(primals_6, (200, 4), (1, 200),
0), alpha=1, beta=1, out=buf6)
del primals_7
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_2[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf7
return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 200), (200, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 200), (200, 1), 0
), buf8, primals_6, primals_4
class DiscretePolicyFunctionNew(nn.Module):
"""fully connected 200x200 hidden layers"""
def __init__(self, state_dim, action_dim):
super(DiscretePolicyFunctionNew, self).__init__()
self.fc1 = nn.Linear(state_dim, 200)
self.fc2 = nn.Linear(200, 200)
self.out = nn.Linear(200, action_dim)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.out.weight
primals_7 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
himanshusahni/task-biased-url
|
DiscretePolicyFunction
| false
| 10,263
|
[
"MIT"
] | 0
|
28e4ec318d46d84065b6e197fa9f4100bd4a4c34
|
https://github.com/himanshusahni/task-biased-url/tree/28e4ec318d46d84065b6e197fa9f4100bd4a4c34
|
AttentionPool2d
|
import math
import torch
import numpy as np
import torch.nn as nn
import torch as th
def count_flops_attn(model, _x, y):
"""
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
"""
b, c, *spatial = y[0].shape
num_spatial = int(np.prod(spatial))
matmul_ops = 2 * b * num_spatial ** 2 * c
model.total_ops += th.DoubleTensor([matmul_ops])
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
class QKVAttention(nn.Module):
"""
A module which performs QKV attention and splits in a different order.
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.chunk(3, dim=1)
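        # the scale is applied to both q and k before the matmul, so the
        # product carries the standard 1 / sqrt(ch) attention scaling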
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum('bct,bcs->bts', (q * scale).view(bs * self.
n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch,
length))
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum('bts,bcs->bct', weight, v.reshape(bs * self.n_heads,
ch, length))
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(self, spacial_dim: 'int', embed_dim: 'int',
num_heads_channels: 'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim,
spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)
x = x + self.positional_embedding[None, :, :]
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'spacial_dim': 4, 'embed_dim': 4, 'num_heads_channels': 4}]
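# A minimal usage sketch, not part of the original repo; it assumes only the
# classes above. The mean over spatial positions is prepended as an extra
# token, and the pooled result is the attended output at that token.
if __name__ == '__main__':
    pool = AttentionPool2d(spacial_dim=4, embed_dim=4, num_heads_channels=4)
    pooled = pool(torch.rand([4, 4, 4, 4]))  # (B, C, H, W) -> (B, C)
    assert pooled.shape == (4, 4)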
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import numpy as np
import torch.nn as nn
import torch as th
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 17
x3 = xindex // 17
x4 = xindex % 68
x5 = xindex
tmp15 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 17, tl.int64)
tmp13 = tl.load(in_ptr1 + (16 * x3 + (-1 + x0)), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x5, tmp16, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 68
x3 = xindex % 68
x1 = xindex // 17 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 204 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 68
x3 = xindex % 68
x1 = xindex // 17 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (68 + x3 + 204 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 68
rnumel = 17
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 17 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 17 * x0), tmp11, rmask & xmask)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 17 % 12
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_6(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 68 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 17 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 17 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 17), (17, 1))
assert_size_stride(primals_3, (12, 4, 1), (4, 1, 1))
assert_size_stride(primals_4, (12,), (1,))
assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
triton_poi_fused_add_cat_1[grid(272)](buf0, primals_1, primals_2,
buf1, 272, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_1
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 12, 17), (204, 17, 1))
buf3 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
triton_poi_fused_mul_2[grid(272)](buf2, primals_4, buf3, 272,
XBLOCK=256, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
triton_poi_fused_mul_3[grid(272)](buf2, primals_4, buf4, 272,
XBLOCK=256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 17, 4), (68, 1, 17),
0), buf4, out=buf5)
buf8 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32)
triton_per_fused__softmax_4[grid(68)](buf5, buf8, 68, 17, XBLOCK=1,
num_warps=2, num_stages=1)
del buf5
buf9 = buf2
del buf2
triton_poi_fused_convolution_5[grid(816)](buf9, primals_4, 816,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf10 = empty_strided_cuda((4, 17, 4), (68, 4, 1), torch.float32)
extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (4, 17, 4), (204,
1, 17), 136), out=buf10)
buf11 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
triton_poi_fused_convolution_6[grid(16, 17)](buf10, buf11, 16, 17,
XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_5, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 17), (68, 17, 1))
del buf11
buf13 = buf12
del buf12
triton_poi_fused_convolution_7[grid(272)](buf13, primals_6, 272,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
return reinterpret_tensor(buf13, (4, 4), (68, 17), 0
), primals_3, primals_5, buf1, buf8, reinterpret_tensor(buf10, (4,
4, 17), (68, 1, 4), 0), reinterpret_tensor(buf9, (4, 4, 17), (204,
17, 1), 136), buf3, reinterpret_tensor(buf4, (4, 17, 4), (68, 1, 17), 0
)
def count_flops_attn(model, _x, y):
"""
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
"""
b, c, *spatial = y[0].shape
num_spatial = int(np.prod(spatial))
matmul_ops = 2 * b * num_spatial ** 2 * c
model.total_ops += th.DoubleTensor([matmul_ops])
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
class QKVAttention(nn.Module):
"""
A module which performs QKV attention and splits in a different order.
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.chunk(3, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum('bct,bcs->bts', (q * scale).view(bs * self.
n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch,
length))
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum('bts,bcs->bct', weight, v.reshape(bs * self.n_heads,
ch, length))
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
class AttentionPool2dNew(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(self, spacial_dim: 'int', embed_dim: 'int',
num_heads_channels: 'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim,
spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, input_0):
primals_2 = self.positional_embedding
primals_3 = self.qkv_proj.weight
primals_4 = self.qkv_proj.bias
primals_5 = self.c_proj.weight
primals_6 = self.c_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
jasperhu13/deit
|
AttentionPool2d
| false
| 10,265
|
[
"Apache-2.0"
] | 0
|
97b09b1c131a7ee8d01ee0ce27a936ff33cf62fc
|
https://github.com/jasperhu13/deit/tree/97b09b1c131a7ee8d01ee0ce27a936ff33cf62fc
|
PatchEmbed
|
import torch
import torch.nn as nn
class PatchEmbed(nn.Module):
"""
PatchEmbed.
"""
def __init__(self, dim_in=3, dim_out=768, kernel=(1, 16, 16), stride=(1,
4, 4), padding=(1, 7, 7), conv_2d=False):
super().__init__()
if conv_2d:
conv = nn.Conv2d
else:
conv = nn.Conv3d
self.proj = conv(dim_in, dim_out, kernel_size=kernel, stride=stride,
padding=padding)
def forward(self, x):
x = self.proj(x)
return x.flatten(2).transpose(1, 2)
def get_inputs():
return [torch.rand([4, 3, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
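# A minimal usage sketch, not part of the original repo; it assumes only the
# class above with its defaults. The Conv3d output here is
# (4, 768, 66, 16, 16), and flatten(2).transpose(1, 2) turns the
# 66 * 16 * 16 = 16896 spatio-temporal patches into a token sequence.
if __name__ == '__main__':
    embed = PatchEmbed()
    tokens = embed(torch.rand([4, 3, 64, 64, 64]))
    assert tokens.shape == (4, 16896, 768)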
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16896 % 768
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (768, 3, 1, 16, 16), (768, 256, 256, 16, 1))
assert_size_stride(primals_2, (768,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64, 64), (786432, 262144, 4096,
64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
4, 4), padding=(1, 7, 7), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 768, 66, 16, 16), (12976128, 16896,
256, 16, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(51904512)](buf1, primals_2,
51904512, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 16896, 768), (12976128, 1, 16896), 0
), primals_1, primals_3
class PatchEmbedNew(nn.Module):
"""
PatchEmbed.
"""
def __init__(self, dim_in=3, dim_out=768, kernel=(1, 16, 16), stride=(1,
4, 4), padding=(1, 7, 7), conv_2d=False):
super().__init__()
if conv_2d:
conv = nn.Conv2d
else:
conv = nn.Conv3d
self.proj = conv(dim_in, dim_out, kernel_size=kernel, stride=stride,
padding=padding)
def forward(self, input_0):
primals_1 = self.proj.weight
primals_2 = self.proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
jasperhu13/deit
|
PatchEmbed
| false
| 10,266
|
[
"Apache-2.0"
] | 0
|
97b09b1c131a7ee8d01ee0ce27a936ff33cf62fc
|
https://github.com/jasperhu13/deit/tree/97b09b1c131a7ee8d01ee0ce27a936ff33cf62fc
|
SiglogModule
|
import torch
import torch.nn as nn
def siglog(v):
return v.sign() * torch.log(1 + v.abs())
class SiglogModule(nn.Module):
def forward(self, v):
return siglog(v)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
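# A minimal property check, not part of the original repo; it assumes only
# the siglog function above. siglog(v) = sign(v) * log(1 + |v|) is odd and
# compresses large magnitudes logarithmically while keeping siglog(0) == 0.
if __name__ == '__main__':
    v = torch.randn(8)
    assert torch.allclose(SiglogModule()(v), siglog(v))
    assert torch.allclose(siglog(-v), -siglog(v))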
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_add_log_mul_sign_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tmp8 = tl_math.abs(tmp0)
tmp9 = 1.0
tmp10 = tmp8 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp7 * tmp11
tl.store(out_ptr0 + x0, tmp12, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_add_log_mul_sign_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def siglog(v):
return v.sign() * torch.log(1 + v.abs())
class SiglogModuleNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
finalgruntgit/diautils
|
SiglogModule
| false
| 10,267
|
[
"MIT"
] | 0
|
b9d7666ed5023700db01a4295430c52721acfc25
|
https://github.com/finalgruntgit/diautils/tree/b9d7666ed5023700db01a4295430c52721acfc25
|
MeanModule
|
import torch
import torch.nn as nn
class MeanModule(nn.Module):
def __init__(self, *axis, keepdim=False):
super().__init__()
self.axis = axis
self.keepdim = keepdim
def forward(self, v):
mean = v.mean(self.axis)
if self.keepdim:
dims = list(v.shape)
if isinstance(self.axis, list) or isinstance(self.axis, tuple):
for ax in self.axis:
dims[ax] = 1
else:
dims[self.axis] = 1
mean = mean.view(dims)
return mean
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
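# A minimal usage sketch, not part of the original repo; it assumes only the
# class above. With an explicit axis and keepdim=True, the reduced dimension
# is restored as size 1 via the view at the end of forward.
if __name__ == '__main__':
    v = torch.rand([4, 4, 4, 4])
    m = MeanModule(1, keepdim=True)
    assert torch.allclose(m(v), v.mean(1, keepdim=True))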
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
return buf1,
class MeanModuleNew(nn.Module):
def __init__(self, *axis, keepdim=False):
super().__init__()
self.axis = axis
self.keepdim = keepdim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
finalgruntgit/diautils
|
MeanModule
| false
| 10,268
|
[
"MIT"
] | 0
|
b9d7666ed5023700db01a4295430c52721acfc25
|
https://github.com/finalgruntgit/diautils/tree/b9d7666ed5023700db01a4295430c52721acfc25
|
Attention
|
import torch
from torch import nn
class Attention(nn.Module):
def __init__(self, feature_dim, K, bias=True, **kwargs):
super(Attention, self).__init__(**kwargs)
self.supports_masking = True
self.bias = bias
self.feature_dim = feature_dim
self.K = K
weight = torch.zeros(feature_dim, 1)
nn.init.xavier_uniform_(weight)
self.weight = nn.Parameter(weight)
if bias:
self.b = nn.Parameter(torch.zeros(K))
def forward(self, x):
B, N, K, feature_dim = x.shape
eij = torch.mm(x.contiguous().view(-1, feature_dim), self.weight).view(
-1, K)
if self.bias:
eij = eij + self.b
eij = torch.tanh(eij)
a = torch.exp(eij)
a = a / torch.sum(a, 1, keepdim=True) + 1e-10
weighted_input = x * a.view(B, N, K, 1)
return torch.sum(weighted_input, 2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'feature_dim': 4, 'K': 4}]
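# A minimal usage sketch, not part of the original repo; it assumes only the
# class above. Each of the K slots gets a tanh score from the learned
# weight, the scores are exp-normalized over K, and the K axis is reduced
# by the attention-weighted sum.
if __name__ == '__main__':
    attn = Attention(feature_dim=4, K=4)
    out = attn(torch.rand([4, 4, 4, 4]))  # (B, N, K, feature_dim)
    assert out.shape == (4, 4, 4)  # the K axis is summed away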
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_exp_sum_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + 2)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + 3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = libdevice.tanh(tmp3)
tmp5 = tl_math.exp(tmp4)
tmp9 = tmp6 + tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp17 = libdevice.tanh(tmp16)
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp24 = libdevice.tanh(tmp23)
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp19 + tmp25
tl.store(out_ptr0 + x0, tmp26, xmask)
@triton.jit
def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr2 + 1)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp23 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp24 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr2 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp34 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp35 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp36 = tl.load(in_ptr2 + 3)
tmp37 = tl.broadcast_to(tmp36, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = libdevice.tanh(tmp4)
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = 1e-10
tmp10 = tmp8 + tmp9
tmp11 = tmp0 * tmp10
tmp16 = tmp13 + tmp15
tmp17 = libdevice.tanh(tmp16)
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp18 / tmp7
tmp20 = tmp19 + tmp9
tmp21 = tmp12 * tmp20
tmp22 = tmp11 + tmp21
tmp27 = tmp24 + tmp26
tmp28 = libdevice.tanh(tmp27)
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp29 / tmp7
tmp31 = tmp30 + tmp9
tmp32 = tmp23 * tmp31
tmp33 = tmp22 + tmp32
tmp38 = tmp35 + tmp37
tmp39 = libdevice.tanh(tmp38)
tmp40 = tl_math.exp(tmp39)
tmp41 = tmp40 / tmp7
tmp42 = tmp41 + tmp9
tmp43 = tmp34 * tmp42
tmp44 = tmp33 + tmp43
tl.store(out_ptr0 + x2, tmp44, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_exp_sum_tanh_0[grid(16)](buf0, primals_3, buf1,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_1[grid(64)](primals_1, buf0, primals_3,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
return buf2, primals_1, primals_3, buf0
class AttentionNew(nn.Module):
def __init__(self, feature_dim, K, bias=True, **kwargs):
super(AttentionNew, self).__init__(**kwargs)
self.supports_masking = True
self.bias = bias
self.feature_dim = feature_dim
self.K = K
weight = torch.zeros(feature_dim, 1)
nn.init.xavier_uniform_(weight)
self.weight = nn.Parameter(weight)
if bias:
self.b = nn.Parameter(torch.zeros(K))
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.b
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
heyitsmine/FewRel
|
Attention
| false
| 10,269
|
[
"MIT"
] | 0
|
2a2b8ae471298d9eb3557796a085c23b21982fb2
|
https://github.com/heyitsmine/FewRel/tree/2a2b8ae471298d9eb3557796a085c23b21982fb2
|
SumModule
|
import torch
import torch.nn as nn
class SumModule(nn.Module):
def __init__(self, *axis, keepdim=False):
super().__init__()
self.axis = axis
self.keepdim = keepdim
def forward(self, v):
sum = v.sum(self.axis)
if self.keepdim:
dims = list(v.shape)
if isinstance(self.axis, list) or isinstance(self.axis, tuple):
for ax in self.axis:
dims[ax] = 1
else:
dims[self.axis] = 1
sum = sum.view(dims)
return sum
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
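# A minimal usage sketch, not part of the original repo, mirroring the
# MeanModule example above; it assumes only the class defined here.
if __name__ == '__main__':
    v = torch.rand([4, 4, 4, 4])
    s = SumModule(2, keepdim=True)
    assert torch.allclose(s(v), v.sum(2, keepdim=True))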
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp3, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(1)](arg0_1, buf0, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
return buf0,
class SumModuleNew(nn.Module):
def __init__(self, *axis, keepdim=False):
super().__init__()
self.axis = axis
self.keepdim = keepdim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
finalgruntgit/diautils
|
SumModule
| false
| 10,271
|
[
"MIT"
] | 0
|
b9d7666ed5023700db01a4295430c52721acfc25
|
https://github.com/finalgruntgit/diautils/tree/b9d7666ed5023700db01a4295430c52721acfc25
|
MultiheadAttention
|
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.attn_dropout = attn_dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.register_parameter('in_proj_bias', None)
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key, value, attn_mask=None):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
        query, key and value. Timesteps can be masked by supplying a T x T
        mask in the `attn_mask` argument. Note that this forward does not
        accept a `key_padding_mask` argument, so padding elements in the
        key cannot be excluded here.
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if qkv_same:
q, k, v = self.in_proj_qkv(query)
elif kv_same:
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q = q * self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
src_len = k.size(1)
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
        if attn_mask is not None:
            try:
                attn_weights += attn_mask.unsqueeze(0)
            except RuntimeError:
                # attn_mask must broadcast against
                # (bsz * num_heads, tgt_len, src_len)
                raise AssertionError(
                    'attn_mask is incompatible with attn_weights shape')
attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(
attn_weights)
attn_weights = F.dropout(attn_weights, p=self.attn_dropout,
training=self.training)
attn = torch.bmm(attn_weights, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.sum(dim=1) / self.num_heads
return attn, attn_weights
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query, **kwargs):
return self._in_proj(query, end=self.embed_dim, **kwargs)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None, **kwargs):
weight = kwargs.get('weight', self.in_proj_weight)
bias = kwargs.get('bias', self.in_proj_bias)
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4,
4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4}]
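# A minimal self-attention sketch, not part of the original repo; it assumes
# only the class above. Passing the same tensor as query, key and value
# takes the qkv_same fast path, and the returned weights are averaged over
# heads.
if __name__ == '__main__':
    mha = MultiheadAttention(embed_dim=4, num_heads=4)
    x = torch.rand(4, 4, 4)  # (time, batch, channel)
    attn, attn_weights = mha(x, x, x)
    assert attn.shape == (4, 4, 4)
    assert attn_weights.shape == (4, 4, 4)  # (batch, tgt_len, src_len)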
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1,
beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1,
beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0),
0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf4
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1,
16, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(4, 16)](buf8, buf9, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0)
del buf8
extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf10)
del primals_7
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_div_sum_3[grid(256)](buf7, buf11, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0
), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0
), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0
), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0)
class MultiheadAttentionNew(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.attn_dropout = attn_dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.register_parameter('in_proj_bias', None)
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query, **kwargs):
return self._in_proj(query, end=self.embed_dim, **kwargs)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None, **kwargs):
weight = kwargs.get('weight', self.in_proj_weight)
bias = kwargs.get('bias', self.in_proj_bias)
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def forward(self, input_0, input_1, input_2):
primals_4 = self.in_proj_weight
primals_5 = self.in_proj_bias
primals_6 = self.out_proj.weight
primals_7 = self.out_proj.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
|
jiahuanluo/multi_media
|
MultiheadAttention
| false
| 10,272
|
[
"MIT"
] | 0
|
ac5ac59dba87d0368ca656e600a85bfd9a1da28e
|
https://github.com/jiahuanluo/multi_media/tree/ac5ac59dba87d0368ca656e600a85bfd9a1da28e
|
SigsqrtModule
|
import torch
import torch.nn as nn
def sigsqrt(v):
return v / torch.sqrt(1 + v.abs())
class SigsqrtModule(nn.Module):
def forward(self, v):
return sigsqrt(v)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
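# A minimal property check, not part of the original repo; it assumes only
# the sigsqrt function above. sigsqrt(v) = v / sqrt(1 + |v|) is odd and
# grows like sign(v) * sqrt(|v|) for large |v|.
if __name__ == '__main__':
    v = torch.randn(8)
    assert torch.allclose(sigsqrt(-v), -sigsqrt(v))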
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_add_div_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.abs(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = libdevice.sqrt(tmp3)
tmp5 = tmp0 / tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_add_div_sqrt_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def sigsqrt(v):
return v / torch.sqrt(1 + v.abs())
class SigsqrtModuleNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
finalgruntgit/diautils
|
SigsqrtModule
| false
| 10,273
|
[
"MIT"
] | 0
|
b9d7666ed5023700db01a4295430c52721acfc25
|
https://github.com/finalgruntgit/diautils/tree/b9d7666ed5023700db01a4295430c52721acfc25
|
LearnedPositionalEncoding
|
import torch
import torch.nn as nn
import torch.optim
class LearnedPositionalEncoding(nn.Module):
def __init__(self, max_position_embeddings, embedding_dim, seq_length):
super(LearnedPositionalEncoding, self).__init__()
self.position_embeddings = nn.Parameter(torch.zeros(1, 3200, 512))
def forward(self, x, position_ids=None):
position_embeddings = self.position_embeddings
return x + position_embeddings
def get_inputs():
return [torch.rand([4, 4, 3200, 512])]
def get_init_inputs():
return [[], {'max_position_embeddings': 4, 'embedding_dim': 4,
'seq_length': 4}]
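# Hedged sketch (illustration only): the module ignores its constructor
# arguments and relies on broadcasting a fixed (1, L, D) table against any
# (..., L, D) input. Small shapes below stand in for the hardcoded 3200 x 512.
import torch
pos = torch.zeros(1, 8, 16)
x = torch.rand(4, 2, 8, 16)
assert (x + pos).shape == x.shape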
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1638400
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 3200, 512), (1638400, 512, 1))
assert_size_stride(primals_2, (4, 4, 3200, 512), (6553600, 1638400, 512, 1)
)
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3200, 512), (6553600, 1638400, 512,
1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(26214400)](primals_2, primals_1, buf0,
26214400, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class LearnedPositionalEncodingNew(nn.Module):
def __init__(self, max_position_embeddings, embedding_dim, seq_length):
super(LearnedPositionalEncodingNew, self).__init__()
self.position_embeddings = nn.Parameter(torch.zeros(1, 3200, 512))
def forward(self, input_0):
primals_1 = self.position_embeddings
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
felixquinton1/TransBTS
|
LearnedPositionalEncoding
| false
| 10,274
|
[
"Apache-2.0"
] | 0
|
6992c902413ba15f40ebfe9f6d5d0e3594051033
|
https://github.com/felixquinton1/TransBTS/tree/6992c902413ba15f40ebfe9f6d5d0e3594051033
|
VAELoss
|
import torch
import torch.nn as nn
class VAELoss(nn.Module):
def __init__(self):
super(VAELoss, self).__init__()
self.bce = nn.BCELoss(reduction='sum')
def forward(self, recon_x, x, mu, logvar):
BCE = self.bce(recon_x, x)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return BCE + KLD
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
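# Hedged sketch (illustration, not repo code): the KLD term is the closed-form
# KL( N(mu, sigma^2) || N(0, 1) ) summed over elements, with logvar = log(sigma^2):
#   KL = 0.5 * sum(sigma^2 + mu^2 - 1 - logvar) = -0.5 * sum(1 + logvar - mu^2 - exp(logvar))
import torch
mu, logvar = torch.randn(4, 4), torch.randn(4, 4)
kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
kld_ref = 0.5 * torch.sum(logvar.exp() + mu.pow(2) - 1 - logvar)
assert torch.allclose(kld, kld_ref, atol=1e-5)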
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_exp_mul_pow_sub_sum_0(in_out_ptr0
, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp16 = tl.load(in_ptr2 + r0, None)
tmp18 = tl.load(in_ptr3 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = -tmp3
tmp5 = libdevice.log1p(tmp4)
tmp6 = -100.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp2 * tmp7
tmp9 = tl_math.log(tmp3)
tmp10 = triton_helpers.maximum(tmp9, tmp6)
tmp11 = tmp0 * tmp10
tmp12 = tmp8 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp17 = tmp16 + tmp1
tmp19 = tmp18 * tmp18
tmp20 = tmp17 - tmp19
tmp21 = tl_math.exp(tmp16)
tmp22 = tmp20 - tmp21
tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tmp26 = -0.5
tmp27 = tmp25 * tmp26
tmp28 = tmp15 + tmp27
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_exp_mul_pow_sub_sum_0[grid(1)
](buf2, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf2,
class VAELossNew(nn.Module):
def __init__(self):
super(VAELossNew, self).__init__()
self.bce = nn.BCELoss(reduction='sum')
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
jlrussin/RL_project
|
VAELoss
| false
| 10,275
|
[
"Apache-2.0"
] | 0
|
a8562b4797afdf5944dba768a88d779056e8506a
|
https://github.com/jlrussin/RL_project/tree/a8562b4797afdf5944dba768a88d779056e8506a
|
SoftmaxModule
|
import torch
import torch.nn as nn
class SoftmaxModule(nn.Module):
def __init__(self, axis):
super().__init__()
self.axis = axis
def forward(self, v):
return v.softmax(self.axis)
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'axis': 4}]
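# Hedged sketch (illustration only) of the two-pass, numerically stable softmax
# the generated kernel pair below implements: subtract the row max before exp,
# then normalize by the row sum. Naive exp would overflow at these magnitudes.
import torch
x = torch.randn(4, 4) * 100
e = (x - x.max(dim=-1, keepdim=True).values).exp()   # kernel 0: shifted exp
y = e / e.sum(dim=-1, keepdim=True)                  # kernel 1: normalize
assert torch.allclose(y, x.softmax(dim=-1), atol=1e-6)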
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused__softmax_1[grid(1024)](buf0, buf1, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del buf0
return buf1,
class SoftmaxModuleNew(nn.Module):
def __init__(self, axis):
super().__init__()
self.axis = axis
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
finalgruntgit/diautils
|
SoftmaxModule
| false
| 10,276
|
[
"MIT"
] | 0
|
b9d7666ed5023700db01a4295430c52721acfc25
|
https://github.com/finalgruntgit/diautils/tree/b9d7666ed5023700db01a4295430c52721acfc25
|
MultipleRegression
|
import torch
import torch.nn as nn
class MultipleRegression(nn.Module):
def __init__(self, num_features):
super(MultipleRegression, self).__init__()
self.fc1 = nn.Linear(num_features, 64)
self.fc2 = nn.Linear(64, 128)
self.output = nn.Linear(128, 1)
self.act = nn.Sigmoid()
def forward(self, inputs):
x = self.act(self.fc1(inputs))
x = self.act(self.fc2(x))
x = self.output(x)
return x
def predict(self, test_inputs):
x = self.act(self.fc1(test_inputs))
x = self.act(self.fc2(x))
x = self.output(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
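# Hedged sketch (illustration only) of the layout trick the generated code
# uses: nn.Linear over a (4, 4, 4, C) tensor is one mm after flattening the
# leading dims to (64, C), with the result viewed back to 4-D.
import torch
import torch.nn as nn
fc = nn.Linear(4, 64)
x = torch.rand(4, 4, 4, 4)
y_flat = torch.addmm(fc.bias, x.reshape(64, 4), fc.weight.t()).view(4, 4, 4, 64)
assert torch.allclose(fc(x), y_flat, atol=1e-6)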
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 64), (64, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (1, 128), (128, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(4096)](buf1, primals_2, 4096,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 128), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
triton_poi_fused_sigmoid_1[grid(8192)](buf3, primals_5, 8192,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128),
0), alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_6, primals_4
class MultipleRegressionNew(nn.Module):
def __init__(self, num_features):
super(MultipleRegressionNew, self).__init__()
self.fc1 = nn.Linear(num_features, 64)
self.fc2 = nn.Linear(64, 128)
self.output = nn.Linear(128, 1)
self.act = nn.Sigmoid()
def predict(self, test_inputs):
x = self.act(self.fc1(test_inputs))
x = self.act(self.fc2(x))
x = self.output(x)
return x
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.output.weight
primals_7 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
jiruifu-jerry0219/UpperLimbEstimator
|
MultipleRegression
| false
| 10,277
|
[
"Apache-2.0"
] | 0
|
d62deef93419934dcb33e43707dd0634a235fb9a
|
https://github.com/jiruifu-jerry0219/UpperLimbEstimator/tree/d62deef93419934dcb33e43707dd0634a235fb9a
|
SegmentationNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SegmentationNet(nn.Module):
def __init__(self, feature, hidden1, hidden2, output):
""" Initialize a class NeuralNet.
:param batch_size: int
:param hidden: int
"""
super(SegmentationNet, self).__init__()
self.layer1 = nn.Linear(feature, hidden1)
self.layer2 = nn.Linear(hidden1, hidden2)
self.layer3 = nn.Linear(hidden2, output)
def get_weight_norm(self):
""" Return ||W||
:return: float
"""
layer_1_w_norm = torch.norm(self.layer1.weight, 2)
layer_2_w_norm = torch.norm(self.layer2.weight, 2)
layer_3_w_norm = torch.norm(self.layer3.weight, 2)
return layer_1_w_norm + layer_2_w_norm + layer_3_w_norm
def forward(self, inputs):
""" Return a forward pass given inputs.
:param inputs: user vector.
:return: user vector.
"""
out = inputs
out = self.layer1(out)
out = F.relu(out)
out = self.layer2(out)
out = F.relu(out)
out = self.layer3(out)
out = F.softmax(out, dim=1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'feature': 4, 'hidden1': 4, 'hidden2': 4, 'output': 4}]
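# Hedged sketch (an assumption about intent, not repo code): softmax with dim=1
# normalizes across the second axis, which is what the strided loads at offsets
# 0/16/32/48 in the generated kernels below traverse for a (4, 4, 4, 4) input.
import torch
import torch.nn.functional as F
out = F.softmax(torch.randn(4, 4, 4, 4), dim=1)
assert torch.allclose(out.sum(dim=1), torch.ones(4, 4, 4))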
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class SegmentationNetNew(nn.Module):
def __init__(self, feature, hidden1, hidden2, output):
""" Initialize a class NeuralNet.
:param batch_size: int
:param hidden: int
"""
super(SegmentationNetNew, self).__init__()
self.layer1 = nn.Linear(feature, hidden1)
self.layer2 = nn.Linear(hidden1, hidden2)
self.layer3 = nn.Linear(hidden2, output)
def get_weight_norm(self):
""" Return ||W||
:return: float
"""
layer_1_w_norm = torch.norm(self.layer1.weight, 2)
layer_2_w_norm = torch.norm(self.layer2.weight, 2)
layer_3_w_norm = torch.norm(self.layer3.weight, 2)
return layer_1_w_norm + layer_2_w_norm + layer_3_w_norm
def forward(self, input_0):
primals_2 = self.layer1.weight
primals_3 = self.layer1.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_6 = self.layer3.weight
primals_7 = self.layer3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
jinyu-hou/medium-blog-scripts
|
SegmentationNet
| false
| 10,278
|
[
"MIT"
] | 0
|
a645d544a4bd1c937e4ff99dca0d6e98b3abb7f9
|
https://github.com/jinyu-hou/medium-blog-scripts/tree/a645d544a4bd1c937e4ff99dca0d6e98b3abb7f9
|
LinearWithChannel
|
import torch
import numpy as np
import torch.nn as nn
class LinearWithChannel(nn.Module):
def __init__(self, input_size, output_size, channel_size):
super(LinearWithChannel, self).__init__()
self.channel_size = channel_size
self.weight = torch.nn.Parameter(torch.zeros(channel_size,
input_size, output_size))
self.bias = torch.nn.Parameter(torch.zeros(channel_size, 1,
output_size))
self.reset_parameters(self.weight, self.bias)
def reset_parameters(self, weights, bias):
torch.nn.init.kaiming_uniform_(weights, a=np.sqrt(3))
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(weights)
bound = 1 / np.sqrt(fan_in)
torch.nn.init.uniform_(bias, -bound, bound)
def forward(self, observations):
"""
observations = torch.tensor(batch_size, input_size)
weight = torch.tensor(channel_size, input_size, output_size)
bias = torch.tensor(channel_size, 1, output_size)
:param observations:
:return: torch.tensor(channel_size, batch_size, output_size)
"""
observations = observations.repeat(self.channel_size, 1, 1)
output = torch.bmm(observations, self.weight) + self.bias
return output
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4, 'channel_size': 4}]
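# Hedged sketch (illustration only): the module applies one independent affine
# map per channel, batched into a single bmm; the per-channel loop below is the
# reference formulation.
import torch
C, B = 4, 4
obs = torch.rand(B, 4)
w, b = torch.rand(C, 4, 4), torch.rand(C, 1, 4)
out = torch.bmm(obs.repeat(C, 1, 1), w) + b
ref = torch.stack([obs @ w[c] + b[c, 0] for c in range(C)])
assert torch.allclose(out, ref, atol=1e-6)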
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_repeat_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf0, primals_2, out=buf1)
del primals_2
buf2 = buf1
del buf1
triton_poi_fused_add_1[grid(64)](buf2, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0)
class LinearWithChannelNew(nn.Module):
def __init__(self, input_size, output_size, channel_size):
super(LinearWithChannelNew, self).__init__()
self.channel_size = channel_size
self.weight = torch.nn.Parameter(torch.zeros(channel_size,
input_size, output_size))
self.bias = torch.nn.Parameter(torch.zeros(channel_size, 1,
output_size))
self.reset_parameters(self.weight, self.bias)
def reset_parameters(self, weights, bias):
torch.nn.init.kaiming_uniform_(weights, a=np.sqrt(3))
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(weights)
bound = 1 / np.sqrt(fan_in)
torch.nn.init.uniform_(bias, -bound, bound)
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
jilanglois-su/cobs10-dengai
|
LinearWithChannel
| false
| 10,279
|
[
"MIT"
] | 0
|
101d3434db6330e9794b2e266b02c93793abfb82
|
https://github.com/jilanglois-su/cobs10-dengai/tree/101d3434db6330e9794b2e266b02c93793abfb82
|
MultiHeadedAttention
|
import torch
from torch import nn
from torch.nn import functional as F
def same_tensor(tensor, *args):
""" Do the input tensors all point to the same underlying data """
for other in args:
if not torch.is_tensor(other):
return False
if tensor.device != other.device:
return False
if tensor.dtype != other.dtype:
return False
if tensor.data_ptr() != other.data_ptr():
return False
return True
class MultiHeadedAttention(nn.Module):
""" Implement a multi-headed attention module """
def __init__(self, embed_dim, num_heads=1):
""" Initialize the attention module """
super(MultiHeadedAttention, self).__init__()
assert embed_dim % num_heads == 0, f'num_heads={num_heads} should evenly divide embed_dim={embed_dim}'
self.embed_dim = embed_dim
self.num_heads = num_heads
self.projection_dim = embed_dim // num_heads
self.scale = self.projection_dim ** -0.5
self.input_weights = nn.Parameter(torch.Tensor(3 * embed_dim,
embed_dim))
self.output_projection = nn.Linear(embed_dim, embed_dim, bias=False)
self.reset_parameters()
def reset_parameters(self):
""" Reset parameters using xavier initialization """
gain = nn.init.calculate_gain('linear')
nn.init.xavier_uniform_(self.input_weights, gain)
nn.init.xavier_uniform_(self.output_projection.weight, gain)
def project(self, inputs, index=0, chunks=1):
""" Produce a linear projection using the weights """
batch_size = inputs.shape[0]
start = index * self.embed_dim
end = start + chunks * self.embed_dim
projections = F.linear(inputs, self.input_weights[start:end]).chunk(
chunks, dim=-1)
output_projections = []
for projection in projections:
output_projections.append(projection.view(batch_size, -1, self.
num_heads, self.projection_dim).transpose(2, 1).contiguous(
).view(batch_size * self.num_heads, -1, self.projection_dim))
return output_projections
def attention(self, values, keys, queries, key_mask=None, mask=None):
""" Scaled dot product attention with optional masks """
logits = self.scale * torch.bmm(queries, keys.transpose(2, 1))
if mask is not None:
logits += mask
if key_mask is not None:
logits_shape = logits.shape
batch_size = logits_shape[0] // self.num_heads
logits = logits.view(batch_size, self.num_heads, logits_shape[1
], logits_shape[2])
logits.masked_fill_(key_mask[:, None, None], float('-inf'))
logits = logits.view(logits_shape)
attn_weights = F.softmax(logits, dim=-1)
attended = torch.bmm(attn_weights, values)
batch_size = queries.shape[0] // self.num_heads
return attended.view(batch_size, self.num_heads, -1, self.
projection_dim).transpose(2, 1).contiguous().view(batch_size, -
1, self.num_heads * self.projection_dim)
def forward(self, values, keys, queries, key_mask=None, attention_mask=
None, num_queries=0):
""" Forward pass of the attention """
if same_tensor(values, keys, queries):
values, keys, queries = self.project(values, chunks=3)
elif same_tensor(values, keys):
values, keys = self.project(values, chunks=2)
queries, = self.project(queries, 2)
else:
values, = self.project(values, 0)
keys, = self.project(keys, 1)
queries, = self.project(queries, 2)
if num_queries:
queries = queries[:, -num_queries:]
attended = self.attention(values, keys, queries, key_mask,
attention_mask)
return self.output_projection(attended)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4}]
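# Hedged sketch (illustration only): with num_heads=1 the module above reduces
# to plain scaled dot-product attention, softmax(Q K^T * d**-0.5) V. Recent
# PyTorch also exposes this as F.scaled_dot_product_attention; treat that as a
# version-dependent assumption.
import torch
import torch.nn.functional as F
q = k = v = torch.rand(4, 16, 4)                    # (batch * heads, seq, d)
scale = q.shape[-1] ** -0.5
attn = F.softmax(scale * torch.bmm(q, k.transpose(2, 1)), dim=-1)
ref = torch.bmm(attn, v)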
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tmp10 / tmp14
tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2)
del primals_2
buf3 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1),
0), reinterpret_tensor(buf1, (4, 4, 16), (64, 1, 4), 0), out=buf3)
buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(64)](buf3, buf6, 64, 16, XBLOCK=32,
num_warps=4, num_stages=1)
del buf3
buf7 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf0, (4, 16, 4), (64,
4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf8)
return reinterpret_tensor(buf8, (4, 16, 4), (64, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), buf6, reinterpret_tensor(buf7, (64, 4), (4, 1), 0
), primals_5, reinterpret_tensor(buf0, (4, 4, 16), (64, 1, 4), 0
), reinterpret_tensor(buf2, (4, 4, 16), (64, 1, 4), 0
), reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0)
def same_tensor(tensor, *args):
""" Do the input tensors all point to the same underlying data """
for other in args:
if not torch.is_tensor(other):
return False
if tensor.device != other.device:
return False
if tensor.dtype != other.dtype:
return False
if tensor.data_ptr() != other.data_ptr():
return False
return True
class MultiHeadedAttentionNew(nn.Module):
""" Implement a multi-headed attention module """
def __init__(self, embed_dim, num_heads=1):
""" Initialize the attention module """
super(MultiHeadedAttentionNew, self).__init__()
assert embed_dim % num_heads == 0, f'num_heads={num_heads} should evenly divide embed_dim={embed_dim}'
self.embed_dim = embed_dim
self.num_heads = num_heads
self.projection_dim = embed_dim // num_heads
self.scale = self.projection_dim ** -0.5
self.input_weights = nn.Parameter(torch.Tensor(3 * embed_dim,
embed_dim))
self.output_projection = nn.Linear(embed_dim, embed_dim, bias=False)
self.reset_parameters()
def reset_parameters(self):
""" Reset parameters using xavier initialization """
gain = nn.init.calculate_gain('linear')
nn.init.xavier_uniform_(self.input_weights, gain)
nn.init.xavier_uniform_(self.output_projection.weight, gain)
def project(self, inputs, index=0, chunks=1):
""" Produce a linear projection using the weights """
batch_size = inputs.shape[0]
start = index * self.embed_dim
end = start + chunks * self.embed_dim
projections = F.linear(inputs, self.input_weights[start:end]).chunk(
chunks, dim=-1)
output_projections = []
for projection in projections:
output_projections.append(projection.view(batch_size, -1, self.
num_heads, self.projection_dim).transpose(2, 1).contiguous(
).view(batch_size * self.num_heads, -1, self.projection_dim))
return output_projections
def attention(self, values, keys, queries, key_mask=None, mask=None):
""" Scaled dot product attention with optional masks """
logits = self.scale * torch.bmm(queries, keys.transpose(2, 1))
if mask is not None:
logits += mask
if key_mask is not None:
logits_shape = logits.shape
batch_size = logits_shape[0] // self.num_heads
logits = logits.view(batch_size, self.num_heads, logits_shape[1
], logits_shape[2])
logits.masked_fill_(key_mask[:, None, None], float('-inf'))
logits = logits.view(logits_shape)
attn_weights = F.softmax(logits, dim=-1)
attended = torch.bmm(attn_weights, values)
batch_size = queries.shape[0] // self.num_heads
return attended.view(batch_size, self.num_heads, -1, self.
projection_dim).transpose(2, 1).contiguous().view(batch_size, -
1, self.num_heads * self.projection_dim)
def forward(self, input_0, input_1, input_2):
primals_2 = self.input_weights
primals_5 = self.output_projection.weight
primals_1 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
jinga-lala/stupidNMT
|
MultiHeadedAttention
| false
| 10,280
|
[
"BSD-3-Clause"
] | 0
|
2a41c072c2bc622c7edd8556f552f38556d70dae
|
https://github.com/jinga-lala/stupidNMT/tree/2a41c072c2bc622c7edd8556f552f38556d70dae
|
MLP
|
from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class Conv1D(nn.Module):
def __init__(self, nf, nx):
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
class MLP(nn.Module):
def __init__(self, n_state, config):
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = gelu
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return h2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_state': 4, 'config': _mock_config(n_embd=4)}]
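# Hedged sketch (illustration only): the constant 0.7978845608028654 baked into
# the generated kernel below is sqrt(2 / pi) from the tanh approximation of
# GELU. Recent PyTorch exposes the same curve as F.gelu(x, approximate='tanh');
# treat that comparison as a torch-version assumption.
import math
import torch
import torch.nn.functional as F
x = torch.randn(8)
g = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * x ** 3)))
assert torch.allclose(g, F.gelu(x, approximate='tanh'), atol=1e-6)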
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), primals_3, alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), primals_5, alpha=1, beta=1, out=buf2)
del primals_4
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf0, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(
primals_1, (4, 64), (1, 4), 0)
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class Conv1D(nn.Module):
def __init__(self, nf, nx):
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
class MLPNew(nn.Module):
def __init__(self, n_state, config):
super(MLPNew, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = gelu
def forward(self, input_0):
primals_3 = self.c_fc.weight
primals_2 = self.c_fc.bias
primals_5 = self.c_proj.weight
primals_4 = self.c_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
jamessheenworks/GPT2sQA
|
MLP
| false
| 10,281
|
[
"Apache-2.0"
] | 0
|
14866cb21d229281e8f8b8f88aac9195bca45cd7
|
https://github.com/jamessheenworks/GPT2sQA/tree/14866cb21d229281e8f8b8f88aac9195bca45cd7
|
Classify
|
import torch
import torch.nn as nn
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
class Flatten(nn.Module):
@staticmethod
def forward(x):
return x.view(x.size(0), -1)
class Classify(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
super(Classify, self).__init__()
self.aap = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
)
self.flat = Flatten()
def forward(self, x):
z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else
[x])], 1)
return self.flat(self.conv(z))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c1': 4, 'c2': 4}]
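# Quick illustration (values are assumptions, not repo tests): autopad picks
# "same" padding for odd kernel sizes, elementwise for lists, and defers to an
# explicit p when given.
assert autopad(1) == 0
assert autopad(3) == 1
assert autopad([3, 5]) == [1, 2]
assert autopad(3, p=0) == 0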
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
return reinterpret_tensor(buf2, (4, 4), (4, 1), 0), primals_2, buf1
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
class Flatten(nn.Module):
@staticmethod
def forward(x):
return x.view(x.size(0), -1)
class ClassifyNew(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
super(ClassifyNew, self).__init__()
self.aap = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
)
self.flat = Flatten()
def forward(self, input_0):
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
hyperparameters/Towards-Realtime-MOT
|
Classify
| false
| 10,282
|
[
"MIT"
] | 0
|
eb956a3bd5991f4895178566cb0173769977f88d
|
https://github.com/hyperparameters/Towards-Realtime-MOT/tree/eb956a3bd5991f4895178566cb0173769977f88d
|
NeuralNerwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class NeuralNerwork(nn.Module):
""" Construct a ReLU-activated NN, set Bias to False
Four hidden layers with sizes [1000, 1000, 500, 200]
Features = 784, Targets = 10 classes
"""
def __init__(self, features, targets):
super(NeuralNerwork, self).__init__()
self.fc1 = nn.Linear(features, 1000, bias=False)
self.fc2 = nn.Linear(1000, 1000, bias=False)
self.fc3 = nn.Linear(1000, 500, bias=False)
self.fc4 = nn.Linear(500, 200, bias=False)
self.fc5 = nn.Linear(200, targets, bias=False)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.relu(self.fc4(x))
return F.relu(self.fc5(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'features': 4, 'targets': 4}]
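# Hedged shape-flow sketch (illustration only): nn.Linear acts on the last dim,
# so a (4, 4, 4, 4) input runs 4 -> 1000 -> 1000 -> 500 -> 200 -> targets and
# the final ReLU leaves the output nonnegative.
import torch
net = NeuralNerwork(features=4, targets=4)
y = net(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 4, 4) and (y >= 0).all()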
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 4000
x2 = xindex // 4000
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + (x1 + 4096 * x2), tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 32000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2000
x1 = xindex // 2000
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(out_ptr0 + (x0 + 2016 * x1), tmp2, xmask)
tl.store(out_ptr1 + (x0 + 2048 * x1), tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 500
x1 = xindex // 500
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 500 * (x1 % 4) + 2016 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (1000, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1000, 1000), (1000, 1))
assert_size_stride(primals_4, (500, 1000), (1000, 1))
assert_size_stride(primals_5, (200, 500), (500, 1))
assert_size_stride(primals_6, (4, 200), (200, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1000), (1000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1000), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1000), (16000, 4000, 1000,
1), 0)
del buf0
buf15 = empty_strided_cuda((4, 4, 4, 1000), (16384, 4096, 1000, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64000)](buf1, buf15,
64000, XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 1000), (1000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0
), reinterpret_tensor(primals_3, (1000, 1000), (1, 1000), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1000), (16000, 4000, 1000,
1), 0)
del buf2
buf14 = empty_strided_cuda((4, 4, 4, 1000), (16384, 4096, 1000, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(64000)](buf3, buf14,
64000, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 500), (500, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 1000), (1000, 1), 0
), reinterpret_tensor(primals_4, (1000, 500), (1, 1000), 0),
out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 500), (8064, 2016, 500, 1),
torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 500), (8192, 2048, 500, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(32000)](buf4, buf5,
buf13, 32000, XBLOCK=256, num_warps=4, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused_relu_view_2[grid(32000)](buf5, buf6, 32000, XBLOCK
=128, num_warps=4, num_stages=1)
del buf5
buf7 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.mm(buf6, reinterpret_tensor(primals_5, (500, 200), (
1, 500), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 200), (3200, 800, 200, 1), 0)
del buf7
buf12 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(12800)](buf8, buf12,
12800, XBLOCK=256, num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (64, 200), (200, 1), 0),
reinterpret_tensor(primals_6, (200, 4), (1, 200), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf9
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_4[grid(256)](buf10, buf11,
256, XBLOCK=128, num_warps=4, num_stages=1)
return (buf10, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0),
reinterpret_tensor(buf3, (64, 1000), (1000, 1), 0), buf6,
reinterpret_tensor(buf8, (64, 200), (200, 1), 0), buf11, primals_6,
buf12, primals_5, buf13, primals_4, buf14, primals_3, buf15)
class NeuralNerworkNew(nn.Module):
""" Construct a ReLU-activated NN, set Bias to False
Four hidden layers with sizes [1000, 1000, 500, 200]
Features = 784, Targets = 10 classes
"""
def __init__(self, features, targets):
super(NeuralNerworkNew, self).__init__()
self.fc1 = nn.Linear(features, 1000, bias=False)
self.fc2 = nn.Linear(1000, 1000, bias=False)
self.fc3 = nn.Linear(1000, 500, bias=False)
self.fc4 = nn.Linear(500, 200, bias=False)
self.fc5 = nn.Linear(200, targets, bias=False)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_3 = self.fc2.weight
primals_4 = self.fc3.weight
primals_5 = self.fc4.weight
primals_6 = self.fc5.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
jf20541/Pruning-DeepNeuralNetwork
|
NeuralNerwork
| false
| 10,283
|
[
"MIT"
] | 0
|
a78a88616c19aa0f1449eb562b7dd8d7c4f47252
|
https://github.com/jf20541/Pruning-DeepNeuralNetwork/tree/a78a88616c19aa0f1449eb562b7dd8d7c4f47252
|
SELayer
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class SELayer(nn.Module):
def __init__(self, in_channels, reduction):
super().__init__()
mid_channels = in_channels // reduction
self.fc1 = nn.Linear(in_channels, mid_channels)
self.fc2 = nn.Linear(mid_channels, in_channels)
def forward(self, x):
n_batches, n_channels, _, _ = x.size()
y = F.adaptive_avg_pool2d(x, output_size=1).view(n_batches, n_channels)
y = F.relu(self.fc1(y), inplace=True)
        y = torch.sigmoid(self.fc2(y)).view(n_batches, n_channels, 1, 1)
return x * y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'reduction': 4}]
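# Hedged sketch (illustration only) of the squeeze-and-excitation recipe above:
# global-average "squeeze" to (N, C), a bottleneck "excitation" MLP, then a
# per-channel sigmoid gate in (0, 1) broadcast over H x W, so for nonnegative
# inputs the output never exceeds the input.
import torch
se = SELayer(in_channels=4, reduction=4)
x = torch.rand(2, 4, 8, 8)
y = se(x)
assert y.shape == x.shape and (y <= x + 1e-6).all()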
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf2)
del primals_2
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(4)](buf3, primals_3, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4,
(1, 4), (1, 1), 0), alpha=1, beta=1, out=buf4)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_2[grid(256)](primals_1, buf4, buf5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf5, primals_1, reinterpret_tensor(buf1, (4, 4), (4, 1), 0
), buf3, buf4, primals_4
class SELayerNew(nn.Module):
def __init__(self, in_channels, reduction):
super().__init__()
mid_channels = in_channels // reduction
self.fc1 = nn.Linear(in_channels, mid_channels)
self.fc2 = nn.Linear(mid_channels, in_channels)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
implus/pytorch_image_classification
|
SELayer
| false
| 10,284
|
[
"MIT"
] | 0
|
cac490ed518ad09b0429fc01af060457fb050e68
|
https://github.com/implus/pytorch_image_classification/tree/cac490ed518ad09b0429fc01af060457fb050e68
|
WeightedMultilabel
|
import torch
import torch.nn as nn
class WeightedMultilabel(nn.Module):
def __init__(self, weights: 'torch.Tensor'):
super(WeightedMultilabel, self).__init__()
        self.criterion = nn.BCEWithLogitsLoss(reduction='none')
self.weights = weights
def forward(self, outputs, targets):
        loss = self.criterion(outputs, targets)
return (loss * self.weights).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'weights': 4}]
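# Hedged sketch (illustration only): the loss is elementwise BCE-with-logits
# scaled by the weight tensor, then averaged. With the scalar weight 4 from
# get_init_inputs this equals 4 * BCEWithLogitsLoss(reduction='mean'), matching
# the fused kernel's hardcoded 4.0 multiplier and /256.0 reduction below.
import torch
import torch.nn as nn
x, t = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = WeightedMultilabel(weights=4)(x, t)
ref = 4 * nn.BCEWithLogitsLoss(reduction='mean')(x, t)
assert torch.allclose(loss, ref, atol=1e-6)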
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = 4.0
tmp14 = tmp12 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0[grid(1)](
buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class WeightedMultilabelNew(nn.Module):
def __init__(self, weights: 'torch.Tensor'):
super(WeightedMultilabelNew, self).__init__()
        self.criterion = nn.BCEWithLogitsLoss(reduction='none')
self.weights = weights
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| jiawenxiao/physionet2020_0823 | WeightedMultilabel | false | 10,285 | ["BSD-2-Clause"] | 0 | 99dd54a3f7b8cef83ff37a46223f4f979edd2e74 | https://github.com/jiawenxiao/physionet2020_0823/tree/99dd54a3f7b8cef83ff37a46223f4f979edd2e74 |
BertLayerNormNoVar
|
import torch
import torch.nn as nn
class BertLayerNormNoVar(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
super(BertLayerNormNoVar, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
x = x - u
return self.weight * x + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
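# Editor's hedged sanity check (not part of the upstream repo): with the
# default weight of ones and bias of zeros the module only centers the last
# dimension, so the per-position mean of the output should be ~0.
if __name__ == '__main__':
    ln = BertLayerNormNoVar(hidden_size=4)
    y = ln(torch.rand(4, 4, 4, 4))
    assert torch.allclose(y.mean(-1), torch.zeros(4, 4, 4), atol=1e-6)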
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mean_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp14 = tmp12 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_mul_sub_0[grid(256)](primals_2, primals_1,
primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class BertLayerNormNoVarNew(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
super(BertLayerNormNoVarNew, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| jiachens/auto_LiRPA | BertLayerNormNoVar | false | 10,286 | ["BSD-3-Clause"] | 0 | cc1ff18e8fbc938953b20ae6a030a25761cb0b78 | https://github.com/jiachens/auto_LiRPA/tree/cc1ff18e8fbc938953b20ae6a030a25761cb0b78 |
RobNet
|
import torch
from torch import nn
import torch.nn.functional as F
class RobNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, dilation=1)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1)
self.conv4 = nn.Conv2d(64, 128, kernel_size=3, stride=1)
self.conv5 = nn.Conv2d(128, 128, kernel_size=2, stride=1)
self.rotate = nn.Conv2d(128, 1, kernel_size=2, stride=1)
self.bbox = nn.Conv2d(128, 4, kernel_size=2, stride=1)
self.sig = nn.Sigmoid()
def forward(self, x):
x = F.relu(self.conv1(x), inplace=True)
x = F.relu(self.conv2(x), inplace=True)
x = F.relu(self.conv3(x), inplace=True)
x = F.relu(self.conv4(x), inplace=True)
x = F.relu(self.conv5(x), inplace=True)
rotate = self.rotate(x)
rotate = self.sig(rotate)
bbox = self.bbox(x)
bbox = self.sig(bbox)
out = torch.cat((bbox, rotate), dim=1)
return out
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
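# Editor's hedged shape check (not part of the upstream repo): a 64x64 input
# shrinks 64 -> 31 -> 15 -> 13 -> 11 -> 10 through the conv stack, the 2x2
# heads emit 9x9 grids, and concatenating bbox (4ch) with rotate (1ch) gives
# a (N, 5, 9, 9) output, matching the strides asserted in the Triton call().
if __name__ == '__main__':
    net = RobNet()
    out = net(torch.rand(4, 1, 64, 64))
    assert out.shape == (4, 5, 9, 9)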
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 961 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 28800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 225 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 169 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 61952
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 121 % 128
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 100 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 324
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1620
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 81 % 5
x0 = xindex % 81
x2 = xindex // 405
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 81 * x1 + 324 * x2), tmp4 & xmask, other=0.0
)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp12 = tl.load(in_ptr1 + (x0 + 81 * x2), tmp9 & xmask, eviction_policy
='evict_last', other=0.0)
tmp13 = tl.sigmoid(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (128, 128, 2, 2), (512, 4, 2, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (1, 128, 2, 2), (512, 4, 2, 1))
assert_size_stride(primals_13, (1,), (1,))
assert_size_stride(primals_14, (4, 128, 2, 2), (512, 4, 2, 1))
assert_size_stride(primals_15, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 31, 31), (15376, 961, 31, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(61504)](buf1, primals_2,
61504, XBLOCK=512, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 15, 15), (7200, 225, 15, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(28800)](buf3, primals_5,
28800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 13, 13), (10816, 169, 13, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(43264)](buf5, primals_7,
43264, XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 11, 11), (15488, 121, 11, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_3[grid(61952)](buf7, primals_9,
61952, XBLOCK=512, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 10, 10), (12800, 100, 10, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(51200)](buf9, primals_11,
51200, XBLOCK=512, num_warps=4, num_stages=1)
del primals_11
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 1, 9, 9), (81, 81, 9, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_5[grid(324)](buf11, primals_13, 324,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
buf12 = extern_kernels.convolution(buf9, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 9, 9), (324, 81, 9, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_6[grid(1296)](buf13, primals_15, 1296,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf14 = empty_strided_cuda((4, 5, 9, 9), (405, 81, 9, 1), torch.float32
)
triton_poi_fused_cat_7[grid(1620)](buf13, buf11, buf14, 1620,
XBLOCK=256, num_warps=4, num_stages=1)
return (buf14, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf1, buf3, buf5, buf7, buf9,
buf11, buf13)
class RobNetNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, dilation=1)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1)
self.conv4 = nn.Conv2d(64, 128, kernel_size=3, stride=1)
self.conv5 = nn.Conv2d(128, 128, kernel_size=2, stride=1)
self.rotate = nn.Conv2d(128, 1, kernel_size=2, stride=1)
self.bbox = nn.Conv2d(128, 4, kernel_size=2, stride=1)
self.sig = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_12 = self.rotate.weight
primals_13 = self.rotate.bias
primals_14 = self.bbox.weight
primals_15 = self.bbox.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
| hongrui16/rotated_detection | RobNet | false | 10,287 | ["MIT"] | 0 | 0b0a061b0753950c20d1e52c8ae8fc59e1ceb21d | https://github.com/hongrui16/rotated_detection/tree/0b0a061b0753950c20d1e52c8ae8fc59e1ceb21d |
Conv2
|
import math
import torch
import torch.nn as nn
class Conv2(nn.Module):
""" A convolution layer with the stride of 2.
Input:
x: (N, 2L+2, in_channels) numeric tensor
global_cond: (N, global_cond_channels) numeric tensor
Output:
y: (N, L, out_channels) numeric tensor
"""
def __init__(self, in_channels, out_channels, global_cond_channels):
super().__init__()
ksz = 4
self.out_channels = out_channels
if 0 < global_cond_channels:
self.w_cond = nn.Linear(global_cond_channels, 2 * out_channels,
bias=False)
self.conv_wide = nn.Conv1d(in_channels, 2 * out_channels, ksz, stride=2
)
wsize = 2.967 / math.sqrt(ksz * in_channels)
self.conv_wide.weight.data.uniform_(-wsize, wsize)
self.conv_wide.bias.data.zero_()
def forward(self, x, global_cond):
x1 = self.conv_wide(x.transpose(1, 2)).transpose(1, 2)
if global_cond is not None:
x2 = self.w_cond(global_cond).unsqueeze(1).expand(-1, x1.size(1
), -1)
else:
x2 = torch.zeros_like(x1)
a, b = (x1 + x2).split(self.out_channels, dim=2)
return torch.sigmoid(a) * torch.tanh(b)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4,
'global_cond_channels': 4}]
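# Editor's hedged shape check (not part of the upstream repo): a Conv1d with
# kernel 4 and stride 2 maps a sequence of length 2L+2 down to L, so the
# sample inputs above (length 4, i.e. L = 1) should produce a (4, 1, 4) output.
if __name__ == '__main__':
    m = Conv2(in_channels=4, out_channels=4, global_cond_channels=4)
    y = m(torch.rand(4, 4, 4), torch.rand(4, 4))
    assert y.shape == (4, 1, 4)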
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 8 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (4 + x0 + 8 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp12 = tmp5 * tmp11
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (8, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 8, 1), (8, 1, 1))
del buf0
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_4, reinterpret_tensor(primals_5, (4, 8),
(1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_tanh_1[grid(16)](buf1, primals_3, buf2,
buf3, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del buf2
del primals_3
return buf5, primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4,
4), (16, 1, 4), 0), buf3, buf4
class Conv2New(nn.Module):
""" A convolution layer with the stride of 2.
Input:
x: (N, 2L+2, in_channels) numeric tensor
global_cond: (N, global_cond_channels) numeric tensor
Output:
y: (N, L, out_channels) numeric tensor
"""
def __init__(self, in_channels, out_channels, global_cond_channels):
super().__init__()
ksz = 4
self.out_channels = out_channels
if 0 < global_cond_channels:
self.w_cond = nn.Linear(global_cond_channels, 2 * out_channels,
bias=False)
self.conv_wide = nn.Conv1d(in_channels, 2 * out_channels, ksz, stride=2
)
wsize = 2.967 / math.sqrt(ksz * in_channels)
self.conv_wide.weight.data.uniform_(-wsize, wsize)
self.conv_wide.bias.data.zero_()
def forward(self, input_0, input_1):
primals_5 = self.w_cond.weight
primals_2 = self.conv_wide.weight
primals_3 = self.conv_wide.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| jonojace/WaveRNN | Conv2 | false | 10,288 | ["MIT"] | 0 | 5ac72d5ed10262132f016f8e523bc663faa991da | https://github.com/jonojace/WaveRNN/tree/5ac72d5ed10262132f016f8e523bc663faa991da |
CatKLLoss
|
import torch
from torch.nn.modules.loss import _Loss
class CatKLLoss(_Loss):
def __init__(self, reduction='none'):
super(CatKLLoss, self).__init__()
assert reduction in ['none', 'sum', 'mean']
self.reduction = reduction
def forward(self, log_qy, log_py):
"""
        KL(qy || py) = sum_y q(y) * log(q(y) / p(y))
log_qy: (batch_size, latent_size)
log_py: (batch_size, latent_size)
"""
qy = torch.exp(log_qy)
kl = torch.sum(qy * (log_qy - log_py), dim=1)
if self.reduction == 'mean':
kl = kl.mean()
elif self.reduction == 'sum':
kl = kl.sum()
return kl
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
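# Editor's hedged cross-check (not part of the upstream repo): elementwise,
# qy * (log_qy - log_py) is what F.kl_div computes with a log-space target,
# so summing it over dim 1 should agree with this module.
if __name__ == '__main__':
    import torch.nn.functional as F
    log_qy = torch.log_softmax(torch.rand(4, 4, 4, 4), dim=1)
    log_py = torch.log_softmax(torch.rand(4, 4, 4, 4), dim=1)
    kl_a = CatKLLoss(reduction='none')(log_qy, log_py)
    kl_b = F.kl_div(log_py, log_qy, reduction='none', log_target=True).sum(dim=1)
    assert torch.allclose(kl_a, kl_b, atol=1e-6)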
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp2 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp19 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp1 = tl_math.exp(tmp0)
tmp3 = tmp0 - tmp2
tmp4 = tmp1 * tmp3
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp5 - tmp7
tmp9 = tmp6 * tmp8
tmp10 = tmp4 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp14 = tmp11 - tmp13
tmp15 = tmp12 * tmp14
tmp16 = tmp10 + tmp15
tmp18 = tl_math.exp(tmp17)
tmp20 = tmp17 - tmp19
tmp21 = tmp18 * tmp20
tmp22 = tmp16 + tmp21
tl.store(out_ptr0 + x2, tmp22, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_mul_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class CatKLLossNew(_Loss):
def __init__(self, reduction='none'):
super(CatKLLossNew, self).__init__()
assert reduction in ['none', 'sum', 'mean']
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| imguozhen/proactive-chat | CatKLLoss | false | 10,289 | ["Apache-2.0"] | 0 | 80d13e28cb93c26a65ace0a028c53fd0bafcdbf9 | https://github.com/imguozhen/proactive-chat/tree/80d13e28cb93c26a65ace0a028c53fd0bafcdbf9 |
PCN1
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class PCN1(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, dilation=1)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=2)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2, stride=1)
self.rotate = nn.Conv2d(128, 2, kernel_size=1, stride=1)
self.cls_prob = nn.Conv2d(128, 2, kernel_size=1, stride=1)
self.bbox = nn.Conv2d(128, 3, kernel_size=1, stride=1)
def forward(self, x):
x = F.relu(self.conv1(x), inplace=True)
x = F.relu(self.conv2(x), inplace=True)
x = F.relu(self.conv3(x), inplace=True)
x = F.relu(self.conv4(x), inplace=True)
cls_prob = F.softmax(self.cls_prob(x), dim=1)
rotate = F.softmax(self.rotate(x), dim=1)
bbox = self.bbox(x)
return cls_prob, rotate, bbox
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
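# Editor's hedged shape check (not part of the upstream repo): the strided
# convs take 64 -> 31 -> 15 -> 7 -> 6, so all three heads emit 6x6 grids and
# the softmaxed heads sum to one over the channel dimension.
if __name__ == '__main__':
    net = PCN1()
    cls_prob, rotate, bbox = net(torch.rand(4, 3, 64, 64))
    assert cls_prob.shape == rotate.shape == (4, 2, 6, 6)
    assert bbox.shape == (4, 3, 6, 6)
    assert torch.allclose(cls_prob.sum(dim=1), torch.ones(4, 6, 6), atol=1e-6)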
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 48
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 256 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 28800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused__softmax_convolution_9(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp6 = tmp3 + tmp5
tmp10 = tmp7 + tmp9
tmp11 = triton_helpers.maximum(tmp6, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused__softmax_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 8
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 2
y1 = yindex // 2
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 72 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (2 * x2 + 72 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x2 + 72 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + (x2 + 36 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_11(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 3
y1 = yindex // 3
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 108 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 36 * y3), tmp2, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (128, 64, 2, 2), (256, 4, 2, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (2, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_11, (2,), (1,))
assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_13, (2,), (1,))
assert_size_stride(primals_14, (3, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_15, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 3, 3, 3), (27, 1, 9, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(48, 9)](primals_1, buf0, 48, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((32, 16, 3, 3), (144, 1, 48, 16), torch.
float32)
triton_poi_fused_2[grid(512, 9)](primals_4, buf2, 512, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_3[grid(2048, 9)](primals_6, buf3, 2048, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((128, 64, 2, 2), (256, 1, 128, 64), torch
.float32)
triton_poi_fused_4[grid(8192, 4)](primals_8, buf4, 8192, 4, XBLOCK=
4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_8
buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 31, 31), (15376, 1, 496, 16))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_5[grid(61504)](buf6, primals_2,
61504, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 32, 15, 15), (7200, 1, 480, 32))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_6[grid(28800)](buf8, primals_5,
28800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 7, 7), (3136, 1, 448, 64))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_7[grid(12544)](buf10, primals_7,
12544, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf11 = extern_kernels.convolution(buf10, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 128, 6, 6), (4608, 1, 768, 128))
buf12 = buf11
del buf11
triton_poi_fused_convolution_relu_8[grid(18432)](buf12, primals_9,
18432, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 2, 6, 6), (72, 1, 12, 2))
buf14 = empty_strided_cuda((4, 2, 6, 6), (72, 1, 12, 2), torch.float32)
triton_poi_fused__softmax_convolution_9[grid(288)](buf13,
primals_11, buf14, 288, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf15 = reinterpret_tensor(buf13, (4, 2, 6, 6), (72, 36, 6, 1), 0)
del buf13
triton_poi_fused__softmax_10[grid(8, 36)](buf14, buf15, 8, 36,
XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1)
buf16 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 2, 6, 6), (72, 1, 12, 2))
buf17 = buf14
del buf14
triton_poi_fused__softmax_convolution_9[grid(288)](buf16,
primals_13, buf17, 288, XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf18 = reinterpret_tensor(buf16, (4, 2, 6, 6), (72, 36, 6, 1), 0)
del buf16
triton_poi_fused__softmax_10[grid(8, 36)](buf17, buf18, 8, 36,
XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1)
del buf17
buf19 = extern_kernels.convolution(buf12, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 3, 6, 6), (108, 1, 18, 3))
buf20 = empty_strided_cuda((4, 3, 6, 6), (108, 36, 6, 1), torch.float32
)
triton_poi_fused_convolution_11[grid(12, 36)](buf19, primals_15,
buf20, 12, 36, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf19
del primals_15
return (buf15, buf18, buf20, buf0, buf1, buf2, buf3, buf4, primals_10,
primals_12, primals_14, buf6, buf8, buf10, buf12, buf15, buf18)
class PCN1New(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, dilation=1)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=2)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2, stride=1)
self.rotate = nn.Conv2d(128, 2, kernel_size=1, stride=1)
self.cls_prob = nn.Conv2d(128, 2, kernel_size=1, stride=1)
self.bbox = nn.Conv2d(128, 3, kernel_size=1, stride=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.rotate.weight
primals_11 = self.rotate.bias
primals_12 = self.cls_prob.weight
primals_13 = self.cls_prob.bias
primals_14 = self.bbox.weight
primals_15 = self.bbox.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0], output[1], output[2]
| jisheng047/blinsert | PCN1 | false | 10,290 | ["BSD-2-Clause"] | 0 | 923d2ea2af3f2f257c817fa8de02c7db8ec9bcc9 | https://github.com/jisheng047/blinsert/tree/923d2ea2af3f2f257c817fa8de02c7db8ec9bcc9 |
MaskBCELoss
|
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
class MaskBCELoss(_Loss):
def __init__(self, reduction='mean'):
super(MaskBCELoss, self).__init__()
assert reduction in ['none', 'sum', 'mean']
self.reduction = reduction
def forward(self, input, target, mask=None):
"""
input: (batch_size, max_len)
target: (batch_size, max_len)
mask: (batch_size, max_len)
"""
bce = F.binary_cross_entropy(input=input, target=target, reduction=
'none')
if mask is not None:
bce *= mask.float()
bce = bce.sum(dim=1)
if self.reduction == 'mean':
bce = bce.mean()
elif self.reduction == 'sum':
bce = bce.sum()
return bce
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
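# Editor's hedged usage sketch (not part of the upstream repo): a zero in the
# mask drops that position's BCE term before the per-row sum, which is how
# padded tokens are excluded; the small shapes follow the docstring contract.
if __name__ == '__main__':
    loss_fn = MaskBCELoss(reduction='none')
    inp, tgt = torch.rand(2, 3), torch.rand(2, 3)
    mask = torch.tensor([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
    manual = (F.binary_cross_entropy(inp, tgt, reduction='none') * mask).sum(dim=1)
    assert torch.allclose(loss_fn(inp, tgt, mask), manual)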
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_mean_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp3 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp25 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp27 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp37 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp39 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = -tmp3
tmp5 = libdevice.log1p(tmp4)
tmp6 = -100.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp2 * tmp7
tmp9 = tl_math.log(tmp3)
tmp10 = triton_helpers.maximum(tmp9, tmp6)
tmp11 = tmp0 * tmp10
tmp12 = tmp8 - tmp11
tmp14 = tmp13 - tmp1
tmp16 = -tmp15
tmp17 = libdevice.log1p(tmp16)
tmp18 = triton_helpers.maximum(tmp17, tmp6)
tmp19 = tmp14 * tmp18
tmp20 = tl_math.log(tmp15)
tmp21 = triton_helpers.maximum(tmp20, tmp6)
tmp22 = tmp13 * tmp21
tmp23 = tmp19 - tmp22
tmp24 = tmp12 + tmp23
tmp26 = tmp25 - tmp1
tmp28 = -tmp27
tmp29 = libdevice.log1p(tmp28)
tmp30 = triton_helpers.maximum(tmp29, tmp6)
tmp31 = tmp26 * tmp30
tmp32 = tl_math.log(tmp27)
tmp33 = triton_helpers.maximum(tmp32, tmp6)
tmp34 = tmp25 * tmp33
tmp35 = tmp31 - tmp34
tmp36 = tmp24 + tmp35
tmp38 = tmp37 - tmp1
tmp40 = -tmp39
tmp41 = libdevice.log1p(tmp40)
tmp42 = triton_helpers.maximum(tmp41, tmp6)
tmp43 = tmp38 * tmp42
tmp44 = tl_math.log(tmp39)
tmp45 = triton_helpers.maximum(tmp44, tmp6)
tmp46 = tmp37 * tmp45
tmp47 = tmp43 - tmp46
tmp48 = tmp36 + tmp47
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.sum(tmp49, 1)[:, None]
tmp52 = 64.0
tmp53 = tmp51 / tmp52
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp53, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_mean_sum_0[grid(1)](buf2,
arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class MaskBCELossNew(_Loss):
def __init__(self, reduction='mean'):
super(MaskBCELossNew, self).__init__()
assert reduction in ['none', 'sum', 'mean']
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| imguozhen/proactive-chat | MaskBCELoss | false | 10,291 | ["Apache-2.0"] | 0 | 80d13e28cb93c26a65ace0a028c53fd0bafcdbf9 | https://github.com/imguozhen/proactive-chat/tree/80d13e28cb93c26a65ace0a028c53fd0bafcdbf9 |
NormalKLLoss
|
import torch
from torch import distributions
from torch.nn.modules.loss import _Loss
class NormalKLLoss(_Loss):
def __init__(self, reduction='mean'):
super(NormalKLLoss, self).__init__()
assert reduction in ['none', 'sum', 'mean']
self.reduction = reduction
def forward(self, q_mu, q_logvar, p_mu=None, p_logvar=None):
"""
q_mu: (batch_size, latent_size)
q_logvar: (batch_size, latent_size)
"""
if p_mu is None:
p_mu = torch.zeros_like(q_mu)
if p_logvar is None:
p_logvar = torch.zeros_like(q_logvar)
q_norm = distributions.Normal(q_mu, q_logvar.exp().sqrt())
p_norm = distributions.Normal(p_mu, p_logvar.exp().sqrt())
kl = distributions.kl_divergence(q_norm, p_norm).sum(dim=1)
if self.reduction == 'mean':
kl = kl.mean()
elif self.reduction == 'sum':
kl = kl.sum()
return kl
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
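# Editor's hedged cross-check (not part of the upstream repo): against the
# default standard-normal prior the KL has the closed form
# 0.5 * (exp(logvar) + mu^2 - 1 - logvar), summed over the latent dimension.
if __name__ == '__main__':
    q_mu, q_logvar = torch.rand(4, 4), torch.rand(4, 4)
    kl = NormalKLLoss(reduction='none')(q_mu, q_logvar)
    manual = (0.5 * (q_logvar.exp() + q_mu.pow(2) - 1.0 - q_logvar)).sum(dim=1)
    assert torch.allclose(kl, manual, atol=1e-6)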
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_exp_log_mean_mul_pow_sub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp17 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp25 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp29 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp37 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp41 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = tl_math.exp(tmp0)
tmp2 = libdevice.sqrt(tmp1)
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp9 = tl_math.log(tmp3)
tmp10 = tmp8 - tmp9
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp14 = tl_math.exp(tmp13)
tmp15 = libdevice.sqrt(tmp14)
tmp16 = tmp15 * tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 - tmp7
tmp21 = tl_math.log(tmp16)
tmp22 = tmp20 - tmp21
tmp23 = tmp22 * tmp11
tmp24 = tmp12 + tmp23
tmp26 = tl_math.exp(tmp25)
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tmp27 * tmp27
tmp30 = tmp29 * tmp29
tmp31 = tmp28 + tmp30
tmp32 = tmp31 - tmp7
tmp33 = tl_math.log(tmp28)
tmp34 = tmp32 - tmp33
tmp35 = tmp34 * tmp11
tmp36 = tmp24 + tmp35
tmp38 = tl_math.exp(tmp37)
tmp39 = libdevice.sqrt(tmp38)
tmp40 = tmp39 * tmp39
tmp42 = tmp41 * tmp41
tmp43 = tmp40 + tmp42
tmp44 = tmp43 - tmp7
tmp45 = tl_math.log(tmp40)
tmp46 = tmp44 - tmp45
tmp47 = tmp46 * tmp11
tmp48 = tmp36 + tmp47
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.sum(tmp49, 1)[:, None]
tmp52 = 64.0
tmp53 = tmp51 / tmp52
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp53, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_div_exp_log_mean_mul_pow_sub_sum_0[grid(1)](buf2,
arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class NormalKLLossNew(_Loss):
def __init__(self, reduction='mean'):
super(NormalKLLossNew, self).__init__()
assert reduction in ['none', 'sum', 'mean']
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| imguozhen/proactive-chat | NormalKLLoss | false | 10,292 | ["Apache-2.0"] | 0 | 80d13e28cb93c26a65ace0a028c53fd0bafcdbf9 | https://github.com/imguozhen/proactive-chat/tree/80d13e28cb93c26a65ace0a028c53fd0bafcdbf9 |
Gather
|
import torch
from torch import nn
import torch.onnx
class Gather(nn.Module):
    def __init__(self, dim=0):
        super().__init__()
        self.dim = dim
        self.selection = [slice(None) for _ in range(dim)]
def forward(self, input: 'torch.Tensor', indices: 'torch.Tensor'):
selection = self.selection + [indices]
return input.__getitem__(selection)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {}]
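# Editor's hedged equivalence note (not part of the upstream repo): with
# dim=0 the fancy-indexing path should reproduce torch.index_select along
# the leading axis.
if __name__ == '__main__':
    g = Gather(dim=0)
    x = torch.rand(4, 4, 4, 4)
    idx = torch.ones(4, dtype=torch.int64)
    assert torch.equal(g(x, idx), torch.index_select(x, 0, idx))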
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (x0 + 64 * tmp4), xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_0[grid(256)](arg1_1, arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class GatherNew(nn.Module):
    def __init__(self, dim=0):
        super().__init__()
        self.dim = dim
        self.selection = [slice(None) for _ in range(dim)]
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| jiuntian/onnx2pytorch | Gather | false | 10,293 | ["Apache-2.0"] | 0 | fadca10a6045f4373293c9c0854607fb51a47c12 | https://github.com/jiuntian/onnx2pytorch/tree/fadca10a6045f4373293c9c0854607fb51a47c12 |
GlobalAveragePool
|
import torch
from torch import nn
import torch.onnx
class GlobalAveragePool(nn.Module):
def forward(self, input: 'torch.Tensor'):
spatial_shape = input.ndimension() - 2
dim = tuple(range(spatial_shape, spatial_shape + 2))
return torch.mean(input, dim=dim, keepdim=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
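# Editor's hedged cross-check (not part of the upstream repo): on 4D NCHW
# input this reduces the last two axes with keepdim, which should match
# nn.AdaptiveAvgPool2d(1).
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(GlobalAveragePool()(x), nn.AdaptiveAvgPool2d(1)(x))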
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
return buf1,
class GlobalAveragePoolNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| jiuntian/onnx2pytorch | GlobalAveragePool | false | 10,294 | ["Apache-2.0"] | 0 | fadca10a6045f4373293c9c0854607fb51a47c12 | https://github.com/jiuntian/onnx2pytorch/tree/fadca10a6045f4373293c9c0854607fb51a47c12 |
Scale
|
import torch
from torch import nn
from torch.nn import *
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale': 1.0}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.nn import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ScaleNew(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| jlubars/autonomous-learning-library | Scale | false | 10,295 | ["MIT"] | 0 | 5d2d2e1ee9e0876614d7113e26f026f126a3899f | https://github.com/jlubars/autonomous-learning-library/tree/5d2d2e1ee9e0876614d7113e26f026f126a3899f |
FullSort
|
import torch
import torch.nn as nn
class FullSort(nn.Module):
def forward(self, x):
return torch.sort(x, 1)[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
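# Editor's hedged sanity check (not part of the upstream repo): the output is
# ascending along dim 1 and is a per-slice permutation of the input, so sums
# over dim 1 are preserved up to floating-point reordering.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4)
    y = FullSort()(x)
    assert bool((y[:, 1:] >= y[:, :-1]).all())
    assert torch.allclose(y.sum(1), x.sum(1))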
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sort_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = r2
tmp2 = tmp1.to(tl.int16)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5, _tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1,
stable=False, descending=False)
tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_sort_0[grid(64)](arg0_1, buf0, 64, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
return buf0,
class FullSortNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
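# Hedged parity sketch, assuming FullSort/FullSortNew above are in scope and
# a CUDA device is available: the fused kernel sorts along dim 1 (the
# stride-16 axis of the contiguous 4x4x4x4 input), matching the eager
# torch.sort(x, 1)[0].
import torch
x = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(FullSortNew()(x), FullSort()(x))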
|
hologerry/residual-flows
|
FullSort
| false
| 10,296
|
[
"MIT"
] | 0
|
33a3639150490279c2e13238dd6244b80c52adf7
|
https://github.com/hologerry/residual-flows/tree/33a3639150490279c2e13238dd6244b80c52adf7
|
Accuracy
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class Accuracy(nn.Module):
def __init__(self):
super().__init__()
def forward(self, prediction, target, mask=None, token_dim=-1,
sequence_dim=-2):
prediction = F.softmax(prediction, token_dim).argmax(sequence_dim)
scores = prediction == target
n_padded = 0
if mask is not None:
n_padded = (mask == 0).sum()
return scores.sum() / float(scores.numel() - n_padded)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_argmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp6 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (4 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (5 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (6 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (7 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp33 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp34 = tl.load(in_ptr0 + (8 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr0 + (9 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp37 = tl.load(in_ptr0 + (10 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr0 + (11 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp56 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp57 = tl.load(in_ptr0 + (12 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp58 = tl.load(in_ptr0 + (13 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp60 = tl.load(in_ptr0 + (14 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp62 = tl.load(in_ptr0 + (15 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tmp9 / tmp16
tmp18 = tmp8 > tmp17
tmp19 = tmp8 == tmp17
tmp20 = tmp8 != tmp8
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 0, tl.int64)
tmp27 = tl.full([1], 1, tl.int64)
tmp28 = tmp26 < tmp27
tmp29 = tmp25 & tmp28
tmp30 = tmp23 | tmp29
tmp31 = tl.where(tmp30, tmp8, tmp17)
tmp32 = tl.where(tmp30, tmp26, tmp27)
tmp36 = tmp34 + tmp35
tmp38 = tmp36 + tmp37
tmp40 = tmp38 + tmp39
tmp41 = tmp33 / tmp40
tmp42 = tmp31 > tmp41
tmp43 = tmp31 == tmp41
tmp44 = tmp31 != tmp31
tmp45 = tmp41 != tmp41
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp32 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp31, tmp41)
tmp55 = tl.where(tmp53, tmp32, tmp50)
tmp59 = tmp57 + tmp58
tmp61 = tmp59 + tmp60
tmp63 = tmp61 + tmp62
tmp64 = tmp56 / tmp63
tmp65 = tmp54 > tmp64
tmp66 = tmp54 == tmp64
tmp67 = tmp54 != tmp54
tmp68 = tmp64 != tmp64
tmp69 = tmp67 > tmp68
tmp70 = tmp65 | tmp69
tmp71 = tmp67 & tmp68
tmp72 = tmp66 | tmp71
tmp73 = tl.full([1], 3, tl.int64)
tmp74 = tmp55 < tmp73
tmp75 = tmp72 & tmp74
tmp76 = tmp70 | tmp75
tl.where(tmp76, tmp54, tmp64)
tmp78 = tl.where(tmp76, tmp55, tmp73)
tl.store(out_ptr0 + x2, tmp78, xmask)
@triton.jit
def triton_per_fused_div_eq_sum_2(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + r2, None)
tmp1 = tmp0.to(tl.float32)
tmp3 = tmp1 == tmp2
tmp4 = tmp3.to(tl.int64)
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tmp7.to(tl.float32)
tmp9 = 0.00390625
tmp10 = tmp8 * tmp9
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
triton_poi_fused__softmax_argmax_1[grid(64)](buf0, buf1, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del buf0
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_div_eq_sum_2[grid(1)](buf1, arg1_1, buf3, 1, 256,
num_warps=2, num_stages=1)
del arg1_1
del buf1
return buf3,
class AccuracyNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
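# Hedged parity sketch, assuming Accuracy/AccuracyNew above are in scope and
# a CUDA device is available. The compiled path hard-codes the unmasked case:
# the 0.00390625 factor in the reduction kernel is 1/256, i.e.
# 1/scores.numel() for 4x4x4x4 inputs with n_padded == 0.
import torch
pred = torch.rand([4, 4, 4, 4], device='cuda')
target = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(AccuracyNew()(pred, target), Accuracy()(pred, target))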
|
karadeli98/BBM406-Project
|
Accuracy
| false
| 10,297
|
[
"MIT"
] | 0
|
6de0fa2cbebb93dec272dc7c54a25024880ed1e7
|
https://github.com/karadeli98/BBM406-Project/tree/6de0fa2cbebb93dec272dc7c54a25024880ed1e7
|
LipschitzCube
|
import torch
import torch.nn as nn
class LipschitzCube(nn.Module):
def forward(self, x):
return (x >= 1) * (x - 2 / 3) + (x <= -1) * (x + 2 / 3) + (x > -1) * (x
< 1) * x ** 3 / 3
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_ge_gt_le_lt_mul_pow_sub_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 >= tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = 0.6666666666666666
tmp5 = tmp0 - tmp4
tmp6 = tmp3 * tmp5
tmp7 = -1.0
tmp8 = tmp0 <= tmp7
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp0 + tmp4
tmp11 = tmp9 * tmp10
tmp12 = tmp6 + tmp11
tmp13 = tmp0 > tmp7
tmp14 = tmp0 < tmp1
tmp15 = tmp13 & tmp14
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp0 * tmp0
tmp18 = tmp17 * tmp0
tmp19 = tmp16 * tmp18
tmp20 = 0.3333333333333333
tmp21 = tmp19 * tmp20
tmp22 = tmp12 + tmp21
tl.store(out_ptr0 + x0, tmp22, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_ge_gt_le_lt_mul_pow_sub_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class LipschitzCubeNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
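# Hedged parity sketch, assuming LipschitzCube/LipschitzCubeNew above are in
# scope and a CUDA device is available. Scaling the input past [-1, 1]
# exercises all three branches of the piecewise function fused in the kernel.
import torch
x = torch.randn([4, 4, 4, 4], device='cuda') * 2
torch.testing.assert_close(LipschitzCubeNew()(x), LipschitzCube()(x))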
|
hologerry/residual-flows
|
LipschitzCube
| false
| 10,298
|
[
"MIT"
] | 0
|
33a3639150490279c2e13238dd6244b80c52adf7
|
https://github.com/hologerry/residual-flows/tree/33a3639150490279c2e13238dd6244b80c52adf7
|
MSDConvBlock
|
import torch
import torch.nn as nn
class MSDConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, dilation, std):
super(MSDConvBlock, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=(3, 3), padding=(dilation, dilation),
padding_mode='reflect', dilation=dilation, bias=False)
torch.nn.init.normal_(self.conv.weight, 0, std)
def forward(self, x):
y = self.conv(x)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'dilation': 1, 'std': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_2, buf0, 576,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
return buf1, primals_1, buf0
class MSDConvBlockNew(nn.Module):
def __init__(self, in_channels, out_channels, dilation, std):
super(MSDConvBlockNew, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=(3, 3), padding=(dilation, dilation),
padding_mode='reflect', dilation=dilation, bias=False)
torch.nn.init.normal_(self.conv.weight, 0, std)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
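# Hedged parity sketch, assuming MSDConvBlock/MSDConvBlockNew above are in
# scope and a CUDA device is available. The weight is copied so both modules
# share one random kernel; the compiled path materialises the reflection pad
# explicitly (with dilation=1 baked into its indexing) before calling the
# extern convolution.
import torch
ref = MSDConvBlock(4, 4, dilation=1, std=4).cuda()
opt = MSDConvBlockNew(4, 4, dilation=1, std=4).cuda()
opt.conv.weight.data.copy_(ref.conv.weight.data)
x = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(opt(x), ref(x))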
|
jiayangshi/pcf
|
MSDConvBlock
| false
| 10,299
|
[
"MIT"
] | 0
|
1e3c5847bdb4100f60b7251cefb9cfe7a76c3c64
|
https://github.com/jiayangshi/pcf/tree/1e3c5847bdb4100f60b7251cefb9cfe7a76c3c64
|
Sparsify1D
|
import torch
import torch.nn as nn
class SparsifyBase(nn.Module):
def __init__(self, sparse_ratio=0.5):
super(SparsifyBase, self).__init__()
self.sr = sparse_ratio
self.preact = None
self.act = None
def get_activation(self):
def hook(model, input, output):
self.preact = input[0].cpu().detach().clone()
self.act = output.cpu().detach().clone()
return hook
def record_activation(self):
self.register_forward_hook(self.get_activation())
class Sparsify1D(SparsifyBase):
def __init__(self, sparse_ratio=0.5):
super(Sparsify1D, self).__init__()
self.sr = sparse_ratio
def forward(self, x):
k = int(self.sr * x.shape[1])
topval = x.topk(k, dim=1)[0][:, -1]
topval = topval.expand(x.shape[1], x.shape[0]).permute(1, 0)
comp = x >= topval
return comp * x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_ge_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 >= tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = tmp3 * tmp0
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.topk.default(arg0_1, 2, 1)
buf1 = buf0[0]
del buf0
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_ge_mul_0[grid(16)](arg0_1, buf1, buf3, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del arg0_1
del buf1
return buf3,
class SparsifyBase(nn.Module):
def __init__(self, sparse_ratio=0.5):
super(SparsifyBase, self).__init__()
self.sr = sparse_ratio
self.preact = None
self.act = None
def get_activation(self):
def hook(model, input, output):
self.preact = input[0].cpu().detach().clone()
self.act = output.cpu().detach().clone()
return hook
def record_activation(self):
self.register_forward_hook(self.get_activation())
class Sparsify1DNew(SparsifyBase):
def __init__(self, sparse_ratio=0.5):
super(Sparsify1DNew, self).__init__()
self.sr = sparse_ratio
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
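# Hedged parity sketch, assuming Sparsify1D/Sparsify1DNew above are in scope
# and a CUDA device is available. With sparse_ratio=0.5 and 4 columns, k=2,
# so the fused kernel reads the second-largest value of each row (offset
# 1 + 2 * row into the topk buffer) as the threshold.
import torch
x = torch.rand([4, 4], device='cuda')
torch.testing.assert_close(Sparsify1DNew()(x), Sparsify1D()(x))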
|
jmhuer/TCN
|
Sparsify1D
| false
| 10,301
|
[
"MIT"
] | 0
|
8233b2ff5686ef496b113a6984f5100709a503d3
|
https://github.com/jmhuer/TCN/tree/8233b2ff5686ef496b113a6984f5100709a503d3
|
Network
|
import torch
class Network(torch.nn.Module):
def __init__(self, input_dimension, output_dimension):
super(Network, self).__init__()
self.layer_1 = torch.nn.Linear(in_features=input_dimension,
out_features=90)
self.layer_2 = torch.nn.Linear(in_features=90, out_features=125)
self.layer_3 = torch.nn.Linear(in_features=125, out_features=90)
self.output_layer = torch.nn.Linear(in_features=90, out_features=
output_dimension)
def forward(self, input):
layer_1_output = torch.nn.functional.relu(self.layer_1(input))
layer_2_output = torch.nn.functional.relu(self.layer_2(layer_1_output))
layer_3_output = torch.nn.functional.relu(self.layer_3(layer_2_output))
output = self.output_layer(layer_3_output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dimension': 4, 'output_dimension': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 5760
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 90
x2 = xindex % 1440
x3 = xindex // 1440
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1536 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 8000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 125
x2 = xindex // 2000
x3 = xindex % 2000
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 2016 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 2048 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 8000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 125
x1 = xindex // 125
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 125 * (x1 % 16) + 2016 * (x1 // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (90, 4), (4, 1))
assert_size_stride(primals_2, (90,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (125, 90), (90, 1))
assert_size_stride(primals_5, (125,), (1,))
assert_size_stride(primals_6, (90, 125), (125, 1))
assert_size_stride(primals_7, (90,), (1,))
assert_size_stride(primals_8, (4, 90), (90, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 90), (90, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 90), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 90), (1440, 360, 90, 1), 0)
del buf0
buf10 = empty_strided_cuda((4, 4, 4, 90), (1536, 360, 90, 1), torch
.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(5760)](buf1,
primals_2, buf10, 5760, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 125), (125, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 90), (90, 1), 0),
reinterpret_tensor(primals_4, (90, 125), (1, 90), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 125), (2016, 500, 125, 1),
torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 125), (2048, 500, 125, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8000)](buf2,
primals_5, buf3, buf9, 8000, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = buf2
del buf2
triton_poi_fused_relu_view_2[grid(8000)](buf3, buf4, 8000, XBLOCK=
128, num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((64, 90), (90, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (125, 90), (1,
125), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 90), (1440, 360, 90, 1), 0)
del buf5
buf8 = empty_strided_cuda((4, 4, 4, 90), (1536, 360, 90, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_0[grid(5760)](buf6,
primals_7, buf8, 5760, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf6, (64, 90),
(90, 1), 0), reinterpret_tensor(primals_8, (90, 4), (1, 90), 0),
alpha=1, beta=1, out=buf7)
del primals_9
return reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 90), (90, 1), 0
), buf4, reinterpret_tensor(buf6, (64, 90), (90, 1), 0
), primals_8, buf8, primals_6, buf9, primals_4, buf10
class NetworkNew(torch.nn.Module):
def __init__(self, input_dimension, output_dimension):
super(NetworkNew, self).__init__()
self.layer_1 = torch.nn.Linear(in_features=input_dimension,
out_features=90)
self.layer_2 = torch.nn.Linear(in_features=90, out_features=125)
self.layer_3 = torch.nn.Linear(in_features=125, out_features=90)
self.output_layer = torch.nn.Linear(in_features=90, out_features=
output_dimension)
def forward(self, input_0):
primals_1 = self.layer_1.weight
primals_2 = self.layer_1.bias
primals_4 = self.layer_2.weight
primals_5 = self.layer_2.bias
primals_6 = self.layer_3.weight
primals_7 = self.layer_3.bias
primals_8 = self.output_layer.weight
primals_9 = self.output_layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
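# Hedged parity sketch, assuming Network/NetworkNew above are in scope and a
# CUDA device is available. Sharing the state dict makes the fused
# mm/addmm/ReLU pipeline directly comparable to the eager MLP.
import torch
ref = Network(4, 4).cuda()
opt = NetworkNew(4, 4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(opt(x), ref(x))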
|
joshsia/random-maze-rl
|
Network
| false
| 10,302
|
[
"MIT"
] | 0
|
016b67d23bfba63182cf06ca17bc9a75baca6ee5
|
https://github.com/joshsia/random-maze-rl/tree/016b67d23bfba63182cf06ca17bc9a75baca6ee5
|
Aggregation
|
import torch
from torch import nn
from torch.nn import *
class Aggregation(nn.Module):
"""
Aggregation layer for the Dueling architecture.
https://arxiv.org/abs/1511.06581
This layer computes a Q function by combining
an estimate of V with an estimate of the advantage.
    The advantage is normalized by subtracting the average
    advantage so that the value and advantage estimates
    remain identifiable.
    """
def forward(self, value, advantages):
return value + advantages - torch.mean(advantages, dim=1, keepdim=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.nn import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mean_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp3 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = 4.0
tmp11 = tmp9 / tmp10
tmp12 = tmp2 - tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_sub_0[grid(256)](arg0_1, arg1_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class AggregationNew(nn.Module):
"""
Aggregation layer for the Dueling architecture.
https://arxiv.org/abs/1511.06581
This layer computes a Q function by combining
an estimate of V with an estimate of the advantage.
    The advantage is normalized by subtracting the average
    advantage so that the value and advantage estimates
    remain identifiable.
    """
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
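# Hedged parity sketch, assuming Aggregation/AggregationNew above are in
# scope and a CUDA device is available; the single fused kernel folds the
# add, the dim-1 mean, and the subtraction into one pass.
import torch
value = torch.rand([4, 4, 4, 4], device='cuda')
adv = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(AggregationNew()(value, adv), Aggregation()(value, adv))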
|
jlubars/autonomous-learning-library
|
Aggregation
| false
| 10,303
|
[
"MIT"
] | 0
|
5d2d2e1ee9e0876614d7113e26f026f126a3899f
|
https://github.com/jlubars/autonomous-learning-library/tree/5d2d2e1ee9e0876614d7113e26f026f126a3899f
|
LipNormLinear
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def _max_except_dim(input, dim):
maxed = input
for axis in range(input.ndimension() - 1, dim, -1):
maxed, _ = maxed.max(axis, keepdim=True)
for axis in range(dim - 1, -1, -1):
maxed, _ = maxed.max(axis, keepdim=True)
return maxed
def _norm_except_dim(w, norm_type, dim):
if norm_type == 1 or norm_type == 2:
return torch.norm_except_dim(w, norm_type, dim)
elif norm_type == float('inf'):
return _max_except_dim(w, dim)
def operator_norm_settings(domain, codomain):
if domain == 1 and codomain == 1:
max_across_input_dims = True
norm_type = 1
elif domain == 1 and codomain == 2:
max_across_input_dims = True
norm_type = 2
elif domain == 1 and codomain == float('inf'):
max_across_input_dims = True
norm_type = float('inf')
elif domain == 2 and codomain == float('inf'):
max_across_input_dims = False
norm_type = 2
elif domain == float('inf') and codomain == float('inf'):
max_across_input_dims = False
norm_type = 1
else:
raise ValueError('Unknown combination of domain "{}" and codomain "{}"'
.format(domain, codomain))
return max_across_input_dims, norm_type
def _logit(p):
p = torch.max(torch.ones(1) * 0.1, torch.min(torch.ones(1) * 0.9, p))
    return torch.log(p + 1e-10) - torch.log(1 - p + 1e-10)
class LipNormLinear(nn.Linear):
"""Lipschitz constant defined using operator norms."""
def __init__(self, in_features, out_features, bias=True, coeff=0.97,
domain=float('inf'), codomain=float('inf'), local_constraint=True,
**unused_kwargs):
del unused_kwargs
super(LipNormLinear, self).__init__(in_features, out_features, bias)
self.coeff = coeff
self.domain = domain
self.codomain = codomain
self.local_constraint = local_constraint
max_across_input_dims, self.norm_type = operator_norm_settings(self
.domain, self.codomain)
self.max_across_dim = 1 if max_across_input_dims else 0
with torch.no_grad():
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=
self.max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
self.scale = nn.Parameter(_logit(w_scale / self.coeff))
def compute_weight(self):
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self.
max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
return self.weight / w_scale * torch.sigmoid(self.scale) * self.coeff
def forward(self, input):
weight = self.compute_weight()
return F.linear(input, weight, self.bias)
def extra_repr(self):
s = super(LipNormLinear, self).extra_repr()
return s + ', coeff={}, domain={}, codomain={}, local={}'.format(self
.coeff, self.domain, self.codomain, self.local_constraint)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl_math.abs(tmp1)
tmp4 = tl_math.abs(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.abs(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tmp0 / tmp11
tmp14 = tl.sigmoid(tmp13)
tmp15 = tmp12 * tmp14
tmp16 = 0.97
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_sigmoid_0[grid(16)](primals_1, primals_2,
buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64,
4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del buf0
del primals_3
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, primals_2, reinterpret_tensor(primals_4, (64, 4), (4,
1), 0)
def _max_except_dim(input, dim):
maxed = input
for axis in range(input.ndimension() - 1, dim, -1):
maxed, _ = maxed.max(axis, keepdim=True)
for axis in range(dim - 1, -1, -1):
maxed, _ = maxed.max(axis, keepdim=True)
return maxed
def _norm_except_dim(w, norm_type, dim):
if norm_type == 1 or norm_type == 2:
return torch.norm_except_dim(w, norm_type, dim)
elif norm_type == float('inf'):
return _max_except_dim(w, dim)
def operator_norm_settings(domain, codomain):
if domain == 1 and codomain == 1:
max_across_input_dims = True
norm_type = 1
elif domain == 1 and codomain == 2:
max_across_input_dims = True
norm_type = 2
elif domain == 1 and codomain == float('inf'):
max_across_input_dims = True
norm_type = float('inf')
elif domain == 2 and codomain == float('inf'):
max_across_input_dims = False
norm_type = 2
elif domain == float('inf') and codomain == float('inf'):
max_across_input_dims = False
norm_type = 1
else:
raise ValueError('Unknown combination of domain "{}" and codomain "{}"'
.format(domain, codomain))
return max_across_input_dims, norm_type
def _logit(p):
p = torch.max(torch.ones(1) * 0.1, torch.min(torch.ones(1) * 0.9, p))
    return torch.log(p + 1e-10) - torch.log(1 - p + 1e-10)
class LipNormLinearNew(nn.Linear):
"""Lipschitz constant defined using operator norms."""
def __init__(self, in_features, out_features, bias=True, coeff=0.97,
domain=float('inf'), codomain=float('inf'), local_constraint=True,
**unused_kwargs):
del unused_kwargs
super(LipNormLinearNew, self).__init__(in_features, out_features, bias)
self.coeff = coeff
self.domain = domain
self.codomain = codomain
self.local_constraint = local_constraint
max_across_input_dims, self.norm_type = operator_norm_settings(self
.domain, self.codomain)
self.max_across_dim = 1 if max_across_input_dims else 0
with torch.no_grad():
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=
self.max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
self.scale = nn.Parameter(_logit(w_scale / self.coeff))
def compute_weight(self):
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self.
max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
return self.weight / w_scale * torch.sigmoid(self.scale) * self.coeff
def extra_repr(self):
s = super(LipNormLinearNew, self).extra_repr()
return s + ', coeff={}, domain={}, codomain={}, local={}'.format(self
.coeff, self.domain, self.codomain, self.local_constraint)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = self.scale
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
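# Hedged parity sketch, assuming LipNormLinear/LipNormLinearNew above are in
# scope and a CUDA device is available. For the default domain/codomain of
# inf, the per-row scale is the L1 norm of each weight row, which is exactly
# the four abs() loads summed inside the fused kernel before the
# sigmoid(scale) * 0.97 rescaling; the state dict is shared so scale matches.
import torch
ref = LipNormLinear(4, 4).cuda()
opt = LipNormLinearNew(4, 4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(opt(x), ref(x))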
|
hologerry/residual-flows
|
LipNormLinear
| false
| 10,304
|
[
"MIT"
] | 0
|
33a3639150490279c2e13238dd6244b80c52adf7
|
https://github.com/hologerry/residual-flows/tree/33a3639150490279c2e13238dd6244b80c52adf7
|
LipNormConv2d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def _max_except_dim(input, dim):
maxed = input
for axis in range(input.ndimension() - 1, dim, -1):
maxed, _ = maxed.max(axis, keepdim=True)
for axis in range(dim - 1, -1, -1):
maxed, _ = maxed.max(axis, keepdim=True)
return maxed
def _norm_except_dim(w, norm_type, dim):
if norm_type == 1 or norm_type == 2:
return torch.norm_except_dim(w, norm_type, dim)
elif norm_type == float('inf'):
return _max_except_dim(w, dim)
def operator_norm_settings(domain, codomain):
if domain == 1 and codomain == 1:
max_across_input_dims = True
norm_type = 1
elif domain == 1 and codomain == 2:
max_across_input_dims = True
norm_type = 2
elif domain == 1 and codomain == float('inf'):
max_across_input_dims = True
norm_type = float('inf')
elif domain == 2 and codomain == float('inf'):
max_across_input_dims = False
norm_type = 2
elif domain == float('inf') and codomain == float('inf'):
max_across_input_dims = False
norm_type = 1
else:
raise ValueError('Unknown combination of domain "{}" and codomain "{}"'
.format(domain, codomain))
return max_across_input_dims, norm_type
def _logit(p):
p = torch.max(torch.ones(1) * 0.1, torch.min(torch.ones(1) * 0.9, p))
    return torch.log(p + 1e-10) - torch.log(1 - p + 1e-10)
class LipNormConv2d(nn.Conv2d):
"""Lipschitz constant defined using operator norms."""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, bias=True, coeff=0.97, domain=float('inf'), codomain=float
('inf'), local_constraint=True, **unused_kwargs):
del unused_kwargs
super(LipNormConv2d, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, bias)
self.coeff = coeff
self.domain = domain
self.codomain = codomain
self.local_constraint = local_constraint
max_across_input_dims, self.norm_type = operator_norm_settings(self
.domain, self.codomain)
self.max_across_dim = 1 if max_across_input_dims else 0
with torch.no_grad():
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=
self.max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
self.scale = nn.Parameter(_logit(w_scale / self.coeff))
def compute_weight(self):
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self.
max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
return self.weight / w_scale * torch.sigmoid(self.scale)
def forward(self, input):
weight = self.compute_weight()
return F.conv2d(input, weight, self.bias, self.stride, self.padding,
1, 1)
def extra_repr(self):
s = super(LipNormConv2d, self).extra_repr()
return s + ', coeff={}, domain={}, codomain={}, local={}'.format(self
.coeff, self.domain, self.codomain, self.local_constraint)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'stride': 1, 'padding': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_mul_norm_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl_math.abs(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = tmp0 / tmp5
tmp8 = tl.sigmoid(tmp7)
tmp9 = tmp6 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_mul_norm_sigmoid_0[grid(4)](buf1, primals_1,
primals_2, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 9, 9), (324, 81, 9, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(1296)](buf4, primals_3, 1296,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf4, primals_1, primals_2, primals_4, reinterpret_tensor(buf1,
(4, 1, 1, 1), (1, 1, 1, 1), 0), buf2
def _max_except_dim(input, dim):
maxed = input
for axis in range(input.ndimension() - 1, dim, -1):
maxed, _ = maxed.max(axis, keepdim=True)
for axis in range(dim - 1, -1, -1):
maxed, _ = maxed.max(axis, keepdim=True)
return maxed
def _norm_except_dim(w, norm_type, dim):
if norm_type == 1 or norm_type == 2:
return torch.norm_except_dim(w, norm_type, dim)
elif norm_type == float('inf'):
return _max_except_dim(w, dim)
def operator_norm_settings(domain, codomain):
if domain == 1 and codomain == 1:
max_across_input_dims = True
norm_type = 1
elif domain == 1 and codomain == 2:
max_across_input_dims = True
norm_type = 2
elif domain == 1 and codomain == float('inf'):
max_across_input_dims = True
norm_type = float('inf')
elif domain == 2 and codomain == float('inf'):
max_across_input_dims = False
norm_type = 2
elif domain == float('inf') and codomain == float('inf'):
max_across_input_dims = False
norm_type = 1
else:
raise ValueError('Unknown combination of domain "{}" and codomain "{}"'
.format(domain, codomain))
return max_across_input_dims, norm_type
def _logit(p):
p = torch.max(torch.ones(1) * 0.1, torch.min(torch.ones(1) * 0.9, p))
    return torch.log(p + 1e-10) - torch.log(1 - p + 1e-10)
class LipNormConv2dNew(nn.Conv2d):
"""Lipschitz constant defined using operator norms."""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, bias=True, coeff=0.97, domain=float('inf'), codomain=float
('inf'), local_constraint=True, **unused_kwargs):
del unused_kwargs
super(LipNormConv2dNew, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, bias)
self.coeff = coeff
self.domain = domain
self.codomain = codomain
self.local_constraint = local_constraint
max_across_input_dims, self.norm_type = operator_norm_settings(self
.domain, self.codomain)
self.max_across_dim = 1 if max_across_input_dims else 0
with torch.no_grad():
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=
self.max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
self.scale = nn.Parameter(_logit(w_scale / self.coeff))
def compute_weight(self):
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self.
max_across_dim)
if not self.local_constraint:
w_scale = w_scale.max()
return self.weight / w_scale * torch.sigmoid(self.scale)
def extra_repr(self):
s = super(LipNormConv2dNew, self).extra_repr()
return s + ', coeff={}, domain={}, codomain={}, local={}'.format(self
.coeff, self.domain, self.codomain, self.local_constraint)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = self.scale
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
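# Hedged parity sketch, assuming LipNormConv2d/LipNormConv2dNew above are in
# scope and a CUDA device is available. Note that compute_weight() here,
# unlike the linear variant, does not multiply by coeff, and the fused
# kernel matches that omission (no 0.97 factor appears in it).
import torch
ref = LipNormConv2d(4, 4, kernel_size=4, stride=1, padding=4).cuda()
opt = LipNormConv2dNew(4, 4, kernel_size=4, stride=1, padding=4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(opt(x), ref(x))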
|
hologerry/residual-flows
|
LipNormConv2d
| false
| 10,305
|
[
"MIT"
] | 0
|
33a3639150490279c2e13238dd6244b80c52adf7
|
https://github.com/hologerry/residual-flows/tree/33a3639150490279c2e13238dd6244b80c52adf7
|
EALSTM
|
import torch
import torch.nn as nn
import torch.utils.data
class EALSTM(nn.Module):
"""Implementation of the Entity-Aware-LSTM (EA-LSTM)
TODO: Include paper ref and latex equations
Parameters
----------
input_size_dyn : int
        Number of dynamic features, which are those passed to the LSTM at each time step.
input_size_stat : int
Number of static features, which are those that are used to modulate the input gate.
hidden_size : int
Number of hidden/memory cells.
batch_first : bool, optional
        If True, expects the batch inputs to be of shape [batch, seq, features]; otherwise, the
        shape has to be [seq, batch, features]. By default True.
initial_forget_bias : int, optional
Value of the initial forget gate bias, by default 0
"""
def __init__(self, input_size_dyn: 'int', input_size_stat: 'int',
hidden_size: 'int', batch_first: 'bool'=True, initial_forget_bias:
'int'=0):
super(EALSTM, self).__init__()
self.input_size_dyn = input_size_dyn
self.input_size_stat = input_size_stat
self.hidden_size = hidden_size
self.batch_first = batch_first
self.initial_forget_bias = initial_forget_bias
self.weight_ih = nn.Parameter(torch.FloatTensor(input_size_dyn, 3 *
hidden_size))
self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 3 *
hidden_size))
self.weight_sh = nn.Parameter(torch.FloatTensor(input_size_stat,
hidden_size))
self.bias = nn.Parameter(torch.FloatTensor(3 * hidden_size))
self.bias_s = nn.Parameter(torch.FloatTensor(hidden_size))
self.reset_parameters()
def reset_parameters(self):
"""Initialize all learnable parameters of the LSTM"""
nn.init.orthogonal_(self.weight_ih.data)
nn.init.orthogonal_(self.weight_sh)
weight_hh_data = torch.eye(self.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 3)
self.weight_hh.data = weight_hh_data
nn.init.constant_(self.bias.data, val=0)
nn.init.constant_(self.bias_s.data, val=0)
if self.initial_forget_bias != 0:
self.bias.data[:self.hidden_size] = self.initial_forget_bias
def forward(self, x_d, x_s):
"""[summary]
Parameters
----------
x_d : torch.Tensor
Tensor, containing a batch of sequences of the dynamic features. Shape has to match
the format specified with batch_first.
x_s : torch.Tensor
Tensor, containing a batch of static features.
Returns
-------
h_n : torch.Tensor
The hidden states of each time step of each sample in the batch.
c_n : torch.Tensor]
The cell states of each time step of each sample in the batch.
"""
if self.batch_first:
x_d = x_d.transpose(0, 1)
seq_len, batch_size, _ = x_d.size()
h_0 = x_d.data.new(batch_size, self.hidden_size).zero_()
c_0 = x_d.data.new(batch_size, self.hidden_size).zero_()
h_x = h_0, c_0
h_n, c_n = [], []
bias_batch = self.bias.unsqueeze(0).expand(batch_size, *self.bias.
size())
bias_s_batch = self.bias_s.unsqueeze(0).expand(batch_size, *self.
bias_s.size())
i = torch.sigmoid(torch.addmm(bias_s_batch, x_s, self.weight_sh))
for t in range(seq_len):
h_0, c_0 = h_x
gates = torch.addmm(bias_batch, h_0, self.weight_hh) + torch.mm(x_d
[t], self.weight_ih)
f, o, g = gates.chunk(3, 1)
c_1 = torch.sigmoid(f) * c_0 + i * torch.tanh(g)
h_1 = torch.sigmoid(o) * torch.tanh(c_1)
h_n.append(h_1)
c_n.append(c_1)
h_x = h_1, c_1
h_n = torch.stack(h_n, 0)
c_n = torch.stack(c_n, 0)
if self.batch_first:
h_n = h_n.transpose(0, 1)
c_n = c_n.transpose(0, 1)
return h_n, c_n
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size_dyn': 4, 'input_size_stat': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zero_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
out_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
tmp14 = tl.load(in_ptr3 + x2, xmask)
tmp21 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
tmp22 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = libdevice.tanh(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.sigmoid(tmp10)
tmp12 = 0.0
tmp13 = tmp11 * tmp12
tmp15 = tl.sigmoid(tmp14)
tmp16 = tmp15 * tmp5
tmp17 = tmp13 + tmp16
tmp18 = 1.0
tmp19 = tmp18 - tmp11
tmp20 = tmp11 * tmp19
tmp23 = tmp21 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tl.sigmoid(tmp25)
tmp27 = libdevice.tanh(tmp17)
tmp28 = tmp26 * tmp27
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp17, xmask)
tl.store(out_ptr2 + x2, tmp20, xmask)
tl.store(out_ptr3 + x2, tmp26, xmask)
tl.store(out_ptr4 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
tmp12 = tl.load(in_ptr3 + x2, xmask)
tmp14 = tl.load(in_ptr4 + x2, xmask)
tmp18 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
tmp19 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp13 = tmp5 * tmp12
tmp15 = tl.sigmoid(tmp14)
tmp16 = tmp15 * tmp11
tmp17 = tmp13 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = tl.sigmoid(tmp22)
tmp24 = libdevice.tanh(tmp17)
tmp25 = tmp23 * tmp24
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp17, xmask)
tl.store(out_ptr3 + x2, tmp23, xmask)
tl.store(out_ptr4 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
tmp12 = tl.load(in_ptr3 + x2, xmask)
tmp14 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp13 = tmp5 * tmp12
tmp15 = tl.sigmoid(tmp14)
tmp16 = tmp15 * tmp11
tmp17 = tmp13 + tmp16
tmp18 = libdevice.tanh(tmp17)
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp18, xmask)
@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp19 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp20 = tl.load(in_ptr2 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tl.load(in_ptr4 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp23 = tl.sigmoid(tmp22)
tmp24 = tl.load(in_ptr5 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tmp21 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp16, tmp26, tmp27)
tmp29 = tl.where(tmp14, tmp15, tmp28)
tmp30 = tl.where(tmp9, tmp10, tmp29)
tmp31 = tl.where(tmp4, tmp5, tmp30)
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_stack_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp19 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp20 = tl.load(in_ptr4 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp16, tmp21, tmp22)
tmp24 = tl.where(tmp14, tmp15, tmp23)
tmp25 = tl.where(tmp9, tmp10, tmp24)
tmp26 = tl.where(tmp4, tmp5, tmp25)
tl.store(out_ptr0 + x2, tmp26, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 12), (12, 1))
assert_size_stride(primals_7, (4, 12), (12, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zero_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, 4), (0, 1),
0), primals_5, primals_4, alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.mm(buf0, primals_6, out=buf2)
buf3 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 0),
primals_7, out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1[grid(16)](buf2
, primals_2, buf3, buf1, buf4, buf5, buf30, buf6, buf7, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf8 = buf3
del buf3
extern_kernels.mm(buf7, primals_6, out=buf8)
buf9 = buf2
del buf2
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 4),
primals_7, out=buf9)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf8, primals_2,
buf9, buf5, buf1, buf10, buf11, buf12, buf13, buf14, 16, XBLOCK
=16, num_warps=1, num_stages=1)
buf15 = buf9
del buf9
extern_kernels.mm(buf14, primals_6, out=buf15)
buf16 = buf8
del buf8
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 8),
primals_7, out=buf16)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf15, primals_2,
buf16, buf12, buf1, buf17, buf18, buf19, buf20, buf21, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf22 = buf16
del buf16
extern_kernels.mm(buf21, primals_6, out=buf22)
buf23 = buf15
del buf15
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 12
), primals_7, out=buf23)
del primals_7
buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_tanh_3[grid(16)](buf22, primals_2,
buf23, buf19, buf1, buf24, buf25, buf27, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_sigmoid_4[grid(16)](buf22, primals_2, buf23, buf26,
16, XBLOCK=16, num_warps=1, num_stages=1)
del buf22
del buf23
del primals_2
buf28 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_stack_5[grid(64)](buf5, buf12, buf19, buf24, buf1,
buf25, buf28, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_stack_6[grid(64)](buf7, buf14, buf21, buf26, buf27,
buf29, 64, XBLOCK=64, num_warps=1, num_stages=1)
return (reinterpret_tensor(buf29, (4, 4, 4), (4, 16, 1), 0),
reinterpret_tensor(buf28, (4, 4, 4), (4, 16, 1), 0), buf0, buf1,
buf4, buf5, buf6, buf10, buf11, buf12, buf13, buf17, buf18, buf19,
buf20, buf24, buf25, buf26, buf27, reinterpret_tensor(primals_1, (4,
4), (1, 16), 12), reinterpret_tensor(primals_6, (12, 4), (1, 12), 0
), reinterpret_tensor(buf21, (4, 4), (1, 4), 0), reinterpret_tensor
(primals_1, (4, 4), (1, 16), 8), reinterpret_tensor(buf14, (4, 4),
(1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 16), 4),
reinterpret_tensor(buf7, (4, 4), (1, 4), 0), buf30,
reinterpret_tensor(primals_1, (4, 4), (1, 16), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0))
class EALSTMNew(nn.Module):
"""Implementation of the Entity-Aware-LSTM (EA-LSTM)
TODO: Include paper ref and latex equations
Parameters
----------
input_size_dyn : int
        Number of dynamic features, which are those passed to the LSTM at each time step.
    input_size_stat : int
        Number of static features, which are used to modulate the input gate.
hidden_size : int
Number of hidden/memory cells.
batch_first : bool, optional
        If True, expects the batch inputs to be of shape [batch, seq, features]; otherwise,
        the shape has to be [seq, batch, features], by default True.
initial_forget_bias : int, optional
Value of the initial forget gate bias, by default 0
"""
def __init__(self, input_size_dyn: 'int', input_size_stat: 'int',
hidden_size: 'int', batch_first: 'bool'=True, initial_forget_bias:
'int'=0):
super(EALSTMNew, self).__init__()
self.input_size_dyn = input_size_dyn
self.input_size_stat = input_size_stat
self.hidden_size = hidden_size
self.batch_first = batch_first
self.initial_forget_bias = initial_forget_bias
self.weight_ih = nn.Parameter(torch.FloatTensor(input_size_dyn, 3 *
hidden_size))
self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 3 *
hidden_size))
self.weight_sh = nn.Parameter(torch.FloatTensor(input_size_stat,
hidden_size))
self.bias = nn.Parameter(torch.FloatTensor(3 * hidden_size))
self.bias_s = nn.Parameter(torch.FloatTensor(hidden_size))
self.reset_parameters()
def reset_parameters(self):
"""Initialize all learnable parameters of the LSTM"""
nn.init.orthogonal_(self.weight_ih.data)
nn.init.orthogonal_(self.weight_sh)
weight_hh_data = torch.eye(self.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 3)
self.weight_hh.data = weight_hh_data
nn.init.constant_(self.bias.data, val=0)
nn.init.constant_(self.bias_s.data, val=0)
if self.initial_forget_bias != 0:
self.bias.data[:self.hidden_size] = self.initial_forget_bias
def forward(self, input_0, input_1):
primals_6 = self.weight_ih
primals_7 = self.weight_hh
primals_4 = self.weight_sh
primals_2 = self.bias
primals_3 = self.bias_s
primals_1 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
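# Hedged usage sketch (added; not part of the original repo): the traced
# `call` above is shape-specialized -- a 4-step dynamic input of 4 samples
# with 4 features plus a [batch, features] static input -- and requires CUDA.
# The two returned tensors appear to be the stacked hidden and cell states.
if __name__ == '__main__' and torch.cuda.is_available():
    _model = EALSTMNew(input_size_dyn=4, input_size_stat=4, hidden_size=4).cuda()
    _x_d = torch.rand(4, 4, 4, device='cuda')  # dynamic input, one slice per step
    _x_s = torch.rand(4, 4, device='cuda')     # static input, [batch, features]
    _h_n, _c_n = _model(_x_d, _x_s)            # each of shape [4, 4, 4]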
|
jdwillard19/lake_conus_surface_temp_2021
|
EALSTM
| false
| 10,306
|
[
"MIT"
] | 0
|
88334091dec71ae43fe4256603d65045141936b5
|
https://github.com/jdwillard19/lake_conus_surface_temp_2021/tree/88334091dec71ae43fe4256603d65045141936b5
|
AttentionConv
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class AttentionConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1, bias=False):
super(AttentionConv, self).__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.groups = groups
        assert self.out_channels % self.groups == 0, 'out_channels should be divisible by groups. (example: out_channels: 40, groups: 4)'
self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1,
kernel_size, 1), requires_grad=True)
self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1,
kernel_size), requires_grad=True)
self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1,
bias=bias)
self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.attn_raw = None
self.reset_parameters()
def forward(self, x):
batch, _channels, height, width = x.size()
padded_x = F.pad(x, [self.padding, self.padding, self.padding, self
.padding])
q_out = self.query_conv(x)
k_out = self.key_conv(padded_x)
v_out = self.value_conv(padded_x)
k_out = k_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
v_out = v_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
k_out_h, k_out_w = k_out.split(self.out_channels // 2, dim=1)
k_out = torch.cat((k_out_h + self.rel_h, k_out_w + self.rel_w), dim=1)
k_out = k_out.contiguous().view(batch, self.groups, self.
out_channels // self.groups, height, width, -1)
v_out = v_out.contiguous().view(batch, self.groups, self.
out_channels // self.groups, height, width, -1)
q_out = q_out.view(batch, self.groups, self.out_channels // self.
groups, height, width, 1)
out = q_out * k_out
out = F.softmax(out, dim=-1)
self.attn_raw = out
out = torch.einsum('bnchwk,bnchwk -> bnchw', out, v_out).view(batch,
-1, height, width)
return out
def reset_parameters(self):
init.kaiming_normal_(self.key_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.value_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.query_conv.weight, mode='fan_out',
nonlinearity='relu')
init.normal_(self.rel_h, 0, 1)
init.normal_(self.rel_w, 0, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
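# Hedged usage sketch (added; shapes mirror get_inputs/get_init_inputs above):
# a 1x1-conv q/k/v attention over local k x k windows, with the last softmax
# map kept on `attn_raw` for inspection.
if __name__ == '__main__':
    _conv = AttentionConv(in_channels=4, out_channels=4, kernel_size=4)
    _y = _conv(torch.rand(4, 4, 4, 4))  # -> [4, 4, 4, 4]
    _attn = _conv.attn_raw              # raw attention weights of the last call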
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_cat_mul_unfold_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x3 = xindex // 16 % 4
x4 = xindex // 64
x5 = xindex % 16
x2 = xindex // 4 % 4
x1 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp20 = tl.load(in_ptr3 + x0, xmask)
tmp1 = x3
tl.full([1], 0, tl.int64)
tmp4 = tl.full([1], 2, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr0 + (x5 + 16 * x3 + 64 * x4), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x2 + 4 * x3), tmp5 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp5, tmp8, tmp9)
tmp11 = tmp1 >= tmp4
tl.full([1], 4, tl.int64)
tmp14 = tl.load(in_ptr0 + (32 + x5 + 16 * (-2 + x3) + 64 * x4), tmp11 &
xmask, other=0.0)
tmp15 = tl.load(in_ptr2 + (x1 + 4 * (-2 + x3)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp11, tmp16, tmp17)
tmp19 = tl.where(tmp5, tmp10, tmp18)
tmp21 = 0.0
tmp22 = tmp19 >= tmp21
tmp23 = 1.0
tmp24 = -1.0
tmp25 = tl.where(tmp22, tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 - tmp26
tmp28 = tmp25 * tmp19
tmp29 = tmp27 * tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp30 / tmp30
tmp32 = tmp31 * tmp0
tl.store(in_out_ptr0 + x0, tmp0, xmask)
tl.store(out_ptr0 + x0, tmp19, xmask)
tl.store(out_ptr1 + x0, tmp31, xmask)
tl.store(out_ptr2 + x0, tmp32, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (2, 1, 1, 4, 1), (4, 4, 4, 1, 1))
assert_size_stride(primals_6, (2, 1, 1, 1, 4), (4, 4, 4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 4, 4), (64, 16, 16, 4,
4, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 4, 1, 1, 4, 4), (64, 16, 16, 16, 4, 1
), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4, 4, 4, 1), (64, 64, 16, 4, 1, 1),
torch.float32)
buf6 = empty_strided_cuda((4, 1, 4, 4, 4, 1), (64, 64, 16, 4, 1, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_cat_mul_unfold_0[grid(256)](buf3, buf1,
primals_5, primals_6, buf0, buf4, buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf1
del primals_5
del primals_6
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf5, primals_1, primals_2, primals_3, primals_4, buf0, buf3, buf4, buf5
class AttentionConvNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1, bias=False):
super(AttentionConvNew, self).__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.groups = groups
        assert self.out_channels % self.groups == 0, 'out_channels should be divisible by groups. (example: out_channels: 40, groups: 4)'
self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1,
kernel_size, 1), requires_grad=True)
self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1,
kernel_size), requires_grad=True)
self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1,
bias=bias)
self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.attn_raw = None
self.reset_parameters()
def reset_parameters(self):
init.kaiming_normal_(self.key_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.value_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.query_conv.weight, mode='fan_out',
nonlinearity='relu')
init.normal_(self.rel_h, 0, 1)
init.normal_(self.rel_w, 0, 1)
def forward(self, input_0):
primals_5 = self.rel_h
primals_6 = self.rel_w
primals_2 = self.key_conv.weight
primals_3 = self.query_conv.weight
primals_4 = self.value_conv.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
jhvics1/pytorch-stand-alone-self-attention
|
AttentionConv
| false
| 10,307
|
[
"MIT"
] | 0
|
77375d99250ab9d8089e73bd4803afae30843748
|
https://github.com/jhvics1/pytorch-stand-alone-self-attention/tree/77375d99250ab9d8089e73bd4803afae30843748
|
ConvNet
|
import torch
import torch.optim
import torch.nn as nn
import torch.nn.functional as F
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 1000)
self.fc2 = nn.Linear(1000, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [[], {}]
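# Hedged usage sketch (added): a LeNet-style stack that expects 32x32 RGB
# inputs (matching get_inputs above) and emits 10 class logits.
if __name__ == '__main__':
    _net = ConvNet()
    _logits = _net(torch.rand(4, 3, 32, 32))  # -> [4, 10]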
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.optim
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 1000
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (1000, 400), (400, 1))
assert_size_stride(primals_7, (1000,), (1,))
assert_size_stride(primals_8, (10, 1000), (1000, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2,
18816, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0),
reinterpret_tensor(primals_6, (400, 1000), (1, 400), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(4000)](buf9, primals_7, 4000, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(1000, 10), (1, 1000), 0), alpha=1, beta=1, out=buf10)
del primals_9
return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9,
primals_8, primals_6)
class ConvNetNew(nn.Module):
def __init__(self):
super(ConvNetNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 1000)
self.fc2 = nn.Linear(1000, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
jwdink/PyTorch-LBFGS
|
ConvNet
| false
| 10,308
|
[
"MIT"
] | 0
|
7e18ea3d9cb16a0af1a76f7c9c023c916b408a04
|
https://github.com/jwdink/PyTorch-LBFGS/tree/7e18ea3d9cb16a0af1a76f7c9c023c916b408a04
|
AttentionLayer
|
import torch
import torch.nn as nn
class AttentionLayer(nn.Module):
def __init__(self, embed_dim, num_heads, dropout_rate=0.1,
feedforward_size=256):
"""The core module with both spatial attention module and
temporal attention model embedded within it.
"""
super(AttentionLayer, self).__init__()
self.spatial_attention = nn.MultiheadAttention(embed_dim, num_heads)
self.temporal_attention = nn.MultiheadAttention(embed_dim, num_heads)
self.linear1 = nn.Linear(embed_dim, feedforward_size)
self.linear2 = nn.Linear(feedforward_size, embed_dim)
self.layer_norm = nn.LayerNorm(embed_dim)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, inputs):
"""
:param inputs: shape (T, B, H)
:returns out: shape (T, B, H)
"""
spatial_out, _spatial_attention_matrix = self.spatial_attention(inputs,
inputs, inputs)
spatial_out = self.dropout(spatial_out)
spatial_out += inputs
spatial_out = self.layer_norm(spatial_out)
temporal_out, _temporal_attention_matrix = self.temporal_attention(
inputs, inputs, inputs)
temporal_out = self.dropout(temporal_out)
temporal_out += inputs
temporal_out = self.layer_norm(temporal_out)
attention_out = spatial_out + temporal_out
out = self.linear1(attention_out)
out = self.linear2(out)
out = self.dropout(out)
out += attention_out
out = self.layer_norm(out)
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4}]
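# Hedged usage sketch (added): inputs follow the (T, B, H) convention from the
# forward docstring; embed_dim must be divisible by num_heads.
if __name__ == '__main__':
    _layer = AttentionLayer(embed_dim=4, num_heads=4)
    _out = _layer(torch.rand(5, 2, 4))  # (T, B, H) -> (T, B, H)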
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp37 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tmp30 = tmp29 + tmp1
tmp32 = tmp31 + tmp4
tmp33 = tmp30 + tmp32
tmp35 = tmp34 + tmp8
tmp36 = tmp33 + tmp35
tmp38 = tmp37 + tmp12
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp15
tmp41 = tmp30 - tmp40
tmp42 = tmp41 * tmp41
tmp43 = tmp32 - tmp40
tmp44 = tmp43 * tmp43
tmp45 = tmp42 + tmp44
tmp46 = tmp35 - tmp40
tmp47 = tmp46 * tmp46
tmp48 = tmp45 + tmp47
tmp49 = tmp38 - tmp40
tmp50 = tmp49 * tmp49
tmp51 = tmp48 + tmp50
tmp52 = tmp51 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
tl.store(out_ptr2 + x0, tmp40, xmask)
tl.store(out_ptr3 + x0, tmp52, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x2, xmask)
tmp16 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp14 + tmp1
tmp17 = tmp15 - tmp16
tmp19 = tmp18 + tmp6
tmp20 = libdevice.rsqrt(tmp19)
tmp21 = tmp17 * tmp20
tmp22 = tmp21 * tmp10
tmp23 = tmp22 + tmp12
tmp24 = tmp13 + tmp23
tl.store(out_ptr0 + x2, tmp24, xmask)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (12, 4), (4, 1))
assert_size_stride(primals_9, (12,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (256, 4), (4, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (4, 256), (256, 1))
assert_size_stride(primals_15, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4,
1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf9)
del primals_5
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_9, (4,), (1,), 8),
primals_1, reinterpret_tensor(primals_8, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf14)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_9, (4,), (1,), 4),
primals_1, reinterpret_tensor(primals_8, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf13)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_8, (4, 4),
(1, 4), 0), out=buf12)
del primals_8
buf15 = reinterpret_tensor(buf12, (4, 4, 1), (1, 4, 16), 0)
del buf12
triton_poi_fused_mul_0[grid(16)](buf15, primals_9, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_9
buf16 = buf5
del buf5
extern_kernels.bmm(buf15, reinterpret_tensor(buf13, (4, 1, 4), (1,
1, 4), 0), out=buf16)
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf18 = buf16
del buf16
triton_poi_fused__softmax_2[grid(64)](buf17, buf18, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf17
buf19 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf18, reinterpret_tensor(buf14, (4, 4, 1), (1,
4, 1), 0), out=buf19)
buf20 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf19, buf20, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf21 = reinterpret_tensor(buf19, (4, 4), (4, 1), 0)
del buf19
extern_kernels.addmm(primals_11, reinterpret_tensor(buf20, (4, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf21)
del primals_11
buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf22 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf23 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(4)](buf9, primals_1,
buf21, buf10, buf11, buf22, buf23, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](buf9, primals_1,
buf10, buf11, primals_6, primals_7, buf21, buf22, buf23, buf24,
16, XBLOCK=16, num_warps=1, num_stages=1)
del buf10
del buf11
buf25 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.addmm(primals_13, buf24, reinterpret_tensor(
primals_12, (4, 256), (1, 4), 0), alpha=1, beta=1, out=buf25)
del primals_13
buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf25, reinterpret_tensor(primals_14, (256, 4), (
1, 256), 0), out=buf26)
buf27 = buf26
del buf26
triton_poi_fused_add_6[grid(16)](buf27, primals_15, buf24, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_15
buf28 = buf23
del buf23
buf29 = buf22
del buf22
triton_poi_fused_native_layer_norm_7[grid(4)](buf27, buf28, buf29,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_8[grid(16)](buf27, buf28, buf29,
primals_6, primals_7, buf30, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf28
del buf29
del primals_7
return (buf30, primals_1, primals_6, buf6, reinterpret_tensor(buf8, (4,
4), (4, 1), 0), buf9, buf18, reinterpret_tensor(buf20, (4, 4), (4,
1), 0), buf21, buf24, buf25, buf27, primals_14, primals_12,
primals_10, reinterpret_tensor(buf14, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf15, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf13, (4, 4, 1), (1, 4, 1), 0), primals_4,
reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0))
class AttentionLayerNew(nn.Module):
def __init__(self, embed_dim, num_heads, dropout_rate=0.1,
feedforward_size=256):
"""The core module with both spatial attention module and
temporal attention model embedded within it.
"""
super(AttentionLayerNew, self).__init__()
self.spatial_attention = nn.MultiheadAttention(embed_dim, num_heads)
self.temporal_attention = nn.MultiheadAttention(embed_dim, num_heads)
self.linear1 = nn.Linear(embed_dim, feedforward_size)
self.linear2 = nn.Linear(feedforward_size, embed_dim)
self.layer_norm = nn.LayerNorm(embed_dim)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, input_0):
primals_2 = self.spatial_attention.in_proj_weight
primals_3 = self.spatial_attention.in_proj_bias
primals_1 = self.spatial_attention.out_proj.weight
primals_5 = self.spatial_attention.out_proj.bias
primals_8 = self.temporal_attention.in_proj_weight
primals_9 = self.temporal_attention.in_proj_bias
primals_4 = self.temporal_attention.out_proj.weight
primals_6 = self.temporal_attention.out_proj.bias
primals_12 = self.linear1.weight
primals_13 = self.linear1.bias
primals_14 = self.linear2.weight
primals_7 = self.linear2.bias
primals_11 = self.layer_norm.weight
primals_15 = self.layer_norm.bias
primals_10 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
|
jhbed/fairmotion
|
AttentionLayer
| false
| 10,310
|
[
"BSD-3-Clause"
] | 0
|
949683d628b389a1e4f241b21e88f5d57f3a488e
|
https://github.com/jhbed/fairmotion/tree/949683d628b389a1e4f241b21e88f5d57f3a488e
|
AUGRUCell
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
import torch.onnx
import torch as torch
class AUGRUCell(nn.Module):
""" Effect of GRU with attentional update gate (AUGRU)
Reference:
- Deep Interest Evolution Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1809.03672, 2018.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(AUGRUCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size)
)
self.register_parameter('weight_ih', self.weight_ih)
self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size,
hidden_size))
self.register_parameter('weight_hh', self.weight_hh)
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_ih', self.bias_ih)
self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size))
            self.register_parameter('bias_hh', self.bias_hh)
for tensor in [self.bias_ih, self.bias_hh]:
nn.init.zeros_(tensor)
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, inputs, hx, att_score):
gi = F.linear(inputs, self.weight_ih, self.bias_ih)
gh = F.linear(hx, self.weight_hh, self.bias_hh)
i_r, i_z, i_n = gi.chunk(3, 1)
h_r, h_z, h_n = gh.chunk(3, 1)
reset_gate = torch.sigmoid(i_r + h_r)
update_gate = torch.sigmoid(i_z + h_z)
new_state = torch.tanh(i_n + reset_gate * h_n)
att_score = att_score.view(-1, 1)
update_gate = att_score * update_gate
hy = (1.0 - update_gate) * hx + update_gate * new_state
return hy
def get_inputs():
return [torch.rand([64, 4]), torch.rand([64, 4]), torch.rand([16, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
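# Hedged usage sketch (added): the cell leaves its weight matrices
# uninitialized, so they are initialized here before taking one step;
# att_score holds one scalar per sample and is reshaped to (-1, 1) inside
# forward before gating the update.
if __name__ == '__main__':
    _cell = AUGRUCell(input_size=4, hidden_size=4)
    nn.init.xavier_uniform_(_cell.weight_ih)
    nn.init.xavier_uniform_(_cell.weight_hh)
    _x, _h = torch.rand(8, 4), torch.zeros(8, 4)
    _att = torch.rand(8, 1)        # attention weight per sample
    _h_next = _cell(_x, _h, _att)  # -> [8, 4]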
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from sklearn.metrics import *
import torch.onnx
import torch as torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
tmp19 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.sigmoid(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp11 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = libdevice.tanh(tmp17)
tmp20 = tmp19 * tmp5
tmp21 = 1.0
tmp22 = tmp21 - tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp20 * tmp18
tmp26 = tmp24 + tmp25
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp18, xmask)
tl.store(out_ptr3 + x2, tmp26, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (64, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (64, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_5, reinterpret_tensor(
primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf0,
primals_2, buf1, primals_6, primals_5, buf3, buf2, buf4, buf5,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, primals_5, primals_6, reinterpret_tensor(buf1,
(64, 4), (12, 1), 8), buf2, buf3, buf4
class AUGRUCellNew(nn.Module):
""" Effect of GRU with attentional update gate (AUGRU)
Reference:
- Deep Interest Evolution Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1809.03672, 2018.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(AUGRUCellNew, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size)
)
self.register_parameter('weight_ih', self.weight_ih)
self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size,
hidden_size))
self.register_parameter('weight_hh', self.weight_hh)
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_ih', self.bias_ih)
self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size))
            self.register_parameter('bias_hh', self.bias_hh)
for tensor in [self.bias_ih, self.bias_hh]:
nn.init.zeros_(tensor)
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, input_0, input_1, input_2):
primals_1 = self.weight_ih
primals_4 = self.weight_hh
primals_2 = self.bias_ih
primals_3 = input_0
primals_5 = input_1
primals_6 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
dulvqingyunLT/DeepCTR-Torch
|
AUGRUCell
| false
| 10,311
|
[
"Apache-2.0"
] | 0
|
f40cf08f3469aa471f9ca69e44c5de51180341cc
|
https://github.com/dulvqingyunLT/DeepCTR-Torch/tree/f40cf08f3469aa471f9ca69e44c5de51180341cc
|
Norm
|
import torch
import torch.nn as nn
class Norm(nn.Module):
"""
Re-usable class for either batch-norm or layer-norm (by swapping dim)
"""
def __init__(self, n_hidden, eps=1e-08, dim=0):
super(Norm, self).__init__()
self.eps = eps
self.n_hidden = n_hidden
self.a = nn.Parameter(torch.ones(1, n_hidden), requires_grad=True)
self.b = nn.Parameter(torch.zeros(1, n_hidden), requires_grad=True)
self.dim = dim
def forward(self, x):
mean_x = torch.mean(x, dim=self.dim).expand_as(x)
std_x = torch.std(x, dim=self.dim).expand_as(x)
out = (x - mean_x) / (std_x + self.eps)
out = out * self.a.expand_as(x) + self.b.expand_as(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_hidden': 4}]
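# Hedged usage sketch (added; shapes as in get_inputs above): with the default
# dim=0 the statistics run over the batch axis (batch-norm-like); the learned
# scale `a` and shift `b` broadcast over the last axis.
if __name__ == '__main__':
    _norm = Norm(n_hidden=4)
    _y = _norm(torch.rand(4, 4, 4, 4))  # -> [4, 4, 4, 4]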
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-08
tmp26 = tmp24 + tmp25
tmp27 = tmp10 / tmp26
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x3, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_sub_0[grid(256)](primals_1, primals_2,
primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class NormNew(nn.Module):
"""
Re-usable class for either batch-norm or layer-norm (by swapping dim)
"""
def __init__(self, n_hidden, eps=1e-08, dim=0):
super(NormNew, self).__init__()
self.eps = eps
self.n_hidden = n_hidden
self.a = nn.Parameter(torch.ones(1, n_hidden), requires_grad=True)
self.b = nn.Parameter(torch.zeros(1, n_hidden), requires_grad=True)
self.dim = dim
def forward(self, input_0):
primals_2 = self.a
primals_3 = self.b
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
jrbtaylor/recurrent_pytorch
|
Norm
| false
| 10,312
|
[
"Apache-2.0"
] | 0
|
09ee203a86b70a32aec3e97d7daa646caf8fd182
|
https://github.com/jrbtaylor/recurrent_pytorch/tree/09ee203a86b70a32aec3e97d7daa646caf8fd182
|
Adjust_naive
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
def get_conv2d_layer(in_c, out_c, k, s, p=0, dilation=1, groups=1):
return nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=k,
stride=s, padding=p, dilation=dilation, groups=groups)
class Adjust_naive(nn.Module):
def __init__(self, opt):
super().__init__()
self.conv1 = get_conv2d_layer(in_c=2, out_c=32, k=5, s=1, p=2)
self.conv2 = get_conv2d_layer(in_c=32, out_c=32, k=5, s=1, p=2)
self.conv3 = get_conv2d_layer(in_c=32, out_c=32, k=5, s=1, p=2)
self.conv4 = get_conv2d_layer(in_c=32, out_c=1, k=5, s=1, p=2)
self.leaky_relu = nn.LeakyReLU(0.2)
self.relu = nn.ReLU()
def forward(self, l, alpha):
input = torch.cat([l, alpha], dim=1)
x = self.conv1(input)
x = self.conv2(self.leaky_relu(x))
x = self.conv3(self.leaky_relu(x))
x = self.conv4(self.leaky_relu(x))
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 1, 4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config()}]
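# Hedged usage sketch (added): `opt` is accepted but never read, so any
# placeholder (here None instead of the _mock_config above) works; the two
# single-channel inputs are concatenated along the channel axis.
if __name__ == '__main__':
    _net = Adjust_naive(opt=None)
    _out = _net(torch.rand(4, 1, 4, 4), torch.rand(4, 1, 4, 4))  # -> [4, 1, 4, 4]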
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_3, (32, 2, 5, 5), (50, 25, 5, 1))
assert_size_stride(primals_4, (32,), (1,))
assert_size_stride(primals_5, (32, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_6, (32,), (1,))
assert_size_stride(primals_7, (32, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_8, (32,), (1,))
assert_size_stride(primals_9, (1, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_10, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 4, 4), (512, 16, 4, 1))
buf2 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32
)
triton_poi_fused_convolution_leaky_relu_1[grid(2048)](buf1,
primals_4, buf2, buf3, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf4 = extern_kernels.convolution(buf3, primals_5, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 4, 4), (512, 16, 4, 1))
buf5 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf6 = buf1
del buf1
triton_poi_fused_convolution_leaky_relu_1[grid(2048)](buf4,
primals_6, buf5, buf6, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
buf7 = extern_kernels.convolution(buf6, primals_7, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 32, 4, 4), (512, 16, 4, 1))
buf8 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf9 = buf4
del buf4
triton_poi_fused_convolution_leaky_relu_1[grid(2048)](buf7,
primals_8, buf8, buf9, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
del primals_8
buf10 = extern_kernels.convolution(buf9, primals_9, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 1, 4, 4), (16, 16, 4, 1))
buf11 = buf10
del buf10
buf12 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_2[grid(64)](buf11,
primals_10, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
return (buf11, primals_3, primals_5, primals_7, primals_9, buf0, buf2,
buf3, buf5, buf6, buf8, buf9, buf12)
def get_conv2d_layer(in_c, out_c, k, s, p=0, dilation=1, groups=1):
return nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=k,
stride=s, padding=p, dilation=dilation, groups=groups)
class Adjust_naiveNew(nn.Module):
def __init__(self, opt):
super().__init__()
self.conv1 = get_conv2d_layer(in_c=2, out_c=32, k=5, s=1, p=2)
self.conv2 = get_conv2d_layer(in_c=32, out_c=32, k=5, s=1, p=2)
self.conv3 = get_conv2d_layer(in_c=32, out_c=32, k=5, s=1, p=2)
self.conv4 = get_conv2d_layer(in_c=32, out_c=1, k=5, s=1, p=2)
self.leaky_relu = nn.LeakyReLU(0.2)
self.relu = nn.ReLU()
def forward(self, input_0, input_1):
primals_3 = self.conv1.weight
primals_4 = self.conv1.bias
primals_5 = self.conv2.weight
primals_6 = self.conv2.bias
primals_7 = self.conv3.weight
primals_8 = self.conv3.bias
primals_9 = self.conv4.weight
primals_10 = self.conv4.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
AndersonYong/URetinex-Net-Retinex-based-Deep-Unfolding-Network-for-Low-light-Image-Enhancem
|
Adjust_naive
| false
| 10,313
|
[
"MIT"
] | 0
|
9d837b8df9c761defb1eca390b3a60aa4a6fbb1a
|
https://github.com/AndersonYong/URetinex-Net-Retinex-based-Deep-Unfolding-Network-for-Low-light-Image-Enhancem/tree/9d837b8df9c761defb1eca390b3a60aa4a6fbb1a
|
SpaceToDepth
|
import torch
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
class SpaceToDepth(nn.Module):
def __init__(self, block_size=4):
super().__init__()
assert block_size == 4
self.bs = block_size
def forward(self, x):
N, C, H, W = x.size()
x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous()
x = x.view(N, C * self.bs ** 2, H // self.bs, W // self.bs)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
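# Hedged usage sketch (added): H and W must be divisible by the (asserted)
# block size of 4; every 4x4 spatial block is folded into 16 extra channels.
if __name__ == '__main__':
    _s2d = SpaceToDepth()
    _y = _s2d(torch.rand(2, 3, 8, 8))  # [2, 3, 8, 8] -> [2, 48, 2, 2]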
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1, 1), (64, 16, 4, 1, 1, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 64, 1, 1), (64, 1, 1, 1), 0),
class SpaceToDepthNew(nn.Module):
def __init__(self, block_size=4):
super().__init__()
assert block_size == 4
self.bs = block_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
jasonnoy/COMP5329
|
SpaceToDepth
| false
| 10,314
|
[
"MIT"
] | 0
|
fc17c80b1ac41d788cc0a92d3a033dbe2f9b8b81
|
https://github.com/jasonnoy/COMP5329/tree/fc17c80b1ac41d788cc0a92d3a033dbe2f9b8b81
|
ContrastiveLoss
|
import torch
import torch.nn.functional as F
class ContrastiveLoss(torch.nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=2):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, output1, output2, label):
euclidean_distance = F.pairwise_distance(output1, output2)
loss_contrastive = torch.mean((1 - label) * torch.pow(
euclidean_distance, 2) + label * torch.pow(torch.clamp(self.
margin - euclidean_distance, min=0.0), 2))
return loss_contrastive
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
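# Hypothetical worked example (not part of the original repo): with margin 2,
# a similar pair (label 0) is penalized by d ** 2, while a dissimilar pair
# (label 1) is penalized by max(margin - d, 0) ** 2.
_crit = ContrastiveLoss(margin=2)
_o1 = torch.tensor([[3.0, 0.0]])
_o2 = torch.tensor([[0.0, 0.0]])  # euclidean distance d = 3
assert torch.isclose(_crit(_o1, _o2, torch.tensor([0.0])), torch.tensor(
    9.0), atol=0.001)  # (1 - 0) * d ** 2
assert torch.isclose(_crit(_o1, _o2, torch.tensor([1.0])), torch.tensor(
    0.0), atol=0.001)  # max(2 - 3, 0) ** 2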
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.store(out_ptr0 + x0, tmp24, xmask)
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = 2.0
tmp7 = tmp6 - tmp3
tmp8 = 0.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp9 * tmp9
tmp11 = tmp0 * tmp10
tmp12 = tmp5 + tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2,
arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf2,
class ContrastiveLossNew(torch.nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=2):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
kevincao91/SiameseNet_Demo
|
ContrastiveLoss
| false
| 10,315
|
[
"MIT"
] | 0
|
6ec4384159682a8ee93fb110d6fca33de85fa1ba
|
https://github.com/kevincao91/SiameseNet_Demo/tree/6ec4384159682a8ee93fb110d6fca33de85fa1ba
|
Actor
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, action_dim)
self.max_action = max_action
def forward(self, state):
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
return self.max_action * torch.tanh(self.l3(a))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'max_action': 4}]
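# Hypothetical sanity check (not part of the original repo): tanh bounds the
# raw action to (-1, 1), so outputs always lie in (-max_action, max_action).
_actor = Actor(state_dim=4, action_dim=4, max_action=4)
_a = _actor(torch.rand(8, 4))
assert _a.shape == (8, 4)
assert _a.abs().max().item() < 4.0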
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 256), (256, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
primals_5, buf6, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_tanh_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
), buf4, primals_6, buf6, primals_4, buf7
class ActorNew(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(ActorNew, self).__init__()
self.l1 = nn.Linear(state_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, action_dim)
self.max_action = max_action
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
karush17/gym-pybullet-drones
|
Actor
| false
| 10,316
|
[
"MIT"
] | 0
|
7a7acd4f51dcb1cbea8eb9ef0cfcfc7dcf1c90ba
|
https://github.com/karush17/gym-pybullet-drones/tree/7a7acd4f51dcb1cbea8eb9ef0cfcfc7dcf1c90ba
|
FM
|
import torch
import torch.nn as nn
from sklearn.metrics import *
import torch.onnx
import torch as torch
class FM(nn.Module):
"""Factorization Machine models pairwise (order-2) feature interactions
without linear term and bias.
Input shape
- 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 2D tensor with shape: ``(batch_size, 1)``.
References
- [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
"""
def __init__(self):
super(FM, self).__init__()
def forward(self, inputs):
fm_input = inputs
square_of_sum = torch.pow(torch.sum(fm_input, dim=1, keepdim=True), 2)
sum_of_square = torch.sum(fm_input * fm_input, dim=1, keepdim=True)
cross_term = square_of_sum - sum_of_square
cross_term = 0.5 * torch.sum(cross_term, dim=2, keepdim=False)
return cross_term
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
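# Hypothetical check (not part of the original repo) of the O(n * k) identity
# used above: sum_{i<j} e_i * e_j == 0.5 * ((sum_i e_i) ** 2 - sum_i e_i ** 2),
# computed per embedding dimension and then summed over that dimension.
_e = torch.rand(1, 5, 3)  # (batch, field, embedding)
_brute = torch.zeros(1, 3)
for _i in range(5):
    for _j in range(_i + 1, 5):
        _brute = _brute + _e[:, _i] * _e[:, _j]
torch.testing.assert_close(FM()(_e), _brute.sum(dim=1, keepdim=True))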
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from sklearn.metrics import *
import torch.onnx
import torch as torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
tmp33 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp34 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp36 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp38 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
tmp50 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
tmp51 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
tmp53 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
tmp55 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp0 * tmp0
tmp9 = tmp1 * tmp1
tmp10 = tmp8 + tmp9
tmp11 = tmp3 * tmp3
tmp12 = tmp10 + tmp11
tmp13 = tmp5 * tmp5
tmp14 = tmp12 + tmp13
tmp15 = tmp7 - tmp14
tmp18 = tmp16 + tmp17
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = tmp22 * tmp22
tmp24 = tmp16 * tmp16
tmp25 = tmp17 * tmp17
tmp26 = tmp24 + tmp25
tmp27 = tmp19 * tmp19
tmp28 = tmp26 + tmp27
tmp29 = tmp21 * tmp21
tmp30 = tmp28 + tmp29
tmp31 = tmp23 - tmp30
tmp32 = tmp15 + tmp31
tmp35 = tmp33 + tmp34
tmp37 = tmp35 + tmp36
tmp39 = tmp37 + tmp38
tmp40 = tmp39 * tmp39
tmp41 = tmp33 * tmp33
tmp42 = tmp34 * tmp34
tmp43 = tmp41 + tmp42
tmp44 = tmp36 * tmp36
tmp45 = tmp43 + tmp44
tmp46 = tmp38 * tmp38
tmp47 = tmp45 + tmp46
tmp48 = tmp40 - tmp47
tmp49 = tmp32 + tmp48
tmp52 = tmp50 + tmp51
tmp54 = tmp52 + tmp53
tmp56 = tmp54 + tmp55
tmp57 = tmp56 * tmp56
tmp58 = tmp50 * tmp50
tmp59 = tmp51 * tmp51
tmp60 = tmp58 + tmp59
tmp61 = tmp53 * tmp53
tmp62 = tmp60 + tmp61
tmp63 = tmp55 * tmp55
tmp64 = tmp62 + tmp63
tmp65 = tmp57 - tmp64
tmp66 = tmp49 + tmp65
tmp67 = 0.5
tmp68 = tmp66 * tmp67
tl.store(in_out_ptr0 + x2, tmp68, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_pow_sub_sum_0[grid(16)](buf1, arg0_1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return buf1,
class FMNew(nn.Module):
"""Factorization Machine models pairwise (order-2) feature interactions
without linear term and bias.
Input shape
- 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 2D tensor with shape: ``(batch_size, 1)``.
References
- [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
"""
def __init__(self):
super(FMNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
dulvqingyunLT/DeepCTR-Torch
|
FM
| false
| 10,317
|
[
"Apache-2.0"
] | 0
|
f40cf08f3469aa471f9ca69e44c5de51180341cc
|
https://github.com/dulvqingyunLT/DeepCTR-Torch/tree/f40cf08f3469aa471f9ca69e44c5de51180341cc
|
CapsuleConvLayer
|
import torch
import torch.nn as nn
class CapsuleConvLayer(nn.Module):
def __init__(self, in_channels, out_channels):
super(CapsuleConvLayer, self).__init__()
self.conv0 = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=9, stride=1, bias=True)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.relu(self.conv0(x))
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
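# Hypothetical shape sketch (not part of the original repo): a 9x9 conv with
# stride 1 and no padding maps 64x64 inputs to 56x56 (64 - 9 + 1 = 56),
# matching the assert in the compiled call() below.
_layer = CapsuleConvLayer(in_channels=4, out_channels=4)
assert _layer(torch.rand(4, 4, 64, 64)).shape == (4, 4, 56, 56)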
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 3136 % 4
x0 = xindex % 3136
x3 = xindex // 3136
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x0 + 3200 * x3), tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 9, 9), (324, 81, 9, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 56, 56), (12544, 3136, 56, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 56, 56), (12800, 3200, 56, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(50176)](
buf1, primals_2, buf2, 50176, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_2
return buf1, primals_1, primals_3, buf2
class CapsuleConvLayerNew(nn.Module):
def __init__(self, in_channels, out_channels):
super(CapsuleConvLayerNew, self).__init__()
self.conv0 = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=9, stride=1, bias=True)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv0.weight
primals_2 = self.conv0.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
juingzhou/Base-on-PyTorch-implementation-CapsuleNet
|
CapsuleConvLayer
| false
| 10,318
|
[
"MIT"
] | 0
|
6b030bf93b258d9d6496379bcbe4b94542366817
|
https://github.com/juingzhou/Base-on-PyTorch-implementation-CapsuleNet/tree/6b030bf93b258d9d6496379bcbe4b94542366817
|
ConvUnit
|
import torch
import torch.nn as nn
class ConvUnit(nn.Module):
def __init__(self, in_channels):
super(ConvUnit, self).__init__()
self.conv0 = nn.Conv2d(in_channels=in_channels, out_channels=32,
kernel_size=9, stride=2, bias=True)
def forward(self, x):
return self.conv0(x)
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_channels': 4}]
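# Hypothetical shape sketch (not part of the original repo): kernel 9 with
# stride 2 and no padding gives (64 - 9) // 2 + 1 = 28 per spatial dim.
_unit = ConvUnit(in_channels=4)
assert _unit(torch.rand(4, 4, 64, 64)).shape == (4, 32, 28, 28)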
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 784 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 9, 9), (324, 81, 9, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 28, 28), (25088, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(100352)](buf1, primals_2,
100352, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class ConvUnitNew(nn.Module):
def __init__(self, in_channels):
super(ConvUnitNew, self).__init__()
self.conv0 = nn.Conv2d(in_channels=in_channels, out_channels=32,
kernel_size=9, stride=2, bias=True)
def forward(self, input_0):
primals_1 = self.conv0.weight
primals_2 = self.conv0.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
juingzhou/Base-on-PyTorch-implementation-CapsuleNet
|
ConvUnit
| false
| 10,319
|
[
"MIT"
] | 0
|
6b030bf93b258d9d6496379bcbe4b94542366817
|
https://github.com/juingzhou/Base-on-PyTorch-implementation-CapsuleNet/tree/6b030bf93b258d9d6496379bcbe4b94542366817
|
KLLoss
|
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.checkpoint
class KLLoss(nn.Module):
"""Loss that uses a 'hinge' on the lower bound.
This means that for samples with a label value smaller than the threshold, the loss is zero if the prediction is
also smaller than that threshold.
args:
error_matric: What base loss to use (MSE by default).
threshold: Threshold to use for the hinge.
clip: Clip the loss if it is above this value.
"""
def __init__(self, error_metric=nn.KLDivLoss(size_average=True, reduce=
True)):
super().__init__()
None
self.error_metric = error_metric
def forward(self, prediction, label):
batch_size = prediction.shape[0]
probs1 = F.log_softmax(prediction, 1)
probs2 = F.softmax(label * 10, 1)
loss = self.error_metric(probs1, probs2) * batch_size
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
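# Hypothetical equivalence sketch (not part of the original repo): with the
# default KLDivLoss (mean reduction), the class should match the functional
# form below, scaled by the batch size.
_pred = torch.rand(4, 4, 4, 4)
_label = torch.rand(4, 4, 4, 4)
_manual = F.kl_div(F.log_softmax(_pred, 1), F.softmax(_label * 10, 1),
    reduction='mean') * _pred.shape[0]
torch.testing.assert_close(KLLoss()(_pred, _label), _manual)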
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 10.0
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr1 + r3, None)
tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp36 = 256.0
tmp37 = tmp35 / tmp36
tmp38 = 4.0
tmp39 = tmp37 * tmp38
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp39, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](arg0_1, buf2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2[grid(1)](
buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf2
return buf4,
class KLLossNew(nn.Module):
"""Loss that uses a 'hinge' on the lower bound.
This means that for samples with a label value smaller than the threshold, the loss is zero if the prediction is
also smaller than that threshold.
args:
error_matric: What base loss to use (MSE by default).
threshold: Threshold to use for the hinge.
clip: Clip the loss if it is above this value.
"""
def __init__(self, error_metric=nn.KLDivLoss(size_average=True, reduce=
True)):
super().__init__()
None
self.error_metric = error_metric
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
jiazheng-xing/Swin_Multimodal
|
KLLoss
| false
| 10,320
|
[
"MIT"
] | 0
|
7bc41977fe7d8d4f0091852c63a6a32a0fada0fb
|
https://github.com/jiazheng-xing/Swin_Multimodal/tree/7bc41977fe7d8d4f0091852c63a6a32a0fada0fb
|
DeepHeadModule
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt as sqrt
class DeepHeadModule(nn.Module):
def __init__(self, input_channels, output_channels):
super(DeepHeadModule, self).__init__()
self._input_channels = input_channels
self._output_channels = output_channels
self._mid_channels = min(self._input_channels, 256)
self.conv1 = nn.Conv2d(self._input_channels, self._mid_channels,
kernel_size=3, dilation=1, stride=1, padding=1)
self.conv2 = nn.Conv2d(self._mid_channels, self._mid_channels,
kernel_size=3, dilation=1, stride=1, padding=1)
self.conv3 = nn.Conv2d(self._mid_channels, self._mid_channels,
kernel_size=3, dilation=1, stride=1, padding=1)
self.conv4 = nn.Conv2d(self._mid_channels, self._output_channels,
kernel_size=1, dilation=1, stride=1, padding=0)
def forward(self, x):
return self.conv4(F.relu(self.conv3(F.relu(self.conv2(F.relu(self.
conv1(x), inplace=True)), inplace=True)), inplace=True))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'output_channels': 4}]
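# Hypothetical shape sketch (not part of the original repo): the three 3x3
# convs use padding=1 so H x W is preserved, the final 1x1 conv sets the
# channel count, and _mid_channels is capped at 256 for wide inputs.
_head = DeepHeadModule(input_channels=512, output_channels=8)
assert _head._mid_channels == 256
assert _head(torch.rand(1, 512, 7, 7)).shape == (1, 8, 7, 7)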
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from math import sqrt as sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(256)](buf3, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(256)](buf5, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_1[grid(256)](buf7, primals_9, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf3, buf5)
class DeepHeadModuleNew(nn.Module):
def __init__(self, input_channels, output_channels):
super(DeepHeadModuleNew, self).__init__()
self._input_channels = input_channels
self._output_channels = output_channels
self._mid_channels = min(self._input_channels, 256)
self.conv1 = nn.Conv2d(self._input_channels, self._mid_channels,
kernel_size=3, dilation=1, stride=1, padding=1)
self.conv2 = nn.Conv2d(self._mid_channels, self._mid_channels,
kernel_size=3, dilation=1, stride=1, padding=1)
self.conv3 = nn.Conv2d(self._mid_channels, self._mid_channels,
kernel_size=3, dilation=1, stride=1, padding=1)
self.conv4 = nn.Conv2d(self._mid_channels, self._output_channels,
kernel_size=1, dilation=1, stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
juanmed/FaceDetection-DSFD
|
DeepHeadModule
| false
| 10,321
|
[
"Apache-2.0"
] | 0
|
23650ca492444f9f052ca9b8db8b068a9be5bc68
|
https://github.com/juanmed/FaceDetection-DSFD/tree/23650ca492444f9f052ca9b8db8b068a9be5bc68
|
CNN
|
import torch
from torch import nn
import torch.nn.functional as F
class CNN(torch.nn.Module):
"""Basic CNN architecture."""
def __init__(self, in_channels=1):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(in_channels, 64, 8, 1)
self.conv2 = nn.Conv2d(64, 128, 6, 2)
self.conv3 = nn.Conv2d(128, 128, 5, 2)
self.fc = nn.Linear(128 * 3 * 3, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(-1, 128 * 3 * 3)
x = self.fc(x)
return x
def get_inputs():
return [torch.rand([4, 1, 32, 32])]
def get_init_inputs():
return [[], {}]
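# Hypothetical shape walk (not part of the original repo) for the 32x32
# input, explaining the 128 * 3 * 3 flatten size:
# conv1 (k=8, s=1): 32 -> 25; conv2 (k=6, s=2): 25 -> 10; conv3 (k=5, s=2): 10 -> 3.
_net = CNN()
assert _net(torch.rand(2, 1, 32, 32)).shape == (2, 10)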
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 256
xnumel = 625
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 625 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 64 * x2 + 40000 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 512
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 128
y1 = yindex // 128
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 1152 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 9 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 128 * x2 + 1152 * y1), tmp6, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 8, 8), (64, 64, 8, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 32, 32), (1024, 1024, 32, 1))
assert_size_stride(primals_4, (128, 64, 6, 6), (2304, 36, 6, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (128, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (10, 1152), (1152, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 64, 6, 6), (2304, 1, 384, 64),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(8192, 36)](primals_4, buf0, 8192, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128),
torch.float32)
triton_poi_fused_1[grid(16384, 25)](primals_6, buf1, 16384, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf2 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 25, 25), (40000, 625, 25, 1))
buf3 = empty_strided_cuda((4, 64, 25, 25), (40000, 1, 1600, 64),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(256, 625)](buf2, primals_2,
buf3, 256, 625, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf2
del primals_2
buf4 = extern_kernels.convolution(buf3, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 128, 10, 10), (12800, 1, 1280, 128))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_3[grid(51200)](buf5, primals_5,
51200, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, buf1, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 3, 3), (1152, 1, 384, 128))
buf7 = empty_strided_cuda((4, 128, 3, 3), (1152, 9, 3, 1), torch.
float32)
buf9 = empty_strided_cuda((4, 128, 3, 3), (1152, 1, 384, 128),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_4[grid(512, 9)](
buf6, primals_7, buf7, buf9, 512, 9, XBLOCK=1, YBLOCK=256,
num_warps=4, num_stages=1)
del buf6
del primals_7
buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf7, (4, 1152),
(1152, 1), 0), reinterpret_tensor(primals_8, (1152, 10), (1,
1152), 0), alpha=1, beta=1, out=buf8)
del primals_9
return (buf8, primals_1, primals_3, buf0, buf1, buf3, buf5,
reinterpret_tensor(buf7, (4, 1152), (1152, 1), 0), primals_8, buf9)
class CNNNew(torch.nn.Module):
"""Basic CNN architecture."""
def __init__(self, in_channels=1):
super(CNNNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels, 64, 8, 1)
self.conv2 = nn.Conv2d(64, 128, 6, 2)
self.conv3 = nn.Conv2d(128, 128, 5, 2)
self.fc = nn.Linear(128 * 3 * 3, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc.weight
primals_9 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
jubueche/cleverhans
|
CNN
| false
| 10,322
|
[
"MIT"
] | 0
|
2e45b75ccc7b04ffec27fd9e6079f00451586266
|
https://github.com/jubueche/cleverhans/tree/2e45b75ccc7b04ffec27fd9e6079f00451586266
|
AsymmetricLossOptimized
|
import torch
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
class AsymmetricLossOptimized(nn.Module):
""" Notice - optimized version, minimizes memory allocation and gpu uploading,
favors inplace operations"""
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-08,
disable_torch_grad_focal_loss=False):
super(AsymmetricLossOptimized, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
(self.targets) = (self.anti_targets) = (self.xs_pos) = (self.xs_neg
) = (self.asymmetric_w) = (self.loss) = None
def forward(self, x, y):
""""
Parameters
----------
x: input logits
y: targets (multi-label binarized vector)
"""
self.targets = y
self.anti_targets = 1 - y
self.xs_pos = torch.sigmoid(x)
self.xs_neg = 1.0 - self.xs_pos
if self.clip is not None and self.clip > 0:
self.xs_neg.add_(self.clip).clamp_(max=1)
self.loss = self.targets * torch.log(self.xs_pos.clamp(min=self.eps))
self.loss.add_(self.anti_targets * torch.log(self.xs_neg.clamp(min=
self.eps)))
if self.gamma_neg > 0 or self.gamma_pos > 0:
if self.disable_torch_grad_focal_loss:
torch._C.set_grad_enabled(False)
self.xs_pos = self.xs_pos * self.targets
self.xs_neg = self.xs_neg * self.anti_targets
self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
self.gamma_pos * self.targets + self.gamma_neg * self.
anti_targets)
if self.disable_torch_grad_focal_loss:
torch._C.set_grad_enabled(True)
self.loss *= self.asymmetric_w
return -self.loss.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
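# Hypothetical single-positive sketch (not part of the original repo): for
# y = 1 the loss reduces to -(1 - p) ** gamma_pos * log(p) with p =
# sigmoid(x), so confident positives (p -> 1) are down-weighted.
_x = torch.tensor([[2.0]])
_y = torch.tensor([[1.0]])
_p = torch.sigmoid(_x)
_manual = -((1 - _p) * torch.log(_p.clamp(min=1e-08))).sum()
torch.testing.assert_close(AsymmetricLossOptimized()(_x, _y), _manual)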
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
out_ptr4, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp4 * tmp0
tmp6 = tmp1 - tmp4
tmp7 = 0.05
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.minimum(tmp8, tmp1)
tmp10 = tmp9 * tmp2
tmp11 = tmp1 - tmp5
tmp12 = tmp11 - tmp10
tmp13 = tmp0 * tmp1
tmp14 = 4.0
tmp15 = tmp2 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.pow(tmp12, tmp16)
tmp18 = 1e-08
tmp19 = triton_helpers.maximum(tmp4, tmp18)
tmp20 = tl_math.log(tmp19)
tmp21 = tmp0 * tmp20
tmp22 = triton_helpers.maximum(tmp9, tmp18)
tmp23 = tl_math.log(tmp22)
tmp24 = tmp2 * tmp23
tmp25 = tmp21 + tmp24
tmp26 = tmp25 * tmp17
tmp27 = tl.broadcast_to(tmp26, [RBLOCK])
tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0))
tmp30 = -tmp29
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp2, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp5, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp10, None)
tl.store(out_ptr3 + tl.broadcast_to(r0, [RBLOCK]), tmp17, None)
tl.store(out_ptr4 + tl.broadcast_to(r0, [RBLOCK]), tmp26, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp30, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5
del buf5
get_raw_stream(0)
triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0[grid
(1)](buf6, arg0_1, arg1_1, buf0, buf1, buf2, buf3, buf4, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf6, buf3, buf4, buf2, buf1, buf0
class AsymmetricLossOptimizedNew(nn.Module):
""" Notice - optimized version, minimizes memory allocation and gpu uploading,
favors inplace operations"""
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-08,
disable_torch_grad_focal_loss=False):
super(AsymmetricLossOptimizedNew, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
(self.targets) = (self.anti_targets) = (self.xs_pos) = (self.xs_neg
) = (self.asymmetric_w) = (self.loss) = None
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
jasonnoy/COMP5329
|
AsymmetricLossOptimized
| false
| 10,323
|
[
"MIT"
] | 0
|
fc17c80b1ac41d788cc0a92d3a033dbe2f9b8b81
|
https://github.com/jasonnoy/COMP5329/tree/fc17c80b1ac41d788cc0a92d3a033dbe2f9b8b81
|
SEModule
|
import torch
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
class FastAvgPool2d(nn.Module):
def __init__(self, flatten=False):
super(FastAvgPool2d, self).__init__()
self.flatten = flatten
def forward(self, x):
if self.flatten:
in_size = x.size()
return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
else:
return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0),
x.size(1), 1, 1)
class SEModule(nn.Module):
def __init__(self, channels, reduction_channels, inplace=True):
super(SEModule, self).__init__()
self.avg_pool = FastAvgPool2d()
self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1,
padding=0, bias=True)
self.relu = nn.ReLU(inplace=inplace)
self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1,
padding=0, bias=True)
self.activation = nn.Sigmoid()
def forward(self, x):
x_se = self.avg_pool(x)
x_se2 = self.fc1(x_se)
x_se2 = self.relu(x_se2)
x_se = self.fc2(x_se2)
x_se = self.activation(x_se)
return x * x_se
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'reduction_channels': 4}]
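# Hypothetical usage sketch (not part of the original repo): squeeze-excite
# rescales each channel by a sigmoid gate in (0, 1), so the shape is kept
# and, for non-negative inputs, the output never exceeds the input.
_se = SEModule(channels=8, reduction_channels=2)
_x = torch.rand(2, 8, 16, 16)
_out = _se(_x)
assert _out.shape == _x.shape
assert (_out <= _x + 1e-06).all()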
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torchvision import datasets as datasets
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 4, 1,
1), (4, 1, 0, 0), 0), primals_2, stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(16)](buf5, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf5, buf6,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf6, primals_1, primals_2, primals_4, reinterpret_tensor(buf1,
(4, 4, 1, 1), (4, 1, 1, 1), 0), buf3, buf5
class FastAvgPool2d(nn.Module):
def __init__(self, flatten=False):
super(FastAvgPool2d, self).__init__()
self.flatten = flatten
def forward(self, x):
if self.flatten:
in_size = x.size()
return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
else:
return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0),
x.size(1), 1, 1)
class SEModuleNew(nn.Module):
def __init__(self, channels, reduction_channels, inplace=True):
super(SEModuleNew, self).__init__()
self.avg_pool = FastAvgPool2d()
self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1,
padding=0, bias=True)
self.relu = nn.ReLU(inplace=inplace)
self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1,
padding=0, bias=True)
self.activation = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
jasonnoy/COMP5329
|
SEModule
| false
| 10,324
|
[
"MIT"
] | 0
|
fc17c80b1ac41d788cc0a92d3a033dbe2f9b8b81
|
https://github.com/jasonnoy/COMP5329/tree/fc17c80b1ac41d788cc0a92d3a033dbe2f9b8b81
|
StdConv2d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
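# Hypothetical check (not part of the original repo): the standardized
# weight actually convolved with has zero mean and (nearly) unit variance
# per output filter.
_conv = StdConv2d(4, 4, 3, padding=1)
_v, _m = torch.var_mean(_conv.weight, dim=[1, 2, 3], keepdim=True,
    unbiased=False)
_w = (_conv.weight - _m) / torch.sqrt(_v + 1e-10)
assert _w.mean(dim=[1, 2, 3]).abs().max() < 1e-05
assert (_w.var(dim=[1, 2, 3], unbiased=False) - 1).abs().max() < 0.001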
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_0(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-10
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 64 * x0), tmp23, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_sqrt_sub_var_mean_0[grid(4)](buf3,
primals_1, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = extern_kernels.convolution(primals_3, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_1[grid(16)](buf6, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf6, primals_1, primals_3, buf3, buf4
class StdConv2dNew(nn.Conv2d):
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
kayvane1/BiT-Tobacco-800
|
StdConv2d
| false
| 10,325
|
[
"Apache-2.0"
] | 0
|
fd937cc3f8fc1d5e45744defd82d112c10281433
|
https://github.com/kayvane1/BiT-Tobacco-800/tree/fd937cc3f8fc1d5e45744defd82d112c10281433
|
RNN
|
import torch
import torch.nn as nn
class Norm(nn.Module):
"""
Re-usable class for either batch-norm or layer-norm (by swapping dim)
"""
def __init__(self, n_hidden, eps=1e-08, dim=0):
super(Norm, self).__init__()
self.eps = eps
self.n_hidden = n_hidden
self.a = nn.Parameter(torch.ones(1, n_hidden), requires_grad=True)
self.b = nn.Parameter(torch.zeros(1, n_hidden), requires_grad=True)
self.dim = dim
def forward(self, x):
mean_x = torch.mean(x, dim=self.dim).expand_as(x)
std_x = torch.std(x, dim=self.dim).expand_as(x)
out = (x - mean_x) / (std_x + self.eps)
out = out * self.a.expand_as(x) + self.b.expand_as(x)
return out
class LayerNorm(Norm):
def __init__(self, n_hidden, eps=1e-08):
super(LayerNorm, self).__init__(n_hidden, eps, dim=1)
class RNN(nn.Module):
def __init__(self, n_in, n_hidden, n_out, layer_norm=False):
super(RNN, self).__init__()
self.n_hidden = n_hidden
self.i2h = nn.Linear(n_in + n_hidden, n_hidden)
self.dropout = nn.Dropout()
self.h2o = nn.Linear(n_hidden, n_out)
        self.softmax = nn.LogSoftmax(dim=1)
self.layer_norm = layer_norm
self.aux_loss = 0
if layer_norm:
self.normh = LayerNorm(n_hidden)
self.activation = nn.ReLU()
else:
self.activation = nn.Sigmoid()
def forward(self, input, hidden):
combined = torch.cat((input, hidden), 1)
hidden = self.i2h(combined)
hidden = self.activation(hidden)
if self.layer_norm:
hidden = self.normh(hidden)
output = self.h2o(self.dropout(hidden))
output = self.softmax(output)
return output, hidden
def init_hidden(self, batch_size=1):
return nn.Parameter(torch.zeros(1, self.n_hidden), requires_grad=True
).repeat(batch_size, 1)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_hidden': 4, 'n_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_sigmoid_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__log_softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf4
return buf5, buf2, buf0, buf2, buf5, primals_5
class Norm(nn.Module):
"""
Re-usable class for either batch-norm or layer-norm (by swapping dim)
"""
def __init__(self, n_hidden, eps=1e-08, dim=0):
super(Norm, self).__init__()
self.eps = eps
self.n_hidden = n_hidden
self.a = nn.Parameter(torch.ones(1, n_hidden), requires_grad=True)
self.b = nn.Parameter(torch.zeros(1, n_hidden), requires_grad=True)
self.dim = dim
def forward(self, x):
mean_x = torch.mean(x, dim=self.dim).expand_as(x)
std_x = torch.std(x, dim=self.dim).expand_as(x)
out = (x - mean_x) / (std_x + self.eps)
out = out * self.a.expand_as(x) + self.b.expand_as(x)
return out
class LayerNorm(Norm):
def __init__(self, n_hidden, eps=1e-08):
super(LayerNorm, self).__init__(n_hidden, eps, dim=1)
class RNNNew(nn.Module):
def __init__(self, n_in, n_hidden, n_out, layer_norm=False):
super(RNNNew, self).__init__()
self.n_hidden = n_hidden
self.i2h = nn.Linear(n_in + n_hidden, n_hidden)
self.dropout = nn.Dropout()
self.h2o = nn.Linear(n_hidden, n_out)
        self.softmax = nn.LogSoftmax(dim=1)
self.layer_norm = layer_norm
self.aux_loss = 0
if layer_norm:
self.normh = LayerNorm(n_hidden)
self.activation = nn.ReLU()
else:
self.activation = nn.Sigmoid()
def init_hidden(self, batch_size=1):
return nn.Parameter(torch.zeros(1, self.n_hidden), requires_grad=True
).repeat(batch_size, 1)
def forward(self, input_0, input_1):
primals_3 = self.i2h.weight
primals_4 = self.i2h.bias
primals_1 = self.h2o.weight
primals_6 = self.h2o.bias
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
jrbtaylor/recurrent_pytorch
|
RNN
| false
| 10,326
|
[
"Apache-2.0"
] | 0
|
09ee203a86b70a32aec3e97d7daa646caf8fd182
|
https://github.com/jrbtaylor/recurrent_pytorch/tree/09ee203a86b70a32aec3e97d7daa646caf8fd182
|
BinaryLogisticRegressionLoss
|
import torch
import torch.nn as nn
def binary_logistic_regression_loss(reg_score, label, threshold=0.5,
ratio_range=(1.05, 21), eps=1e-05):
"""Binary Logistic Regression Loss."""
label = label.view(-1)
reg_score = reg_score.contiguous().view(-1)
pmask = (label > threshold).float()
num_positive = max(torch.sum(pmask), 1)
num_entries = len(label)
ratio = num_entries / num_positive
ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (1.0 - pmask
) * torch.log(1.0 - reg_score + eps)
loss = -torch.mean(loss)
return loss
class BinaryLogisticRegressionLoss(nn.Module):
"""Binary Logistic Regression Loss.
    It calculates the binary logistic regression loss given reg_score and
    label.
"""
def forward(self, reg_score, label, threshold=0.5, ratio_range=(1.05,
21), eps=1e-05):
"""Calculate Binary Logistic Regression Loss.
Args:
reg_score (torch.Tensor): Predicted score by model.
label (torch.Tensor): Groundtruth labels.
threshold (float): Threshold for positive instances.
Default: 0.5.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
return binary_logistic_regression_loss(reg_score, label, threshold,
ratio_range, eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
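# --- Usage sketch (illustrative; not part of the upstream snippet) ---
# With uniform random scores and labels in [0, 1), roughly half the labels
# exceed the 0.5 threshold, so both the positive and negative terms of the
# loss are exercised; the result is a finite scalar.
loss_fn = BinaryLogisticRegressionLoss()
reg_score = torch.rand(4, 4, 4, 4)
label = torch.rand(4, 4, 4, 4)
loss = loss_fn(reg_score, label)
assert loss.dim() == 0
assert torch.isfinite(loss)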
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_add_clamp_div_gt_log_mean_mul_neg_reciprocal_rsub_sub_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp19 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.5
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 1.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tl.full([1], 1, tl.int32)
tmp10 = tmp9 / tmp8
tmp11 = 256.0
tmp12 = tmp10 * tmp11
tmp13 = 1.05
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = 21.0
tmp16 = triton_helpers.minimum(tmp14, tmp15)
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp3
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = tl_math.log(tmp21)
tmp23 = tmp18 * tmp22
tmp24 = tmp16 - tmp7
tmp25 = tmp17 / tmp24
tmp26 = tmp7 - tmp3
tmp27 = tmp25 * tmp26
tmp28 = tmp7 - tmp19
tmp29 = tmp28 + tmp20
tmp30 = tl_math.log(tmp29)
tmp31 = tmp27 * tmp30
tmp32 = tmp23 + tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp36 = tmp35 / tmp11
tmp37 = -tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
get_raw_stream(0)
triton_per_fused__to_copy_add_clamp_div_gt_log_mean_mul_neg_reciprocal_rsub_sub_sum_0[
grid(1)](buf3, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
def binary_logistic_regression_loss(reg_score, label, threshold=0.5,
ratio_range=(1.05, 21), eps=1e-05):
"""Binary Logistic Regression Loss."""
label = label.view(-1)
reg_score = reg_score.contiguous().view(-1)
pmask = (label > threshold).float()
num_positive = max(torch.sum(pmask), 1)
num_entries = len(label)
ratio = num_entries / num_positive
ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (1.0 - pmask
) * torch.log(1.0 - reg_score + eps)
loss = -torch.mean(loss)
return loss
class BinaryLogisticRegressionLossNew(nn.Module):
"""Binary Logistic Regression Loss.
    It calculates the binary logistic regression loss given reg_score and
    label.
"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
giahaowjx/mmaction2
|
BinaryLogisticRegressionLoss
| false
| 10,327
|
[
"Apache-2.0"
] | 0
|
4f95e9b91354acdcae768ce94e01d3821bba0154
|
https://github.com/giahaowjx/mmaction2/tree/4f95e9b91354acdcae768ce94e01d3821bba0154
|
FEM
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt as sqrt
class FEM(nn.Module):
def __init__(self, channel_size):
super(FEM, self).__init__()
self.cs = channel_size
self.cpm1 = nn.Conv2d(self.cs, 256, kernel_size=3, dilation=1,
stride=1, padding=1)
self.cpm2 = nn.Conv2d(self.cs, 256, kernel_size=3, dilation=2,
stride=1, padding=2)
self.cpm3 = nn.Conv2d(256, 128, kernel_size=3, dilation=1, stride=1,
padding=1)
self.cpm4 = nn.Conv2d(256, 128, kernel_size=3, dilation=2, stride=1,
padding=2)
self.cpm5 = nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1,
padding=1)
def forward(self, x):
x1_1 = F.relu(self.cpm1(x), inplace=True)
x1_2 = F.relu(self.cpm2(x), inplace=True)
x2_1 = F.relu(self.cpm3(x1_2), inplace=True)
x2_2 = F.relu(self.cpm4(x1_2), inplace=True)
x3_1 = F.relu(self.cpm5(x2_2), inplace=True)
return torch.cat((x1_1, x2_1, x3_1), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel_size': 4}]
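# --- Usage sketch (illustrative; not part of the upstream snippet) ---
# FEM concatenates three branches along the channel axis
# (256 + 128 + 128 = 512); the 3x3 convs use matching padding/dilation,
# so the spatial size is preserved.
fem = FEM(channel_size=4)
x = torch.rand(4, 4, 4, 4)
assert fem(x).shape == (4, 512, 4, 4)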
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from math import sqrt as sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 512
x0 = xindex % 16
x2 = xindex // 8192
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 4096 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 384, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr2 + (x0 + 16 * (-256 + x1) + 2048 * x2), tmp15,
other=0.0)
tmp17 = tl.load(in_ptr3 + (-256 + x1), tmp15, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp8, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tmp0 >= tmp13
tl.full([1], 512, tl.int64)
tmp25 = tl.load(in_ptr4 + (x0 + 16 * (-384 + x1) + 2048 * x2), tmp22,
other=0.0)
tmp26 = tl.load(in_ptr5 + (-384 + x1), tmp22, eviction_policy=
'evict_last', other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = triton_helpers.maximum(tmp8, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp22, tmp28, tmp29)
tmp31 = tl.where(tmp15, tmp21, tmp30)
tmp32 = tl.where(tmp4, tmp11, tmp31)
tl.store(out_ptr0 + x3, tmp32, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 4, 4), (4096, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 4, 4), (4096, 16, 4, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(16384)](buf2, primals_5,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 4, 4), (2048, 16, 4, 1))
buf4 = extern_kernels.convolution(buf2, primals_8, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 128, 4, 4), (2048, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_1[grid(8192)](buf5, primals_9,
8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf6 = extern_kernels.convolution(buf5, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 4, 4), (2048, 16, 4, 1))
buf7 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_2[grid(32768)](buf0, primals_2, buf3,
primals_7, buf6, primals_11, buf7, 32768, XBLOCK=256, num_warps
=4, num_stages=1)
buf8 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(8192)](buf6
, primals_11, buf8, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
del primals_11
buf9 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(8192)](buf3
, primals_7, buf9, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_7
buf10 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_4[grid(16384)](
buf0, primals_2, buf10, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del primals_2
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, buf2, buf5, buf8, buf9, buf10)
class FEMNew(nn.Module):
def __init__(self, channel_size):
super(FEMNew, self).__init__()
self.cs = channel_size
self.cpm1 = nn.Conv2d(self.cs, 256, kernel_size=3, dilation=1,
stride=1, padding=1)
self.cpm2 = nn.Conv2d(self.cs, 256, kernel_size=3, dilation=2,
stride=1, padding=2)
self.cpm3 = nn.Conv2d(256, 128, kernel_size=3, dilation=1, stride=1,
padding=1)
self.cpm4 = nn.Conv2d(256, 128, kernel_size=3, dilation=2, stride=1,
padding=2)
self.cpm5 = nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1,
padding=1)
def forward(self, input_0):
primals_1 = self.cpm1.weight
primals_2 = self.cpm1.bias
primals_4 = self.cpm2.weight
primals_5 = self.cpm2.bias
primals_6 = self.cpm3.weight
primals_7 = self.cpm3.bias
primals_8 = self.cpm4.weight
primals_9 = self.cpm4.bias
primals_10 = self.cpm5.weight
primals_11 = self.cpm5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
juanmed/FaceDetection-DSFD
|
FEM
| false
| 10,328
|
[
"Apache-2.0"
] | 0
|
23650ca492444f9f052ca9b8db8b068a9be5bc68
|
https://github.com/juanmed/FaceDetection-DSFD/tree/23650ca492444f9f052ca9b8db8b068a9be5bc68
|
ExpandNetLoss
|
import torch
from torch import nn
class ExpandNetLoss(nn.Module):
def __init__(self, loss_lambda=5):
super(ExpandNetLoss, self).__init__()
self.similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-20)
self.l1_loss = nn.L1Loss()
self.loss_lambda = loss_lambda
def forward(self, x, y):
cosine_term = (1 - self.similarity(x, y)).mean()
return self.l1_loss(x, y) + self.loss_lambda * cosine_term
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
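# --- Usage sketch (illustrative; not part of the upstream snippet) ---
# The loss is L1(x, y) plus loss_lambda times the mean cosine distance
# (1 - cos_sim along dim=1), so identical inputs give (numerically) zero.
loss_fn = ExpandNetLoss(loss_lambda=5)
x = torch.rand(4, 4, 4, 4)
assert loss_fn(x, x).item() < 1e-5  # both terms vanish when x == y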
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_clamp_min_div_linalg_vector_norm_mean_mul_sub_0(
in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
r3 = rindex // 64
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp7 = tl.load(in_ptr0 + (r1 + 64 * r3), None, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (16 + r1 + 64 * r3), None, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (32 + r1 + 64 * r3), None, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (48 + r1 + 64 * r3), None, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr1 + (r1 + 64 * r3), None, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr1 + (16 + r1 + 64 * r3), None, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr1 + (32 + r1 + 64 * r3), None, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr1 + (48 + r1 + 64 * r3), None, eviction_policy=
'evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp8 = tmp7 * tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp11 + tmp13
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = 1e-20
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp0 / tmp20
tmp23 = tmp22 * tmp22
tmp25 = tmp24 * tmp24
tmp26 = tmp23 + tmp25
tmp28 = tmp27 * tmp27
tmp29 = tmp26 + tmp28
tmp31 = tmp30 * tmp30
tmp32 = tmp29 + tmp31
tmp33 = libdevice.sqrt(tmp32)
tmp34 = triton_helpers.maximum(tmp33, tmp19)
tmp35 = tmp1 / tmp34
tmp36 = tmp21 * tmp35
tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp36, None)
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_per_fused_abs_add_mean_mul_rsub_sub_sum_1(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp12 = tl.load(in_out_ptr0 + 0)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, 1])
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 64.0
tmp17 = tmp11 / tmp16
tmp18 = 5.0
tmp19 = tmp17 * tmp18
tmp20 = tmp15 + tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_clamp_min_div_linalg_vector_norm_mean_mul_sub_0[
grid(1)](arg1_1, arg0_1, buf0, buf1, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
buf3 = buf0
del buf0
triton_per_fused_abs_add_mean_mul_rsub_sub_sum_1[grid(1)](buf3,
buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf1
return buf3,
class ExpandNetLossNew(nn.Module):
def __init__(self, loss_lambda=5):
super(ExpandNetLossNew, self).__init__()
self.similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-20)
self.l1_loss = nn.L1Loss()
self.loss_lambda = loss_lambda
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
kacperkk2/temporalStableExpandNet
|
ExpandNetLoss
| false
| 10,329
|
[
"BSD-3-Clause-Clear"
] | 0
|
87a4d6c8c1a47b721760c9daf2727e380b90c541
|
https://github.com/kacperkk2/temporalStableExpandNet/tree/87a4d6c8c1a47b721760c9daf2727e380b90c541
|
ScaledDotProductAttention
|
import torch
import numpy as np
from torch import nn
class ScaledDotProductAttention(nn.Module):
"""
Scaled dot-product attention
"""
def __init__(self, d_model, d_k, d_v, h):
"""
:param d_model: Output dimensionality of the model
:param d_k: Dimensionality of queries and keys
:param d_v: Dimensionality of values
:param h: Number of heads
"""
super(ScaledDotProductAttention, self).__init__()
self.fc_q = nn.Linear(d_model, h * d_k)
self.fc_k = nn.Linear(d_model, h * d_k)
self.fc_v = nn.Linear(d_model, h * d_v)
self.fc_o = nn.Linear(h * d_v, d_model)
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.h = h
self.init_weights()
def init_weights(self):
nn.init.xavier_uniform_(self.fc_q.weight)
nn.init.xavier_uniform_(self.fc_k.weight)
nn.init.xavier_uniform_(self.fc_v.weight)
nn.init.xavier_uniform_(self.fc_o.weight)
nn.init.constant_(self.fc_q.bias, 0)
nn.init.constant_(self.fc_k.bias, 0)
nn.init.constant_(self.fc_v.bias, 0)
nn.init.constant_(self.fc_o.bias, 0)
def forward(self, queries, keys, values, attention_mask=None,
attention_weights=None):
"""
        Computes scaled dot-product attention over the given queries, keys and values.
:param queries: Queries (b_s, nq, d_model)
:param keys: Keys (b_s, nk, d_model)
:param values: Values (b_s, nk, d_model)
:param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking.
:param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk).
        :return: Attention output (b_s, nq, d_model)
"""
b_s, nq = queries.shape[:2]
nk = keys.shape[1]
q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2,
1, 3)
k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1)
v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2,
1, 3)
att = torch.matmul(q, k) / np.sqrt(self.d_k)
if attention_weights is not None:
att = att * attention_weights
if attention_mask is not None:
att = att.masked_fill(attention_mask.bool(), -np.inf)
att = torch.softmax(att, -1)
out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s,
nq, self.h * self.d_v)
out = self.fc_o(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'d_model': 4, 'd_k': 4, 'd_v': 4, 'h': 4}]
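# --- Usage sketch (illustrative; not part of the upstream snippet) ---
# Self-attention: queries, keys and values are all the same tensor of shape
# (b_s, seq, d_model). The module projects to h heads of width d_k/d_v
# internally and returns the (b_s, nq, d_model) shape of the queries.
attn = ScaledDotProductAttention(d_model=4, d_k=4, d_v=4, h=4)
x = torch.rand(4, 4, 4)
assert attn(x, x, x).shape == (4, 4, 4)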
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_sqrt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([1], 2.0, tl.float64)
tmp2 = tl.full([1], 0.0, tl.float64)
tmp3 = tmp1 >= tmp2
tmp4 = 1.0
tmp5 = -1.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp8 * tmp6
tmp11 = tmp10 * tmp6
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp6
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp6
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp7 - tmp18
tmp20 = tmp6.to(tl.float64)
tmp21 = tmp20 * tmp1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp19 / tmp22
tmp24 = tl_math.exp(tmp23)
tl.store(out_ptr0 + x2, tmp24, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16,), (1,))
assert_size_stride(primals_7, (16, 4), (4, 1))
assert_size_stride(primals_8, (16,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 16), (16, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](buf0, primals_4, buf3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, primals_6, buf4, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_sqrt_2[grid(256)](buf5, buf6, 256, XBLOCK
=128, num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_0[grid(256)](buf2, primals_8, buf8, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 16),
(16, 1), 0), reinterpret_tensor(primals_10, (16, 4), (1, 16), 0
), alpha=1, beta=1, out=buf11)
del primals_11
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0
), primals_10, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0)
class ScaledDotProductAttentionNew(nn.Module):
"""
Scaled dot-product attention
"""
def __init__(self, d_model, d_k, d_v, h):
"""
:param d_model: Output dimensionality of the model
:param d_k: Dimensionality of queries and keys
:param d_v: Dimensionality of values
:param h: Number of heads
"""
super(ScaledDotProductAttentionNew, self).__init__()
self.fc_q = nn.Linear(d_model, h * d_k)
self.fc_k = nn.Linear(d_model, h * d_k)
self.fc_v = nn.Linear(d_model, h * d_v)
self.fc_o = nn.Linear(h * d_v, d_model)
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.h = h
self.init_weights()
def init_weights(self):
nn.init.xavier_uniform_(self.fc_q.weight)
nn.init.xavier_uniform_(self.fc_k.weight)
nn.init.xavier_uniform_(self.fc_v.weight)
nn.init.xavier_uniform_(self.fc_o.weight)
nn.init.constant_(self.fc_q.bias, 0)
nn.init.constant_(self.fc_k.bias, 0)
nn.init.constant_(self.fc_v.bias, 0)
nn.init.constant_(self.fc_o.bias, 0)
def forward(self, input_0, input_1, input_2):
primals_3 = self.fc_q.weight
primals_4 = self.fc_q.bias
primals_5 = self.fc_k.weight
primals_6 = self.fc_k.bias
primals_7 = self.fc_v.weight
primals_8 = self.fc_v.bias
primals_10 = self.fc_o.weight
primals_11 = self.fc_o.bias
primals_1 = input_0
primals_2 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
jmhessel/meshed-memory-transformer
|
ScaledDotProductAttention
| false
| 10,330
|
[
"BSD-3-Clause"
] | 0
|
b502da2522f2e25d602fba547ed6ebf7968857a9
|
https://github.com/jmhessel/meshed-memory-transformer/tree/b502da2522f2e25d602fba547ed6ebf7968857a9
|
Clamp
|
import torch
from torch import nn
class Clamp(nn.Module):
"""Clamp energy output"""
def forward(self, x):
x = torch.clamp(x, min=0, max=30)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
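# --- Usage sketch (illustrative; not part of the upstream snippet) ---
# torch.clamp with min=0, max=30 saturates the energy output at both ends.
clamp = Clamp()
print(clamp(torch.tensor([-1.0, 15.0, 99.0])))  # tensor([ 0., 15., 30.])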
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 30.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ClampNew(nn.Module):
"""Clamp energy output"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
kgreif24/mlhep-aka
|
Clamp
| false
| 10,331
|
[
"Apache-2.0"
] | 0
|
41e120eb3e7049a01ffdb22c4e00b3aaca94b541
|
https://github.com/kgreif24/mlhep-aka/tree/41e120eb3e7049a01ffdb22c4e00b3aaca94b541
|
AvgConsensus
|
import torch
import torch.nn as nn
class AvgConsensus(nn.Module):
"""Average consensus module.
Args:
        dim (int): The dimension along which the consensus (mean) is
            applied. Default: 1.
"""
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, x):
"""Defines the computation performed at every call."""
return x.mean(dim=self.dim, keepdim=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
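# --- Usage sketch (illustrative; not part of the upstream snippet) ---
# Averaging over dim=1 with keepdim=True collapses that axis to size 1.
consensus = AvgConsensus(dim=1)
x = torch.rand(4, 4, 4, 4)
y = consensus(x)
assert y.shape == (4, 1, 4, 4)
assert torch.allclose(y.squeeze(1), x.mean(dim=1))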
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class AvgConsensusNew(nn.Module):
"""Average consensus module.
Args:
        dim (int): The dimension along which the consensus (mean) is
            applied. Default: 1.
"""
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
giahaowjx/mmaction2
|
AvgConsensus
| false
| 10,332
|
[
"Apache-2.0"
] | 0
|
4f95e9b91354acdcae768ce94e01d3821bba0154
|
https://github.com/giahaowjx/mmaction2/tree/4f95e9b91354acdcae768ce94e01d3821bba0154
|
OffsetNet
|
import torch
import torch.nn as nn
class OffsetNet(nn.Module):
"""OffsetNet in Temporal interlace module.
    The OffsetNet consists of one convolution layer and two fc layers
    with a relu activation, followed by a sigmoid function. Following
    the convolution layer, two fc layers and a relu are applied to the
    output. Then, 0.5 is subtracted from the sigmoid output and the
    result is multiplied by 4, mapping it into (-2, 2).
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
num_segments (int): Number of frame segments.
"""
def __init__(self, in_channels, groups, num_segments):
super().__init__()
self.sigmoid = nn.Sigmoid()
kernel_size = 3
padding = 1
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc1 = nn.Linear(num_segments, num_segments)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(num_segments, groups)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
self.fc2.bias.data[...] = 0.5108
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
n, _, t = x.shape
x = self.conv(x)
x = x.view(n, t)
x = self.relu(self.fc1(x))
x = self.fc2(x)
x = x.view(n, 1, -1)
x = 4 * (self.sigmoid(x) - 0.5)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'groups': 1, 'num_segments': 4}]
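# --- Usage sketch (illustrative; not part of the upstream snippet) ---
# For input (n, c, t) the conv collapses channels, the fc layers mix the t
# segments, and 4 * (sigmoid(x) - 0.5) bounds each offset to (-2, 2).
net = OffsetNet(in_channels=4, groups=1, num_segments=4)
x = torch.rand(4, 4, 4)
offsets = net(x)
assert offsets.shape == (4, 1, 1)
assert offsets.abs().max() < 2.0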
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 0.5
tmp3 = tmp1 - tmp2
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(16)](buf3, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
buf6 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_poi_fused_mul_sigmoid_sub_2[grid(4)](buf5, buf6, 4, XBLOCK=4,
num_warps=1, num_stages=1)
return buf6, primals_1, primals_2, reinterpret_tensor(buf1, (4, 4), (4,
1), 0), buf3, buf5, primals_6, primals_4
class OffsetNetNew(nn.Module):
"""OffsetNet in Temporal interlace module.
    The OffsetNet consists of one convolution layer and two fc layers
    with a relu activation, followed by a sigmoid function. Following
    the convolution layer, two fc layers and a relu are applied to the
    output. Then, 0.5 is subtracted from the sigmoid output and the
    result is multiplied by 4, mapping it into (-2, 2).
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
num_segments (int): Number of frame segments.
"""
def __init__(self, in_channels, groups, num_segments):
super().__init__()
self.sigmoid = nn.Sigmoid()
kernel_size = 3
padding = 1
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc1 = nn.Linear(num_segments, num_segments)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(num_segments, groups)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
self.fc2.bias.data[...] = 0.5108
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
giahaowjx/mmaction2
|
OffsetNet
| false
| 10,333
|
[
"Apache-2.0"
] | 0
|
4f95e9b91354acdcae768ce94e01d3821bba0154
|
https://github.com/giahaowjx/mmaction2/tree/4f95e9b91354acdcae768ce94e01d3821bba0154
|
ScaledDotProductAttentionMemory
|
import torch
import numpy as np
from torch import nn
class ScaledDotProductAttentionMemory(nn.Module):
"""
Scaled dot-product attention with memory
"""
def __init__(self, d_model, d_k, d_v, h, m):
"""
:param d_model: Output dimensionality of the model
:param d_k: Dimensionality of queries and keys
:param d_v: Dimensionality of values
:param h: Number of heads
:param m: Number of memory slots
"""
super(ScaledDotProductAttentionMemory, self).__init__()
self.fc_q = nn.Linear(d_model, h * d_k)
self.fc_k = nn.Linear(d_model, h * d_k)
self.fc_v = nn.Linear(d_model, h * d_v)
self.fc_o = nn.Linear(h * d_v, d_model)
self.m_k = nn.Parameter(torch.FloatTensor(1, m, h * d_k))
self.m_v = nn.Parameter(torch.FloatTensor(1, m, h * d_v))
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.h = h
self.m = m
self.init_weights()
def init_weights(self):
nn.init.xavier_uniform_(self.fc_q.weight)
nn.init.xavier_uniform_(self.fc_k.weight)
nn.init.xavier_uniform_(self.fc_v.weight)
nn.init.xavier_uniform_(self.fc_o.weight)
nn.init.normal_(self.m_k, 0, 1 / self.d_k)
nn.init.normal_(self.m_v, 0, 1 / self.m)
nn.init.constant_(self.fc_q.bias, 0)
nn.init.constant_(self.fc_k.bias, 0)
nn.init.constant_(self.fc_v.bias, 0)
nn.init.constant_(self.fc_o.bias, 0)
def forward(self, queries, keys, values, attention_mask=None,
attention_weights=None):
"""
        Computes scaled dot-product attention, with the keys and values augmented by the learned memory slots.
:param queries: Queries (b_s, nq, d_model)
:param keys: Keys (b_s, nk, d_model)
:param values: Values (b_s, nk, d_model)
:param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking.
:param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk).
        :return: Attention output (b_s, nq, d_model)
"""
b_s, nq = queries.shape[:2]
nk = keys.shape[1]
m_k = np.sqrt(self.d_k) * self.m_k.expand(b_s, self.m, self.h *
self.d_k)
m_v = np.sqrt(self.m) * self.m_v.expand(b_s, self.m, self.h * self.d_v)
q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2,
1, 3)
k = torch.cat([self.fc_k(keys), m_k], 1).view(b_s, nk + self.m,
self.h, self.d_k).permute(0, 2, 3, 1)
v = torch.cat([self.fc_v(values), m_v], 1).view(b_s, nk + self.m,
self.h, self.d_v).permute(0, 2, 1, 3)
att = torch.matmul(q, k) / np.sqrt(self.d_k)
if attention_weights is not None:
att = torch.cat([att[:, :, :, :nk] * attention_weights, att[:,
:, :, nk:]], -1)
if attention_mask is not None:
att[:, :, :, :nk] = att[:, :, :, :nk].masked_fill(attention_mask,
-np.inf)
att = torch.softmax(att, -1)
out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s,
nq, self.h * self.d_v)
out = self.fc_o(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'d_model': 4, 'd_k': 4, 'd_v': 4, 'h': 4, 'm': 4}]
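# --- Usage sketch (illustrative; not part of the upstream snippet) ---
# The m learned memory slots extend the keys and values, so attention runs
# over nk + m positions while the output keeps the query shape.
attn = ScaledDotProductAttentionMemory(d_model=4, d_k=4, d_v=4, h=4, m=4)
x = torch.rand(4, 4, 4)
assert attn(x, x, x).shape == (4, 4, 4)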
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1 + 16 * x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x1 + 16 * (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = 2.0
tmp11 = tmp10 * tmp9
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, xmask)
@triton.jit
def triton_per_fused__softmax_sqrt_2(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0)
tmp1 = tl.full([1, 1], 2.0, tl.float64)
tmp2 = tl.full([1, 1], 0.0, tl.float64)
tmp3 = tmp1 >= tmp2
tmp4 = 1.0
tmp5 = -1.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, float('-inf'))
tmp11 = triton_helpers.max2(tmp10, 1)[:, None]
tmp12 = tmp7 - tmp11
tmp13 = tmp6.to(tl.float64)
tmp14 = tmp13 * tmp1
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp12 / tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.where(xmask, tmp18, 0)
tmp21 = tl.sum(tmp20, 1)[:, None]
tmp22 = tmp17 / tmp21
tl.store(out_ptr2 + (r1 + 8 * x0), tmp22, xmask)
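# Mirrors triton_poi_fused_clone_1 for the values: concatenates fc_v(values)
# with the memory values scaled by sqrt(m) (2.0 for m=4) into a
# (b_s, h, nk + m, d_v) tensor for the att @ v bmm.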
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32 % 4
x3 = xindex // 128
x4 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp4 &
xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * (-4 + x1)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = 2.0
tmp11 = tmp10 * tmp9
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x4, tmp14, xmask)
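# Permutes the attention output from (b_s, h, nq, d_v) back to
# (b_s, nq, h, d_v) and makes it contiguous so fc_o can run as a single addmm.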
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
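# call() orchestrates the forward pass: the linear projections run as cuBLAS
# mm/addmm calls, while the layout/concat/softmax work runs in the fused
# kernels above; the trailing tensors in the return tuple are retained for
# backward. Note this trace was evidently captured with attention_mask=None
# and attention_weights=None, so neither branch of the eager forward appears.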
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 4, 16), (64, 16, 1))
assert_size_stride(primals_4, (1, 4, 16), (64, 16, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16,), (1,))
assert_size_stride(primals_7, (16, 4), (4, 1))
assert_size_stride(primals_8, (16,), (1,))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16,), (1,))
assert_size_stride(primals_11, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf0)
del primals_5
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(primals_2, (16,
4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf1)
del primals_7
del primals_8
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_10, reinterpret_tensor(primals_11, (16,
4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf2)
del primals_10
del primals_9
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](buf0, primals_6, buf3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_6
buf4 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
triton_poi_fused_clone_1[grid(512)](buf1, primals_3, buf4, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((16, 4, 8), (32, 8, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 8), (32, 8, 1), 0), out=buf5)
buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
triton_per_fused__softmax_sqrt_2[grid(64)](buf5, buf8, 64, 8,
XBLOCK=32, num_warps=2, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 8, 4), (128, 32, 4, 1), 0)
del buf5
triton_poi_fused_clone_3[grid(512)](buf2, primals_4, buf9, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf10 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 8), (32, 8, 1),
0), reinterpret_tensor(buf9, (16, 8, 4), (32, 4, 1), 0), out=buf10)
buf11 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_clone_4[grid(256)](buf10, buf11, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf10
buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf11, (16, 16),
(16, 1), 0), reinterpret_tensor(primals_12, (16, 4), (1, 16), 0
), alpha=1, beta=1, out=buf12)
del primals_13
return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_11, (16, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf11, (16, 16), (16, 1), 0
), primals_12, reinterpret_tensor(buf9, (16, 4, 8), (32, 1, 4), 0
), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (16, 8, 4), (32, 1, 8), 0)
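# Compiled wrapper module: parameters and initialization match the original
# ScaledDotProductAttentionMemory, but forward dispatches to call() above
# instead of the eager PyTorch ops.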
class ScaledDotProductAttentionMemoryNew(nn.Module):
"""
Scaled dot-product attention with memory
"""
def __init__(self, d_model, d_k, d_v, h, m):
"""
:param d_model: Output dimensionality of the model
:param d_k: Dimensionality of queries and keys
:param d_v: Dimensionality of values
:param h: Number of heads
:param m: Number of memory slots
"""
super(ScaledDotProductAttentionMemoryNew, self).__init__()
self.fc_q = nn.Linear(d_model, h * d_k)
self.fc_k = nn.Linear(d_model, h * d_k)
self.fc_v = nn.Linear(d_model, h * d_v)
self.fc_o = nn.Linear(h * d_v, d_model)
self.m_k = nn.Parameter(torch.FloatTensor(1, m, h * d_k))
self.m_v = nn.Parameter(torch.FloatTensor(1, m, h * d_v))
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.h = h
self.m = m
self.init_weights()
def init_weights(self):
nn.init.xavier_uniform_(self.fc_q.weight)
nn.init.xavier_uniform_(self.fc_k.weight)
nn.init.xavier_uniform_(self.fc_v.weight)
nn.init.xavier_uniform_(self.fc_o.weight)
nn.init.normal_(self.m_k, 0, 1 / self.d_k)
nn.init.normal_(self.m_v, 0, 1 / self.m)
nn.init.constant_(self.fc_q.bias, 0)
nn.init.constant_(self.fc_k.bias, 0)
nn.init.constant_(self.fc_v.bias, 0)
nn.init.constant_(self.fc_o.bias, 0)
def forward(self, input_0, input_1, input_2):
primals_3 = self.m_k
primals_4 = self.m_v
primals_5 = self.fc_q.weight
primals_6 = self.fc_q.bias
primals_7 = self.fc_k.weight
primals_8 = self.fc_k.bias
primals_9 = self.fc_v.weight
primals_10 = self.fc_v.bias
primals_12 = self.fc_o.weight
primals_13 = self.fc_o.bias
primals_1 = input_0
primals_2 = input_1
primals_11 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
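# A minimal usage sketch for the compiled wrapper (editorial, not part of the
# original file); it assumes a CUDA device, since call() pins device 0:
#
#   attn = ScaledDotProductAttentionMemoryNew(d_model=4, d_k=4, d_v=4, h=4, m=4).cuda()
#   q = torch.rand(4, 4, 4, device='cuda')
#   out = attn(q, q, q)  # -> (4, 4, 4)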
|
jmhessel/meshed-memory-transformer
|
ScaledDotProductAttentionMemory
| false
| 10,334
|
[
"BSD-3-Clause"
] | 0
|
b502da2522f2e25d602fba547ed6ebf7968857a9
|
https://github.com/jmhessel/meshed-memory-transformer/tree/b502da2522f2e25d602fba547ed6ebf7968857a9
|