entry_point (string, 1-65 chars) | original_triton_python_code (string, 208-619k chars) | optimised_triton_code (string, 1.15k-275k chars) | repo_name (string, 7-115 chars) | module_name (string, 1-65 chars) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (list, 1-6 items) | stars (int64, 0-19.8k) | sha (string, 40 chars) | repo_link (string, 72-180 chars) |
|---|---|---|---|---|---|---|---|---|---|---|
L2loss
|
import torch
class L2loss(torch.nn.Module):
def __init__(self):
super(L2loss, self).__init__()
def forward(self, y, yhat):
loss = (y - yhat).pow(2).sum() / y.shape[0]
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
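# Hedged usage sketch (editorial addition, not part of the repository source): runs the
# eager L2loss module above on the sample tensors from get_inputs(). The loss is the sum
# of squared errors divided by the first-dimension size (4), matching the 0.25 factor
# hard-coded in the optimised kernel below. Assumes CPU execution only.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    criterion = L2loss(*init_args, **init_kwargs)
    y, yhat = get_inputs()
    print(criterion(y, yhat))  # scalar tensor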
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_pow_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L2lossNew(torch.nn.Module):
def __init__(self):
super(L2lossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
btolooshams/densae
|
L2loss
| false
| 6,365
|
[
"MIT"
] | 1
|
a1e4c4cc1b4be0386d42136f2695615ea3cf4815
|
https://github.com/btolooshams/densae/tree/a1e4c4cc1b4be0386d42136f2695615ea3cf4815
|
FeedForwardActorNN
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class FeedForwardActorNN(nn.Module):
def __init__(self, in_dim, out_dim, is_discrete):
super(FeedForwardActorNN, self).__init__()
self.layer1 = nn.Linear(in_dim, 64)
self.layer2 = nn.Linear(64, 64)
self.layer3 = nn.Linear(64, out_dim)
self.is_discrete = is_discrete
def forward(self, obs):
if isinstance(obs, np.ndarray):
obs = torch.tensor(obs, dtype=torch.float)
activation1 = F.relu(self.layer1(obs))
activation2 = F.relu(self.layer2(activation1))
if self.is_discrete:
output = torch.softmax(self.layer3(activation2), dim=0)
else:
output = self.layer3(activation2)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4, 'is_discrete': 4}]
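# Hedged usage sketch (editorial addition, not part of the repository source): forward
# pass of the eager FeedForwardActorNN above on the sample observation. The sample config
# passes is_discrete=4, which is truthy, so the softmax(dim=0) branch is taken.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    actor = FeedForwardActorNN(*init_args, **init_kwargs)
    obs, = get_inputs()
    print(actor(obs).shape)  # torch.Size([4, 4, 4, 4])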
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_3, buf8, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf3,
primals_5, buf7, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(
buf3, (64, 64), (64, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class FeedForwardActorNNNew(nn.Module):
def __init__(self, in_dim, out_dim, is_discrete):
super(FeedForwardActorNNNew, self).__init__()
self.layer1 = nn.Linear(in_dim, 64)
self.layer2 = nn.Linear(64, 64)
self.layer3 = nn.Linear(64, out_dim)
self.is_discrete = is_discrete
def forward(self, input_0):
primals_2 = self.layer1.weight
primals_3 = self.layer1.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_6 = self.layer3.weight
primals_7 = self.layer3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
britig/policy-refinement-bo
|
FeedForwardActorNN
| false
| 6,366
|
[
"MIT"
] | 1
|
c8a1e347d6e27c991e945afae9b5d9b482806f4b
|
https://github.com/britig/policy-refinement-bo/tree/c8a1e347d6e27c991e945afae9b5d9b482806f4b
|
Disc
|
import torch
from torch import nn
from torch.nn import functional as F
class MLP(nn.Module):
"""
Multi-Layer Perceptron
:param in_dim: int, size of input feature
:param n_classes: int, number of output classes
:param hidden_dim: int, size of hidden vector
:param dropout: float, dropout rate
:param n_layers: int, number of layers, at least 2, default = 2
:param act: function, activation function, default = leaky_relu
"""
def __init__(self, in_dim, n_classes, hidden_dim, dropout, n_layers=2,
act=F.leaky_relu):
super(MLP, self).__init__()
self.l_in = nn.Linear(in_dim, hidden_dim)
self.l_hs = nn.ModuleList(nn.Linear(hidden_dim, hidden_dim) for _ in
range(n_layers - 2))
self.l_out = nn.Linear(hidden_dim, n_classes)
self.dropout = nn.Dropout(p=dropout)
self.act = act
return
def forward(self, input):
"""
:param input: Tensor of (batch_size, in_dim), input feature
:returns: Tensor of (batch_size, n_classes), output class
"""
hidden = self.act(self.l_in(self.dropout(input)))
for l_h in self.l_hs:
hidden = self.act(l_h(self.dropout(hidden)))
output = self.l_out(self.dropout(hidden))
return output
class Disc(nn.Module):
"""
2-layer discriminator for MI estimator
:param x_dim: int, size of x vector
:param y_dim: int, size of y vector
:param dropout: float, dropout rate
"""
def __init__(self, x_dim, y_dim, dropout):
super(Disc, self).__init__()
self.disc = MLP(x_dim + y_dim, 1, y_dim, dropout, n_layers=2)
return
def forward(self, x, y):
"""
:param x: Tensor of (batch_size, hidden_dim), x
:param y: Tensor of (batch_size, hidden_dim), y
:returns: Tensor of (batch_size), score
"""
input = torch.cat((x, y), dim=-1)
score = self.disc(input).squeeze(-1)
return score
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'x_dim': 4, 'y_dim': 4, 'dropout': 0.5}]
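# Hedged usage sketch (editorial addition, not part of the repository source): scores an
# (x, y) pair with the eager Disc above. .eval() disables the 0.5 dropout so the score is
# deterministic for a given input pair.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    disc = Disc(*init_args, **init_kwargs).eval()
    x, y = get_inputs()
    print(disc(x, y).shape)  # torch.Size([4, 4, 4])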
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(256)](buf1, primals_4, buf2,
buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_4
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_6
return reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 8), (8, 1), 0
), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_5
class MLP(nn.Module):
"""
Multi-Layer Perceptron
:param in_dim: int, size of input feature
:param n_classes: int, number of output classes
:param hidden_dim: int, size of hidden vector
:param dropout: float, dropout rate
:param n_layers: int, number of layers, at least 2, default = 2
:param act: function, activation function, default = leaky_relu
"""
def __init__(self, in_dim, n_classes, hidden_dim, dropout, n_layers=2,
act=F.leaky_relu):
super(MLP, self).__init__()
self.l_in = nn.Linear(in_dim, hidden_dim)
self.l_hs = nn.ModuleList(nn.Linear(hidden_dim, hidden_dim) for _ in
range(n_layers - 2))
self.l_out = nn.Linear(hidden_dim, n_classes)
self.dropout = nn.Dropout(p=dropout)
self.act = act
return
def forward(self, input):
"""
:param input: Tensor of (batch_size, in_dim), input feature
:returns: Tensor of (batch_size, n_classes), output class
"""
hidden = self.act(self.l_in(self.dropout(input)))
for l_h in self.l_hs:
hidden = self.act(l_h(self.dropout(hidden)))
output = self.l_out(self.dropout(hidden))
return output
class DiscNew(nn.Module):
"""
2-layer discriminator for MI estimator
:param x_dim: int, size of x vector
:param y_dim: int, size of y vector
:param dropout: float, dropout rate
"""
def __init__(self, x_dim, y_dim, dropout):
super(DiscNew, self).__init__()
self.disc = MLP(x_dim + y_dim, 1, y_dim, dropout, n_layers=2)
return
def forward(self, input_0, input_1):
primals_3 = self.disc.l_in.weight
primals_4 = self.disc.l_in.bias
primals_5 = self.disc.l_out.weight
primals_6 = self.disc.l_out.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
bigdata-ustc/DisenQNet
|
Disc
| false
| 6,367
|
[
"MIT"
] | 1
|
908fadeb9b8d278450213deff70205703bd91da6
|
https://github.com/bigdata-ustc/DisenQNet/tree/908fadeb9b8d278450213deff70205703bd91da6
|
MultiheadAttentionWrapper
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
from torch.optim.lr_scheduler import *
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
def linear(x):
return x
def activation(func_a):
"""Activation function wrapper
"""
try:
f = eval(func_a)
except:
f = linear
return f
class DropoutWrapper(nn.Module):
"""
This is a dropout wrapper which supports the fix mask dropout
"""
def __init__(self, dropout_p=0, enable_vbp=True):
super(DropoutWrapper, self).__init__()
"""variational dropout means fix dropout mask
ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11
"""
self.enable_variational_dropout = enable_vbp
self.dropout_p = dropout_p
def forward(self, x):
"""
:param x: batch * len * input_size
"""
if self.training is False or self.dropout_p == 0:
return x
if len(x.size()) == 3:
mask = 1.0 / (1 - self.dropout_p) * torch.bernoulli((1 - self.
dropout_p) * (x.data.new(x.size(0), x.size(2)).zero_() + 1))
mask.requires_grad = False
return mask.unsqueeze(1).expand_as(x) * x
else:
return F.dropout(x, p=self.dropout_p, training=self.training)
class MultiheadAttentionWrapper(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, query_dim, key_dim, value_dim, prefix='attention',
opt={}, dropout=None):
super().__init__()
self.prefix = prefix
self.num_heads = opt.get('{}_head'.format(self.prefix), 1)
self.dropout = DropoutWrapper(opt.get('{}_dropout'.format(self.
prefix), 0)) if dropout is None else dropout
self.qkv_dim = [query_dim, key_dim, value_dim]
assert query_dim == key_dim, 'query dim must equal with key dim'
self.hidden_size = opt.get('{}_hidden_size'.format(self.prefix), 64)
self.proj_on = opt.get('{}_proj_on'.format(prefix), False)
self.share = opt.get('{}_share'.format(self.prefix), False)
self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
self.scale_on = opt.get('{}_scale_on'.format(self.prefix), False)
if self.proj_on:
self.proj_modules = nn.ModuleList([nn.Linear(dim, self.
hidden_size) for dim in self.qkv_dim[0:2]])
if self.layer_norm_on:
for proj in self.proj_modules:
proj = weight_norm(proj)
if self.share and self.qkv_dim[0] == self.qkv_dim[1]:
self.proj_modules[1] = self.proj_modules[0]
self.f = activation(opt.get('{}_activation'.format(self.prefix),
'relu'))
self.qkv_head_dim = [self.hidden_size // self.num_heads] * 3
self.qkv_head_dim[2] = value_dim // self.num_heads
assert self.qkv_head_dim[0
] * self.num_heads == self.hidden_size, 'hidden size must be divisible by num_heads'
assert self.qkv_head_dim[2
] * self.num_heads == value_dim, 'value size must be divisible by num_heads'
else:
self.qkv_head_dim = [(emb // self.num_heads) for emb in self.
qkv_dim]
assert self.qkv_head_dim[0] * self.num_heads == self.qkv_dim[0
], 'query size must be divisible by num_heads'
assert self.qkv_head_dim[1] * self.num_heads == self.qkv_dim[1
], 'key size must be divisible by num_heads'
assert self.qkv_head_dim[2] * self.num_heads == self.qkv_dim[2
], 'value size must be divisible by num_heads'
if self.scale_on:
self.scaling = self.qkv_head_dim[0] ** -0.5
self.drop_diagonal = opt.get('{}_drop_diagonal'.format(self.prefix),
False)
self.output_size = self.qkv_dim[2]
def forward(self, query, key, value, key_padding_mask=None):
query = query.transpose(0, 1)
key = key.transpose(0, 1)
value = value.transpose(0, 1)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.qkv_dim[0]
q, k, v = query, key, value
if self.proj_on:
if self.dropout:
q, k = self.dropout(q), self.dropout(k)
q, k = [self.f(proj(input)) for input, proj in zip([query, key],
self.proj_modules)]
src_len = k.size(0)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.scale_on:
q *= self.scaling
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.
qkv_head_dim[0]).transpose(0, 1)
k = k.contiguous().view(src_len, bsz * self.num_heads, self.
qkv_head_dim[1]).transpose(0, 1)
v = v.contiguous().view(src_len, bsz * self.num_heads, self.
qkv_head_dim[2]).transpose(0, 1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
attn_weights = attn_weights.float().masked_fill(key_padding_mask
.unsqueeze(1).unsqueeze(2), float('-inf')).type_as(attn_weights
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len,
src_len)
if self.drop_diagonal:
assert attn_weights.size(1) == attn_weights.size(2)
diag_mask = torch.diag(attn_weights.data.new(attn_weights.size(
1)).zero_() + 1).byte().unsqueeze(0).expand_as(attn_weights)
attn_weights.data.masked_fill_(diag_mask, -float('inf'))
attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(
attn_weights)
attn_weights = self.dropout(attn_weights)
attn = torch.bmm(attn_weights, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
qkv_head_dim[2]]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, -1)
attn = attn.transpose(0, 1)
return attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'query_dim': 4, 'key_dim': 4, 'value_dim': 4}]
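# Hedged usage sketch (editorial addition, not part of the repository source): with the
# default opt dict the eager wrapper above runs single-head attention with no projection
# and no scaling, so q/k/v are used as-is and the output keeps the value shape.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    attn = MultiheadAttentionWrapper(*init_args, **init_kwargs)
    q, k, v = get_inputs()
    print(attn(q, k, v).shape)  # torch.Size([4, 4, 4])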
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
from torch.optim.lr_scheduler import *
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1), tmp0, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps
=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32)
triton_poi_fused_0[grid(64)](arg1_1, buf1, 64, XBLOCK=64, num_warps
=1, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32)
triton_poi_fused_0[grid(64)](arg2_1, buf2, 64, XBLOCK=64, num_warps
=1, num_stages=1)
del arg2_1
buf3 = torch.ops.aten._scaled_dot_product_efficient_attention.default(
buf0, buf1, buf2, None, False, scale=1.0)
del buf0
del buf1
del buf2
buf4 = buf3[0]
del buf3
return reinterpret_tensor(buf4, (4, 4, 4), (4, 16, 1), 0),
def linear(x):
return x
def activation(func_a):
"""Activation function wrapper
"""
try:
f = eval(func_a)
except:
f = linear
return f
class DropoutWrapper(nn.Module):
"""
This is a dropout wrapper which supports the fix mask dropout
"""
def __init__(self, dropout_p=0, enable_vbp=True):
super(DropoutWrapper, self).__init__()
"""variational dropout means fix dropout mask
ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11
"""
self.enable_variational_dropout = enable_vbp
self.dropout_p = dropout_p
def forward(self, x):
"""
:param x: batch * len * input_size
"""
if self.training is False or self.dropout_p == 0:
return x
if len(x.size()) == 3:
mask = 1.0 / (1 - self.dropout_p) * torch.bernoulli((1 - self.
dropout_p) * (x.data.new(x.size(0), x.size(2)).zero_() + 1))
mask.requires_grad = False
return mask.unsqueeze(1).expand_as(x) * x
else:
return F.dropout(x, p=self.dropout_p, training=self.training)
class MultiheadAttentionWrapperNew(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, query_dim, key_dim, value_dim, prefix='attention',
opt={}, dropout=None):
super().__init__()
self.prefix = prefix
self.num_heads = opt.get('{}_head'.format(self.prefix), 1)
self.dropout = DropoutWrapper(opt.get('{}_dropout'.format(self.
prefix), 0)) if dropout is None else dropout
self.qkv_dim = [query_dim, key_dim, value_dim]
assert query_dim == key_dim, 'query dim must equal with key dim'
self.hidden_size = opt.get('{}_hidden_size'.format(self.prefix), 64)
self.proj_on = opt.get('{}_proj_on'.format(prefix), False)
self.share = opt.get('{}_share'.format(self.prefix), False)
self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
self.scale_on = opt.get('{}_scale_on'.format(self.prefix), False)
if self.proj_on:
self.proj_modules = nn.ModuleList([nn.Linear(dim, self.
hidden_size) for dim in self.qkv_dim[0:2]])
if self.layer_norm_on:
for proj in self.proj_modules:
proj = weight_norm(proj)
if self.share and self.qkv_dim[0] == self.qkv_dim[1]:
self.proj_modules[1] = self.proj_modules[0]
self.f = activation(opt.get('{}_activation'.format(self.prefix),
'relu'))
self.qkv_head_dim = [self.hidden_size // self.num_heads] * 3
self.qkv_head_dim[2] = value_dim // self.num_heads
assert self.qkv_head_dim[0
] * self.num_heads == self.hidden_size, 'hidden size must be divisible by num_heads'
assert self.qkv_head_dim[2
] * self.num_heads == value_dim, 'value size must be divisible by num_heads'
else:
self.qkv_head_dim = [(emb // self.num_heads) for emb in self.
qkv_dim]
assert self.qkv_head_dim[0] * self.num_heads == self.qkv_dim[0
], 'query size must be divisible by num_heads'
assert self.qkv_head_dim[1] * self.num_heads == self.qkv_dim[1
], 'key size must be divisible by num_heads'
assert self.qkv_head_dim[2] * self.num_heads == self.qkv_dim[2
], 'value size must be divisible by num_heads'
if self.scale_on:
self.scaling = self.qkv_head_dim[0] ** -0.5
self.drop_diagonal = opt.get('{}_drop_diagonal'.format(self.prefix),
False)
self.output_size = self.qkv_dim[2]
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
brightgems/BartWithRL
|
MultiheadAttentionWrapper
| false
| 6,368
|
[
"MIT"
] | 1
|
17614c4009ec976cdc73dacaf94573a6d8f6d529
|
https://github.com/brightgems/BartWithRL/tree/17614c4009ec976cdc73dacaf94573a6d8f6d529
|
CNNCifar
|
from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.nn.functional as F
class CNNCifar(nn.Module):
def __init__(self, args):
super(CNNCifar, self).__init__()
self.conv1 = nn.Conv2d(3, 64, 5)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(64, 64, 5)
self.pool2 = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(64 * 3 * 3, 384)
self.fc2 = nn.Linear(384, 192)
self.fc3 = nn.Linear(192, args.num_classes)
def forward(self, x):
x = self.pool1(F.relu(self.conv1(x)))
x = self.pool2(F.relu(self.conv2(x)))
x = x.view(-1, 64 * 3 * 3)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 24, 24])]
def get_init_inputs():
return [[], {'args': _mock_config(num_classes=4)}]
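# Hedged usage sketch (editorial addition, not part of the repository source): forward
# pass of the eager CNNCifar above on a 4x3x24x24 batch, the spatial size for which the
# 64*3*3 flatten in forward() lines up (24 -> conv5 -> 20 -> pool -> 10 -> conv5 -> 6 ->
# pool -> 3). Relies on the _mock_config helper imported at the top of this cell.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    net = CNNCifar(*init_args, **init_kwargs)
    x, = get_inputs()
    print(net(x).shape)  # torch.Size([4, 4])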
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 400 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 10
x1 = xindex // 10
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 40 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 40 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (20 + 2 * x0 + 40 * x1), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (21 + 2 * x0 + 40 * x1), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 36 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 12 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 12 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (6 + 2 * x0 + 12 * x1), xmask, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (7 + 2 * x0 + 12 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 24, 24), (1728, 576, 24, 1))
assert_size_stride(primals_4, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (384, 576), (576, 1))
assert_size_stride(primals_7, (384,), (1,))
assert_size_stride(primals_8, (192, 384), (384, 1))
assert_size_stride(primals_9, (192,), (1,))
assert_size_stride(primals_10, (4, 192), (192, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 20, 20), (25600, 400, 20, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(102400)](buf1, primals_2,
102400, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(25600)](buf1, buf2,
buf3, 25600, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 6, 6), (2304, 36, 6, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(9216)](buf5, primals_5,
9216, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 64, 3, 3), (576, 9, 3, 1), torch.int8)
buf7 = empty_strided_cuda((4, 64, 3, 3), (576, 9, 3, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(2304)](buf5, buf6,
buf7, 2304, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 384), (384, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 576), (576, 1), 0),
reinterpret_tensor(primals_6, (576, 384), (1, 576), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(1536)](buf9, primals_7, 1536, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 192), (192, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (384, 192), (
1, 384), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(768)](buf11, primals_9, 768, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (192, 4), (1, 192), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 576), (576, 1), 0), buf9, buf11,
primals_10, primals_8, primals_6)
class CNNCifarNew(nn.Module):
def __init__(self, args):
super(CNNCifarNew, self).__init__()
self.conv1 = nn.Conv2d(3, 64, 5)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(64, 64, 5)
self.pool2 = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(64 * 3 * 3, 384)
self.fc2 = nn.Linear(384, 192)
self.fc3 = nn.Linear(192, args.num_classes)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
bobvo23/Federated-Learning-PyTorch
|
CNNCifar
| false
| 6,369
|
[
"MIT"
] | 1
|
e5cffe8f39cfad76c13c78b9f1c6ef0976e4cc81
|
https://github.com/bobvo23/Federated-Learning-PyTorch/tree/e5cffe8f39cfad76c13c78b9f1c6ef0976e4cc81
|
MLP
|
import torch
import torch as th
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, input_size, output_size, hidden=128):
super(MLP, self).__init__()
self.linear1 = nn.Linear(input_size, hidden, bias=False)
self.linear2 = nn.Linear(hidden, output_size, bias=False)
def forward(self, x):
x = self.linear1(x)
x = th.tanh(x)
x = self.linear2(x)
x = th.tanh(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
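# Hedged usage sketch (editorial addition, not part of the repository source): the eager
# MLP above is bias-free with tanh after both linear layers, so outputs lie in (-1, 1).
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    mlp = MLP(*init_args, **init_kwargs)
    x, = get_inputs()
    print(mlp(x).shape)  # torch.Size([4, 4, 4, 4])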
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, None)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 128), (128, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(8192)](buf1, 8192, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_3, (128, 4), (1, 128), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_1[grid(256)](buf3, 256, XBLOCK=256, num_warps
=4, num_stages=1)
return buf3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf1, buf3, primals_3
class MLPNew(nn.Module):
def __init__(self, input_size, output_size, hidden=128):
super(MLPNew, self).__init__()
self.linear1 = nn.Linear(input_size, hidden, bias=False)
self.linear2 = nn.Linear(hidden, output_size, bias=False)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_3 = self.linear2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
bwubrian/cherry
|
MLP
| false
| 6,370
|
[
"Apache-2.0"
] | 1
|
de0cd2d833336144bce2a0b97e4dad40cbd78d7c
|
https://github.com/bwubrian/cherry/tree/de0cd2d833336144bce2a0b97e4dad40cbd78d7c
|
Parseval_Conv2d
|
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
class Parseval_Conv2d(nn.Conv2d):
def forward(self, input):
new_weight = self.weight / np.sqrt(2 * self.kernel_size[0] * self.
kernel_size[1] + 1)
return F.conv2d(input, new_weight, self.bias, self.stride, self.
padding, self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
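# Hedged usage sketch (editorial addition, not part of the repository source): the eager
# Parseval_Conv2d above rescales the weight by 1/sqrt(2*4*4 + 1) = 1/sqrt(33) before a
# plain conv2d; the same constant (5.744562646538029) is baked into the optimised kernel
# below. A 4x4 kernel over a 4x4 input with no padding gives 1x1 spatial output.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    conv = Parseval_Conv2d(*init_args, **init_kwargs)
    x, = get_inputs()
    print(conv(x).shape)  # torch.Size([4, 4, 1, 1])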
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 5.744562646538029
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sqrt_0[grid(256)](primals_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, primals_3, buf0
class Parseval_Conv2dNew(nn.Conv2d):
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
cadurosar/laplacian_networks
|
Parseval_Conv2d
| false
| 6,371
|
[
"MIT"
] | 1
|
27f6f2d7145426b38f578e9c1beecae3e7392f1b
|
https://github.com/cadurosar/laplacian_networks/tree/27f6f2d7145426b38f578e9c1beecae3e7392f1b
|
SuperLoss
|
import torch
import torch.utils.data
from torch import nn
import torch
class netMSELoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, target):
return self.computeLoss(output, target)
def computeLoss(self, output, target):
loss = torch.mean((output - target) ** 2)
return loss
class SuperLoss(nn.Module):
def __init__(self, Losses=[], Weights=[], Names=[]):
super().__init__()
if not Losses:
self.Losses = [netMSELoss()]
self.Weights = [1.0]
self.Names = ['Default MSE Loss']
else:
if len(Losses) != len(Weights):
raise RuntimeError(
'SuperLoss() given Losses and Weights dont match.')
self.Losses = Losses
self.Weights = Weights
self.Names = [('Subloss ' + str(i).zfill(2)) for i in range(len
(self.Losses))]
for Ctr, n in enumerate(Names, 0):
self.Names[Ctr] = n
self.cleanUp()
def __len__(self):
return len(self.Losses)
def getItems(self, withoutWeights=False):
RetLossValsFloat = []
if withoutWeights:
for v in self.LossVals:
RetLossValsFloat.append(v.item())
else:
for v in self.LossValsWeighted:
RetLossValsFloat.append(v.item())
return RetLossValsFloat
def cleanUp(self):
self.LossVals = [0.0] * len(self.Losses)
self.LossValsWeighted = [0.0] * len(self.Losses)
def forward(self, output, target):
self.cleanUp()
return self.computeLoss(output, target)
def computeLoss(self, output, target):
TotalLossVal = 0.0
for Ctr, (l, w) in enumerate(zip(self.Losses, self.Weights), 0):
LossVal = l.forward(output, target)
self.LossVals[Ctr] = LossVal
self.LossValsWeighted[Ctr] = w * LossVal
TotalLossVal += self.LossValsWeighted[Ctr]
return TotalLossVal
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
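# Hedged usage sketch (editorial addition, not part of the repository source): with no
# sub-losses supplied, the eager SuperLoss above falls back to a single unit-weighted MSE
# term, so the total equals torch.mean((output - target) ** 2).
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    criterion = SuperLoss(*init_args, **init_kwargs)
    out, tgt = get_inputs()
    total = criterion(out, tgt)
    print(total, criterion.getItems())  # total loss and per-subloss values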
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
import torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = 0.0
tmp12 = tmp10 + tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mean_mul_pow_sub_0[grid(1)](buf1, arg0_1,
arg1_1, buf2, buf3, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3, buf2, buf1
class netMSELoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, target):
return self.computeLoss(output, target)
def computeLoss(self, output, target):
loss = torch.mean((output - target) ** 2)
return loss
class SuperLossNew(nn.Module):
def __init__(self, Losses=[], Weights=[], Names=[]):
super().__init__()
if not Losses:
self.Losses = [netMSELoss()]
self.Weights = [1.0]
self.Names = ['Default MSE Loss']
else:
if len(Losses) != len(Weights):
raise RuntimeError(
'SuperLoss() given Losses and Weights dont match.')
self.Losses = Losses
self.Weights = Weights
self.Names = [('Subloss ' + str(i).zfill(2)) for i in range(len
(self.Losses))]
for Ctr, n in enumerate(Names, 0):
self.Names[Ctr] = n
self.cleanUp()
def __len__(self):
return len(self.Losses)
def getItems(self, withoutWeights=False):
RetLossValsFloat = []
if withoutWeights:
for v in self.LossVals:
RetLossValsFloat.append(v.item())
else:
for v in self.LossValsWeighted:
RetLossValsFloat.append(v.item())
return RetLossValsFloat
def cleanUp(self):
self.LossVals = [0.0] * len(self.Losses)
self.LossValsWeighted = [0.0] * len(self.Losses)
def computeLoss(self, output, target):
TotalLossVal = 0.0
for Ctr, (l, w) in enumerate(zip(self.Losses, self.Weights), 0):
LossVal = l.forward(output, target)
self.LossVals[Ctr] = LossVal
self.LossValsWeighted[Ctr] = w * LossVal
TotalLossVal += self.LossValsWeighted[Ctr]
return TotalLossVal
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
brown-ivl/beacon
|
SuperLoss
| false
| 6,372
|
[
"MIT"
] | 1
|
66a1714473b362294f787f261561e39c52f00e42
|
https://github.com/brown-ivl/beacon/tree/66a1714473b362294f787f261561e39c52f00e42
|
Bicubic
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bicubic(nn.Module):
def __init__(self, scale_factor=2):
super().__init__()
self.scale_factor = scale_factor
def forward(self, inputs):
bicubic_output = F.interpolate(inputs, scale_factor=self.
scale_factor, mode='bicubic')
return bicubic_output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
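# Hedged usage sketch (editorial addition, not part of the repository source): the eager
# Bicubic module above upsamples by scale_factor=2, doubling both spatial dimensions.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    upsample = Bicubic(*init_args, **init_kwargs)
    x, = get_inputs()
    print(upsample(x).shape)  # torch.Size([4, 4, 8, 8])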
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0(
in_out_ptr1, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x3 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = libdevice.floor(tmp5)
tmp7 = tmp6.to(tl.int32)
tmp8 = tl.full([1], 1, tl.int64)
tmp9 = tmp7 - tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = x0
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 + tmp2
tmp17 = tmp16 * tmp2
tmp18 = tmp17 - tmp2
tmp19 = libdevice.floor(tmp18)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 - tmp8
tmp22 = triton_helpers.maximum(tmp21, tmp10)
tmp23 = triton_helpers.minimum(tmp22, tmp12)
tmp24 = tl.load(in_ptr0 + (tmp23 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp25 = tmp18 - tmp19
tmp26 = 0.0
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp28 = 1.0
tmp29 = triton_helpers.minimum(tmp27, tmp28)
tmp30 = tmp29 + tmp28
tmp31 = -0.75
tmp32 = tmp30 * tmp31
tmp33 = -3.75
tmp34 = tmp32 - tmp33
tmp35 = tmp34 * tmp30
tmp36 = -6.0
tmp37 = tmp35 + tmp36
tmp38 = tmp37 * tmp30
tmp39 = -3.0
tmp40 = tmp38 - tmp39
tmp41 = tmp24 * tmp40
tmp42 = triton_helpers.maximum(tmp20, tmp10)
tmp43 = triton_helpers.minimum(tmp42, tmp12)
tmp44 = tl.load(in_ptr0 + (tmp43 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp45 = 1.25
tmp46 = tmp29 * tmp45
tmp47 = 2.25
tmp48 = tmp46 - tmp47
tmp49 = tmp48 * tmp29
tmp50 = tmp49 * tmp29
tmp51 = tmp50 + tmp28
tmp52 = tmp44 * tmp51
tmp53 = tmp20 + tmp8
tmp54 = triton_helpers.maximum(tmp53, tmp10)
tmp55 = triton_helpers.minimum(tmp54, tmp12)
tmp56 = tl.load(in_ptr0 + (tmp55 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp57 = tmp28 - tmp29
tmp58 = tmp57 * tmp45
tmp59 = tmp58 - tmp47
tmp60 = tmp59 * tmp57
tmp61 = tmp60 * tmp57
tmp62 = tmp61 + tmp28
tmp63 = tmp56 * tmp62
tmp64 = tl.full([1], 2, tl.int64)
tmp65 = tmp20 + tmp64
tmp66 = triton_helpers.maximum(tmp65, tmp10)
tmp67 = triton_helpers.minimum(tmp66, tmp12)
tmp68 = tl.load(in_ptr0 + (tmp67 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp69 = 2.0
tmp70 = tmp69 - tmp29
tmp71 = tmp70 * tmp31
tmp72 = tmp71 - tmp33
tmp73 = tmp72 * tmp70
tmp74 = tmp73 + tmp36
tmp75 = tmp74 * tmp70
tmp76 = tmp75 - tmp39
tmp77 = tmp68 * tmp76
tmp78 = tmp41 + tmp52
tmp79 = tmp78 + tmp63
tmp80 = tmp79 + tmp77
tmp81 = tmp5 - tmp6
tmp82 = triton_helpers.maximum(tmp81, tmp26)
tmp83 = triton_helpers.minimum(tmp82, tmp28)
tmp84 = tmp83 + tmp28
tmp85 = tmp84 * tmp31
tmp86 = tmp85 - tmp33
tmp87 = tmp86 * tmp84
tmp88 = tmp87 + tmp36
tmp89 = tmp88 * tmp84
tmp90 = tmp89 - tmp39
tmp91 = tmp80 * tmp90
tmp92 = triton_helpers.maximum(tmp7, tmp10)
tmp93 = triton_helpers.minimum(tmp92, tmp12)
tmp94 = tl.load(in_ptr0 + (tmp23 + 4 * tmp93 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp95 = tmp94 * tmp40
tmp96 = tl.load(in_ptr0 + (tmp43 + 4 * tmp93 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp97 = tmp96 * tmp51
tmp98 = tl.load(in_ptr0 + (tmp55 + 4 * tmp93 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp99 = tmp98 * tmp62
tmp100 = tl.load(in_ptr0 + (tmp67 + 4 * tmp93 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp101 = tmp100 * tmp76
tmp102 = tmp7 + tmp8
tmp103 = triton_helpers.maximum(tmp102, tmp10)
tmp104 = triton_helpers.minimum(tmp103, tmp12)
tmp105 = tl.load(in_ptr0 + (tmp23 + 4 * tmp104 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp106 = tmp105 * tmp40
tmp107 = tl.load(in_ptr0 + (tmp43 + 4 * tmp104 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp108 = tmp107 * tmp51
tmp109 = tl.load(in_ptr0 + (tmp55 + 4 * tmp104 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp110 = tmp109 * tmp62
tmp111 = tl.load(in_ptr0 + (tmp67 + 4 * tmp104 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp112 = tmp111 * tmp76
tmp113 = tmp95 + tmp97
tmp114 = tmp113 + tmp99
tmp115 = tmp114 + tmp101
tmp116 = tmp83 * tmp45
tmp117 = tmp116 - tmp47
tmp118 = tmp117 * tmp83
tmp119 = tmp118 * tmp83
tmp120 = tmp119 + tmp28
tmp121 = tmp115 * tmp120
tmp122 = tmp91 + tmp121
tmp123 = tmp106 + tmp108
tmp124 = tmp123 + tmp110
tmp125 = tmp124 + tmp112
tmp126 = tmp28 - tmp83
tmp127 = tmp126 * tmp45
tmp128 = tmp127 - tmp47
tmp129 = tmp128 * tmp126
tmp130 = tmp129 * tmp126
tmp131 = tmp130 + tmp28
tmp132 = tmp125 * tmp131
tmp133 = tmp122 + tmp132
tmp134 = tmp7 + tmp64
tmp135 = triton_helpers.maximum(tmp134, tmp10)
tmp136 = triton_helpers.minimum(tmp135, tmp12)
tmp137 = tl.load(in_ptr0 + (tmp23 + 4 * tmp136 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp138 = tmp137 * tmp40
tmp139 = tl.load(in_ptr0 + (tmp43 + 4 * tmp136 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp140 = tmp139 * tmp51
tmp141 = tl.load(in_ptr0 + (tmp55 + 4 * tmp136 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp142 = tmp141 * tmp62
tmp143 = tl.load(in_ptr0 + (tmp67 + 4 * tmp136 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp144 = tmp143 * tmp76
tmp145 = tmp138 + tmp140
tmp146 = tmp145 + tmp142
tmp147 = tmp146 + tmp144
tmp148 = tmp69 - tmp83
tmp149 = tmp148 * tmp31
tmp150 = tmp149 - tmp33
tmp151 = tmp150 * tmp148
tmp152 = tmp151 + tmp36
tmp153 = tmp152 * tmp148
tmp154 = tmp153 - tmp39
tmp155 = tmp147 * tmp154
tmp156 = tmp133 + tmp155
tl.store(in_out_ptr1 + x3, tmp156, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf10 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32
)
buf13 = buf10
del buf10
buf19 = buf13
del buf13
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0[
grid(1024)](buf19, arg0_1, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
del arg0_1
return buf19,
class BicubicNew(nn.Module):
def __init__(self, scale_factor=2):
super().__init__()
self.scale_factor = scale_factor
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
bui-thanh-lam/image-super-resolution
|
Bicubic
| false
| 6,373
|
[
"BSD-2-Clause"
] | 1
|
8eee69c9fdd3aaf760fabfb5a294f083c7ddf4ac
|
https://github.com/bui-thanh-lam/image-super-resolution/tree/8eee69c9fdd3aaf760fabfb5a294f083c7ddf4ac
|
FCBottleNeck
|
import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn
import torch
class FCBottleNeck(nn.Module):
def __init__(self, InFeatureSize):
super().__init__()
self.FC1 = nn.Linear(InFeatureSize, 2048)
self.FC2 = nn.Linear(2048, 2048)
self.FC3 = nn.Linear(2048, InFeatureSize)
def forward(self, x):
x_pe = x
x_pe = F.relu(self.FC1(x_pe))
x_pe = F.relu(self.FC2(x_pe))
x_pe = self.FC3(x_pe)
return x_pe
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'InFeatureSize': 4}]
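# Hedged usage sketch (editorial addition, not part of the repository source): the eager
# FCBottleNeck above expands the last dimension to 2048 through two ReLU layers and
# projects back to the input feature size, so the output shape matches the input shape.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    bottleneck = FCBottleNeck(*init_args, **init_kwargs)
    x, = get_inputs()
    print(bottleneck(x).shape)  # torch.Size([4, 4, 4, 4])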
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
import torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2048, 4), (4, 1))
assert_size_stride(primals_3, (2048,), (1,))
assert_size_stride(primals_4, (2048, 2048), (2048, 1))
assert_size_stride(primals_5, (2048,), (1,))
assert_size_stride(primals_6, (4, 2048), (2048, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 2048), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2048), (32768, 8192, 2048,
1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(131072)](buf1,
primals_3, buf6, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0
), reinterpret_tensor(primals_4, (2048, 2048), (1, 2048), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2048), (32768, 8192, 2048,
1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(131072)](buf3,
primals_5, buf5, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 2048),
(2048, 1), 0), reinterpret_tensor(primals_6, (2048, 4), (1,
2048), 0), alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0
), reinterpret_tensor(buf3, (64, 2048), (2048, 1), 0
), primals_6, buf5, primals_4, buf6
class FCBottleNeckNew(nn.Module):
def __init__(self, InFeatureSize):
super().__init__()
self.FC1 = nn.Linear(InFeatureSize, 2048)
self.FC2 = nn.Linear(2048, 2048)
self.FC3 = nn.Linear(2048, InFeatureSize)
def forward(self, input_0):
primals_2 = self.FC1.weight
primals_3 = self.FC1.bias
primals_4 = self.FC2.weight
primals_5 = self.FC2.bias
primals_6 = self.FC3.weight
primals_7 = self.FC3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
brown-ivl/beacon
|
FCBottleNeck
| false
| 6,374
|
[
"MIT"
] | 1
|
66a1714473b362294f787f261561e39c52f00e42
|
https://github.com/brown-ivl/beacon/tree/66a1714473b362294f787f261561e39c52f00e42
|
CustomizedNet
|
import torch
import torch.nn as nn
import torch.utils.data.distributed
class CustomizedNet(nn.Module):
def __init__(self, dropout, input_size, input_feature_num, hidden_dim,
output_size):
"""
Simply use linear layers for multi-variate single-step forecasting.
"""
super().__init__()
self.fc1 = nn.Linear(input_size * input_feature_num, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(hidden_dim, output_size)
def forward(self, x):
x = x.view(-1, x.shape[1] * x.shape[2])
x = self.fc1(x)
x = self.dropout(x)
x = self.relu1(x)
x = self.fc2(x)
x = torch.unsqueeze(x, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dropout': 0.5, 'input_size': 4, 'input_feature_num': 4,
'hidden_dim': 4, 'output_size': 4}]
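# Hedged usage sketch (illustrative only, not part of the original record):
# the module flattens the (lookback, feature) dims, so a [4, 4, 4, 4] batch is
# viewed as [16, 16] before the linear layers and unsqueezed back afterwards.
if __name__ == '__main__':
    _net = CustomizedNet(dropout=0.5, input_size=4, input_feature_num=4,
        hidden_dim=4, output_size=4)
    _out = _net(torch.rand([4, 4, 4, 4]))
    print(_out.shape)  # expected: torch.Size([16, 1, 4])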
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 16), (16, 1),
0), reinterpret_tensor(primals_2, (16, 4), (1, 16), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (16, 1, 4), (4, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), buf1, primals_4
class CustomizedNetNew(nn.Module):
def __init__(self, dropout, input_size, input_feature_num, hidden_dim,
output_size):
"""
Simply use linear layers for multi-variate single-step forecasting.
"""
super().__init__()
self.fc1 = nn.Linear(input_size * input_feature_num, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(hidden_dim, output_size)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
cabuliwallah/analytics-zoo
|
CustomizedNet
| false
| 6,375
|
[
"Apache-2.0"
] | 1
|
5e662bd01c5fc7eed412973119594cf2ecea8b11
|
https://github.com/cabuliwallah/analytics-zoo/tree/5e662bd01c5fc7eed412973119594cf2ecea8b11
|
Policy
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Policy(nn.Module):
"""
implements both actor and critic in one model
"""
def __init__(self):
super(Policy, self).__init__()
self.affine1 = nn.Linear(4, 128)
self.action_head = nn.Linear(128, 2)
self.value_head = nn.Linear(128, 1)
self.saved_actions = []
self.rewards = []
def forward(self, x):
"""
forward of both actor and critic
"""
x = F.relu(self.affine1(x))
action_prob = F.softmax(self.action_head(x), dim=-1)
state_values = self.value_head(x)
return action_prob, state_values
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
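# Minimal sketch of how the shared trunk feeds both heads (an illustration
# added here, not part of the original record): one call returns the action
# distribution and the state-value estimate together.
if __name__ == '__main__':
    _policy = Policy()
    _probs, _values = _policy(torch.rand([4, 4, 4, 4]))
    print(_probs.shape, _values.shape)  # torch.Size([4, 4, 4, 2]) torch.Size([4, 4, 4, 1])
    print(_probs.sum(dim=-1))  # softmax over the last dim, so each row sums to ~1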
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 128), (128, 1))
assert_size_stride(primals_5, (2,), (1,))
assert_size_stride(primals_6, (1, 128), (128, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf6, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 2), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused__softmax_1[grid(128)](buf2, buf3, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128),
0), alpha=1, beta=1, out=buf5)
del primals_7
return buf3, reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0
), buf3, primals_6, primals_4, buf6
class PolicyNew(nn.Module):
"""
implements both actor and critic in one model
"""
def __init__(self):
super(PolicyNew, self).__init__()
self.affine1 = nn.Linear(4, 128)
self.action_head = nn.Linear(128, 2)
self.value_head = nn.Linear(128, 1)
self.saved_actions = []
self.rewards = []
def forward(self, input_0):
primals_1 = self.affine1.weight
primals_2 = self.affine1.bias
primals_4 = self.action_head.weight
primals_5 = self.action_head.bias
primals_6 = self.value_head.weight
primals_7 = self.value_head.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
|
caimingxue/Reinforcement-Learning
|
Policy
| false
| 6,376
|
[
"MIT"
] | 1
|
5ccb8a6a25b41526f4d6195e69964245abc46d38
|
https://github.com/caimingxue/Reinforcement-Learning/tree/5ccb8a6a25b41526f4d6195e69964245abc46d38
|
Decoder
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class RC(nn.Module):
"""
A wrapper class for ReflectionPad2d, Conv2d and an optional relu
"""
def __init__(self, in_dim, out_dim, kernel_size=3, padding=1,
activation_function=True):
super().__init__()
self.pad = nn.ReflectionPad2d((padding, padding, padding, padding))
self.conv = nn.Conv2d(in_dim, out_dim, kernel_size)
self.activation_function = activation_function
def forward(self, x):
x = self.pad(x)
x = self.conv(x)
return F.relu(x) if self.activation_function else x
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.rc1 = RC(512, 256, 3, 1)
self.upsample1 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc2 = RC(256, 256, 3, 1)
self.rc3 = RC(256, 256, 3, 1)
self.rc4 = RC(256, 256, 3, 1)
self.rc5 = RC(256, 128, 3, 1)
self.upsample2 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc6 = RC(128, 128, 3, 1)
self.rc7 = RC(128, 64, 3, 1)
self.upsample3 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc8 = RC(64, 64, 3, 1)
self.rc9 = RC(64, 3, 3, 1, False)
def forward(self, x):
x = self.rc1(x)
x = self.upsample1(x)
x = self.rc2(x)
x = self.rc3(x)
x = self.rc4(x)
x = self.rc5(x)
x = self.upsample2(x)
x = self.rc6(x)
x = self.rc7(x)
x = self.upsample3(x)
x = self.rc8(x)
x = self.rc9(x)
return x
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return [[], {}]
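# Illustrative sketch (not part of the original record): each RC block pads
# reflectively before its 3x3 conv, so spatial size is preserved, and the three
# nearest-neighbour upsamples take a 4x4 feature map up to a 32x32 RGB image.
if __name__ == '__main__':
    _decoder = Decoder()
    _img = _decoder(torch.rand([4, 512, 4, 4]))
    print(_img.shape)  # expected: torch.Size([4, 3, 32, 32])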
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), None,
eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 10 % 10
x0 = xindex % 10
x4 = xindex // 100
x2 = xindex // 100 % 256
x7 = xindex
tmp0 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1
))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0
))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 10
x1 = xindex // 10 % 10
x4 = xindex // 100
x2 = xindex // 100 % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 +
x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 18 % 18
x0 = xindex % 18
x4 = xindex // 324
x2 = xindex // 324 % 128
x7 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x1))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = xindex // 18 % 18
x4 = xindex // 324
x2 = xindex // 324 % 128
x5 = xindex
tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4),
None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 34 % 34
x0 = xindex % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 64
x7 = xindex
tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x1))), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x4), xmask,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, xmask)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = xindex // 34 % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_13(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1))
assert_size_stride(primals_2, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (3, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(73728)](primals_1, buf0,
73728, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 4, 4), (4096, 16, 4, 1))
buf2 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf2, 8, XBLOCK
=8, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2[grid
(102400)](buf2, buf1, primals_3, buf3, 102400, XBLOCK=512,
num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1))
buf5 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf4
, primals_5, buf5, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 8, 8), (16384, 64, 8, 1))
buf7 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf6
, primals_7, buf7, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 8, 8), (16384, 64, 8, 1))
buf9 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf8
, primals_9, buf9, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 8, 8), (8192, 64, 8, 1))
buf11 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_4[grid(16)](buf11, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5[grid
(165888)](buf11, buf10, primals_11, buf12, 165888, XBLOCK=1024,
num_warps=4, num_stages=1)
buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 16, 16), (32768, 256, 16, 1))
buf14 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(165888)](
buf13, primals_13, buf14, 165888, XBLOCK=512, num_warps=8,
num_stages=1)
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 16, 16), (16384, 256, 16, 1))
buf16 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_7[grid(32)](buf16, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8[grid
(295936)](buf16, buf15, primals_15, buf17, 295936, XBLOCK=1024,
num_warps=4, num_stages=1)
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf19 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_9[grid(295936)](
buf18, primals_17, buf19, 295936, XBLOCK=512, num_warps=8,
num_stages=1)
buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 3, 32, 32), (3072, 1024, 32, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_10[grid(12288)](buf21, primals_19,
12288, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf22 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_11[grid(262144)](
buf18, primals_17, buf22, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf18
del primals_17
buf23 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_12[grid(65536)](
buf15, primals_15, buf23, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del buf15
del primals_15
buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_13[grid(131072)](
buf13, primals_13, buf24, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf13
del primals_13
buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_14[grid(32768)](
buf10, primals_11, buf25, 32768, XBLOCK=256, num_warps=4,
num_stages=1)
del buf10
del primals_11
buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf8, primals_9, buf26, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf8
del primals_9
buf27 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf6, primals_7, buf27, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf6
del primals_7
buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf4, primals_5, buf28, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf4
del primals_5
buf29 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_16[grid(16384)](
buf1, primals_3, buf29, 16384, XBLOCK=128, num_warps=4,
num_stages=1)
del buf1
del primals_3
return (buf21, primals_2, primals_4, primals_6, primals_8, primals_10,
primals_12, primals_14, primals_16, primals_18, buf0, buf2, buf3,
buf5, buf7, buf9, buf11, buf12, buf14, buf16, buf17, buf19, buf22,
buf23, buf24, buf25, buf26, buf27, buf28, buf29)
class RC(nn.Module):
"""
A wrapper class for ReflectionPad2d, Conv2d and an optional relu
"""
def __init__(self, in_dim, out_dim, kernel_size=3, padding=1,
activation_function=True):
super().__init__()
self.pad = nn.ReflectionPad2d((padding, padding, padding, padding))
self.conv = nn.Conv2d(in_dim, out_dim, kernel_size)
self.activation_function = activation_function
def forward(self, x):
x = self.pad(x)
x = self.conv(x)
return F.relu(x) if self.activation_function else x
class DecoderNew(nn.Module):
def __init__(self):
super(DecoderNew, self).__init__()
self.rc1 = RC(512, 256, 3, 1)
self.upsample1 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc2 = RC(256, 256, 3, 1)
self.rc3 = RC(256, 256, 3, 1)
self.rc4 = RC(256, 256, 3, 1)
self.rc5 = RC(256, 128, 3, 1)
self.upsample2 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc6 = RC(128, 128, 3, 1)
self.rc7 = RC(128, 64, 3, 1)
self.upsample3 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc8 = RC(64, 64, 3, 1)
self.rc9 = RC(64, 3, 3, 1, False)
def forward(self, input_0):
primals_2 = self.rc1.conv.weight
primals_3 = self.rc1.conv.bias
primals_4 = self.rc2.conv.weight
primals_5 = self.rc2.conv.bias
primals_6 = self.rc3.conv.weight
primals_7 = self.rc3.conv.bias
primals_8 = self.rc4.conv.weight
primals_9 = self.rc4.conv.bias
primals_10 = self.rc5.conv.weight
primals_11 = self.rc5.conv.bias
primals_12 = self.rc6.conv.weight
primals_13 = self.rc6.conv.bias
primals_14 = self.rc7.conv.weight
primals_15 = self.rc7.conv.bias
primals_16 = self.rc8.conv.weight
primals_17 = self.rc8.conv.bias
primals_18 = self.rc9.conv.weight
primals_19 = self.rc9.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0]
|
benningtonlee7/AdaIn_Style_Transfer_From_Scratch_In_Pytorch
|
Decoder
| false
| 6,377
|
[
"MIT"
] | 1
|
50dfe4bdcbcdd0f4e647f9ee45de2a3f81eb6722
|
https://github.com/benningtonlee7/AdaIn_Style_Transfer_From_Scratch_In_Pytorch/tree/50dfe4bdcbcdd0f4e647f9ee45de2a3f81eb6722
|
DurationPredictorLoss
|
import torch
class DurationPredictorLoss(torch.nn.Module):
"""Loss function module for duration predictor.
    The loss value is calculated in the log domain to make it Gaussian.
"""
def __init__(self, offset=1.0):
"""Initilize duration predictor loss module.
Args:
offset (float, optional): Offset value to avoid nan in log domain.
"""
super(DurationPredictorLoss, self).__init__()
self.criterion = torch.nn.MSELoss()
self.offset = offset
def forward(self, outputs, targets):
"""Calculate forward propagation.
Args:
outputs (Tensor): Batch of prediction durations in log domain (B, T)
targets (LongTensor): Batch of groundtruth durations in linear domain (B, T)
Returns:
Tensor: Mean squared error loss value.
Note:
`outputs` is in log domain but `targets` is in linear domain.
"""
targets = torch.log(targets.float() + self.offset)
loss = self.criterion(outputs, targets)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
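# Hedged usage sketch (illustrative only, not part of the original record):
# the predictions are assumed to be in the log domain already, so only the
# linear-domain targets get the log(t + offset) transform before the MSE.
if __name__ == '__main__':
    _criterion = DurationPredictorLoss(offset=1.0)
    _pred_log = torch.rand([4, 4, 4, 4])    # log-domain predictions
    _dur_linear = torch.rand([4, 4, 4, 4])  # linear-domain durations
    _loss = _criterion(_pred_log, _dur_linear)
    print(_loss)  # scalar MSE between _pred_log and log(_dur_linear + 1.0)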
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = tmp0 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log_mse_loss_0[grid(1)](buf1, arg1_1, arg0_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class DurationPredictorLossNew(torch.nn.Module):
"""Loss function module for duration predictor.
    The loss value is calculated in the log domain to make it Gaussian.
"""
def __init__(self, offset=1.0):
"""Initilize duration predictor loss module.
Args:
offset (float, optional): Offset value to avoid nan in log domain.
"""
super(DurationPredictorLossNew, self).__init__()
self.criterion = torch.nn.MSELoss()
self.offset = offset
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
carankt/FastSpeech2-1
|
DurationPredictorLoss
| false
| 6,378
|
[
"Apache-2.0"
] | 1
|
42c06e4fbdf741a0719154d1cb4617b7d3f15a5c
|
https://github.com/carankt/FastSpeech2-1/tree/42c06e4fbdf741a0719154d1cb4617b7d3f15a5c
|
MessageNorm
|
import torch
from torch import Tensor
import torch.nn.functional as F
from torch.nn import Parameter
import torch.fx
import torch.utils.data
from inspect import Parameter
from torch.nn.parameter import Parameter
class MessageNorm(torch.nn.Module):
"""Applies message normalization over the aggregated messages as described
    in the `"DeeperGCN: All You Need to Train Deeper GCNs"
<https://arxiv.org/abs/2006.07739>`_ paper
.. math::
\\mathbf{x}_i^{\\prime} = \\mathrm{MLP} \\left( \\mathbf{x}_{i} + s \\cdot
{\\| \\mathbf{x}_i \\|}_2 \\cdot
\\frac{\\mathbf{m}_{i}}{{\\|\\mathbf{m}_i\\|}_2} \\right)
Args:
learn_scale (bool, optional): If set to :obj:`True`, will learn the
scaling factor :math:`s` of message normalization.
(default: :obj:`False`)
"""
def __init__(self, learn_scale: 'bool'=False):
super(MessageNorm, self).__init__()
self.scale = Parameter(torch.Tensor([1.0]), requires_grad=learn_scale)
def reset_parameters(self):
self.scale.data.fill_(1.0)
def forward(self, x: 'Tensor', msg: 'Tensor', p: 'int'=2):
""""""
msg = F.normalize(msg, p=p, dim=-1)
x_norm = x.norm(p=p, dim=-1, keepdim=True)
return msg * x_norm * self.scale
def __repr__(self):
return '{}(learn_scale={})'.format(self.__class__.__name__, self.
scale.requires_grad)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
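# Small sketch added for illustration (not part of the original record): the
# aggregated message is rescaled to the L2 norm of the node feature it updates,
# times the (optionally learnable) scale s, which stays fixed at 1.0 here.
if __name__ == '__main__':
    _norm = MessageNorm(learn_scale=False)
    _x = torch.rand([4, 4, 4, 4])
    _msg = torch.rand([4, 4, 4, 4])
    _out = _norm(_x, _msg, p=2)
    print(_out.shape)                       # torch.Size([4, 4, 4, 4])
    print(_out.norm(p=2, dim=-1)[0, 0, 0])  # matches _x.norm(p=2, dim=-1)[0, 0, 0]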
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Parameter
import torch.fx
import torch.utils.data
from inspect import Parameter
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_mul_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr2 + 0)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK])
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp17 = tmp16 * tmp16
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp25 = tmp24 * tmp24
tmp26 = tmp23 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tmp15 * tmp27
tmp31 = tmp28 * tmp30
tl.store(in_out_ptr0 + x2, tmp31, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_mul_0[grid(256)](buf1,
arg0_1, arg1_1, arg2_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class MessageNormNew(torch.nn.Module):
"""Applies message normalization over the aggregated messages as described
    in the `"DeeperGCN: All You Need to Train Deeper GCNs"
<https://arxiv.org/abs/2006.07739>`_ paper
.. math::
\\mathbf{x}_i^{\\prime} = \\mathrm{MLP} \\left( \\mathbf{x}_{i} + s \\cdot
{\\| \\mathbf{x}_i \\|}_2 \\cdot
\\frac{\\mathbf{m}_{i}}{{\\|\\mathbf{m}_i\\|}_2} \\right)
Args:
learn_scale (bool, optional): If set to :obj:`True`, will learn the
scaling factor :math:`s` of message normalization.
(default: :obj:`False`)
"""
def __init__(self, learn_scale: 'bool'=False):
super(MessageNormNew, self).__init__()
self.scale = Parameter(torch.Tensor([1.0]), requires_grad=learn_scale)
def reset_parameters(self):
self.scale.data.fill_(1.0)
def __repr__(self):
return '{}(learn_scale={})'.format(self.__class__.__name__, self.
scale.requires_grad)
def forward(self, input_0, input_1):
arg2_1 = self.scale
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
camus1337/pytorch_geometric
|
MessageNorm
| false
| 6,379
|
[
"MIT"
] | 1
|
38514197a327541eb47abb69d4ab224910852605
|
https://github.com/camus1337/pytorch_geometric/tree/38514197a327541eb47abb69d4ab224910852605
|
PGNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class PGNetwork(nn.Module):
def __init__(self, state_dim, action_dim):
super(PGNetwork, self).__init__()
self.fc1 = nn.Linear(state_dim, 20)
self.fc2 = nn.Linear(20, action_dim)
def forward(self, x):
out = F.relu(self.fc1(x))
out = self.fc2(out)
return out
    def initialize_weights(self):
        for m in self.modules():
            # Only the Linear layers carry weight/bias; self.modules() also
            # yields the network itself, which has neither.
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data, 0, 0.1)
                nn.init.constant_(m.bias.data, 0.01)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
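# Quick sketch of the intended call sequence (added for illustration, not part
# of the original record): initialize, then run a forward pass for the logits.
if __name__ == '__main__':
    _pg = PGNetwork(state_dim=4, action_dim=4)
    _pg.initialize_weights()
    _logits = _pg(torch.rand([4, 4, 4, 4]))
    print(_logits.shape)  # torch.Size([4, 4, 4, 4])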
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (20, 4), (4, 1))
assert_size_stride(primals_2, (20,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 20), (20, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 20), (320, 80, 20, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1280)](buf1,
primals_2, buf3, 1280, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 20),
(20, 1), 0), reinterpret_tensor(primals_4, (20, 4), (1, 20), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 20), (20, 1), 0), primals_4, buf3
class PGNetworkNew(nn.Module):
def __init__(self, state_dim, action_dim):
super(PGNetworkNew, self).__init__()
self.fc1 = nn.Linear(state_dim, 20)
self.fc2 = nn.Linear(20, action_dim)
    def initialize_weights(self):
        for m in self.modules():
            # Only the Linear layers carry weight/bias; self.modules() also
            # yields the network itself, which has neither.
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data, 0, 0.1)
                nn.init.constant_(m.bias.data, 0.01)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
caimingxue/Reinforcement-Learning
|
PGNetwork
| false
| 6,380
|
[
"MIT"
] | 1
|
5ccb8a6a25b41526f4d6195e69964245abc46d38
|
https://github.com/caimingxue/Reinforcement-Learning/tree/5ccb8a6a25b41526f4d6195e69964245abc46d38
|
LayerNorm
|
import torch
class LayerNorm(torch.nn.Module):
def __init__(self, nout: 'int'):
super(LayerNorm, self).__init__()
self.layer_norm = torch.nn.LayerNorm(nout, eps=1e-12)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x = self.layer_norm(x.transpose(1, -1))
x = x.transpose(1, -1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nout': 4}]
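# Illustrative sketch (not part of the original record): the transpose moves
# the channel axis (dim 1) to the end so the layer-norm statistics are taken
# over channels, then the result is transposed back to the original layout.
if __name__ == '__main__':
    _ln = LayerNorm(nout=4)
    _y = _ln(torch.rand([4, 4, 4, 4]))
    print(_y.shape)              # torch.Size([4, 4, 4, 4])
    print(_y.mean(dim=1)[0, 0])  # ~0 per position, since normalization runs over dim 1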
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-12
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y2 = yindex // 16
y4 = yindex % 16
y5 = yindex
y0 = yindex % 4
y1 = yindex // 4 % 4
tmp0 = tl.load(in_ptr0 + (y4 + 16 * x3 + 64 * y2), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y5, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y5, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x3, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x3 + 4 * y1 + 16 * y0 + 64 * y2), tmp8, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64, 4)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 64, 4, XBLOCK=4, YBLOCK=32,
num_warps=4, num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 1, 4, 16), 0), primals_1
class LayerNormNew(torch.nn.Module):
def __init__(self, nout: 'int'):
super(LayerNormNew, self).__init__()
self.layer_norm = torch.nn.LayerNorm(nout, eps=1e-12)
def forward(self, input_0):
primals_2 = self.layer_norm.weight
primals_3 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
carankt/FastSpeech2-1
|
LayerNorm
| false
| 6,381
|
[
"Apache-2.0"
] | 1
|
42c06e4fbdf741a0719154d1cb4617b7d3f15a5c
|
https://github.com/carankt/FastSpeech2-1/tree/42c06e4fbdf741a0719154d1cb4617b7d3f15a5c
|
LayerNorm
|
import torch
from torch import Tensor
from torch.nn import Parameter
from torch.nn import LayerNorm
from typing import Optional
import torch.fx
from typing import Any
import torch.utils.data
from inspect import Parameter
from torch.nn.parameter import Parameter
def maybe_num_nodes(edge_index, num_nodes=None):
if num_nodes is not None:
return num_nodes
elif isinstance(edge_index, Tensor):
return int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0
else:
return max(edge_index.size(0), edge_index.size(1))
def constant(value: 'Any', fill_value: 'float'):
if isinstance(value, Tensor):
value.data.fill_(fill_value)
else:
for v in (value.parameters() if hasattr(value, 'parameters') else []):
constant(v, fill_value)
for v in (value.buffers() if hasattr(value, 'buffers') else []):
constant(v, fill_value)
def zeros(value: 'Any'):
constant(value, 0.0)
def ones(tensor: 'Any'):
constant(tensor, 1.0)
def degree(index, num_nodes: 'Optional[int]'=None, dtype: 'Optional[int]'=None
):
"""Computes the (unweighted) degree of a given one-dimensional index
tensor.
Args:
index (LongTensor): Index tensor.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`index`. (default: :obj:`None`)
dtype (:obj:`torch.dtype`, optional): The desired data type of the
returned tensor.
:rtype: :class:`Tensor`
"""
N = maybe_num_nodes(index, num_nodes)
out = torch.zeros((N,), dtype=dtype, device=index.device)
one = torch.ones((index.size(0),), dtype=out.dtype, device=out.device)
return out.scatter_add_(0, index, one)
class LayerNorm(torch.nn.Module):
"""Applies layer normalization over each individual example in a batch
of node features as described in the `"Layer Normalization"
<https://arxiv.org/abs/1607.06450>`_ paper
.. math::
\\mathbf{x}^{\\prime}_i = \\frac{\\mathbf{x} -
\\textrm{E}[\\mathbf{x}]}{\\sqrt{\\textrm{Var}[\\mathbf{x}] + \\epsilon}}
\\odot \\gamma + \\beta
The mean and standard-deviation are calculated across all nodes and all
node channels separately for each object in a mini-batch.
Args:
in_channels (int): Size of each input sample.
eps (float, optional): A value added to the denominator for numerical
stability. (default: :obj:`1e-5`)
affine (bool, optional): If set to :obj:`True`, this module has
learnable affine parameters :math:`\\gamma` and :math:`\\beta`.
(default: :obj:`True`)
"""
def __init__(self, in_channels, eps=1e-05, affine=True):
super(LayerNorm, self).__init__()
self.in_channels = in_channels
self.eps = eps
if affine:
self.weight = Parameter(torch.Tensor([in_channels]))
self.bias = Parameter(torch.Tensor([in_channels]))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
ones(self.weight)
zeros(self.bias)
def forward(self, x: 'Tensor', batch: 'OptTensor'=None) ->Tensor:
""""""
if batch is None:
x = x - x.mean()
out = x / (x.std(unbiased=False) + self.eps)
else:
batch_size = int(batch.max()) + 1
norm = degree(batch, batch_size, dtype=x.dtype).clamp_(min=1)
norm = norm.mul_(x.size(-1)).view(-1, 1)
mean = scatter(x, batch, dim=0, dim_size=batch_size, reduce='add'
).sum(dim=-1, keepdim=True) / norm
x = x - mean[batch]
var = scatter(x * x, batch, dim=0, dim_size=batch_size, reduce=
'add').sum(dim=-1, keepdim=True)
var = var / norm
out = x / (var + self.eps).sqrt()[batch]
if self.weight is not None and self.bias is not None:
out = out * self.weight + self.bias
return out
def __repr__(self):
return f'{self.__class__.__name__}({self.in_channels})'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
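# Hedged usage sketch (illustrative only, not part of the original record):
# with batch=None the whole tensor is normalized with a single mean/std and
# then scaled and shifted by the scalar affine parameters.
if __name__ == '__main__':
    _ln = LayerNorm(in_channels=4)
    _out = _ln(torch.rand([4, 4, 4, 4]))
    print(_out.shape)   # torch.Size([4, 4, 4, 4])
    print(_out.mean())  # roughly the bias (0.0), since weight=1 and bias=0 after reset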
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import Tensor
from torch.nn import Parameter
from typing import Optional
import torch.fx
from typing import Any
import torch.utils.data
from inspect import Parameter
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp25 = tl.load(in_ptr1 + 0)
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = tl.load(in_ptr2 + 0)
tmp29 = tl.broadcast_to(tmp28, [RBLOCK])
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = tmp0 - tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = tl.broadcast_to(tmp7, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = tl.full([1], 256, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp7 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = tmp19 / tmp4
tmp21 = libdevice.sqrt(tmp20)
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = tmp6 / tmp23
tmp27 = tmp24 * tmp26
tmp30 = tmp27 + tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp23, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp30, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
buf3 = empty_strided_cuda((), (), torch.float32)
buf5 = buf3
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_std_sub_0[grid(1)](buf1, buf5,
primals_1, primals_2, primals_3, buf6, 1, 256, num_warps=2,
num_stages=1)
del primals_2
del primals_3
return buf6, primals_1, buf1, buf5
def maybe_num_nodes(edge_index, num_nodes=None):
if num_nodes is not None:
return num_nodes
elif isinstance(edge_index, Tensor):
return int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0
else:
return max(edge_index.size(0), edge_index.size(1))
def constant(value: 'Any', fill_value: 'float'):
if isinstance(value, Tensor):
value.data.fill_(fill_value)
else:
for v in (value.parameters() if hasattr(value, 'parameters') else []):
constant(v, fill_value)
for v in (value.buffers() if hasattr(value, 'buffers') else []):
constant(v, fill_value)
def zeros(value: 'Any'):
constant(value, 0.0)
def ones(tensor: 'Any'):
constant(tensor, 1.0)
def degree(index, num_nodes: 'Optional[int]'=None, dtype: 'Optional[int]'=None
):
"""Computes the (unweighted) degree of a given one-dimensional index
tensor.
Args:
index (LongTensor): Index tensor.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`index`. (default: :obj:`None`)
dtype (:obj:`torch.dtype`, optional): The desired data type of the
returned tensor.
:rtype: :class:`Tensor`
"""
N = maybe_num_nodes(index, num_nodes)
out = torch.zeros((N,), dtype=dtype, device=index.device)
one = torch.ones((index.size(0),), dtype=out.dtype, device=out.device)
return out.scatter_add_(0, index, one)
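# Editorial note (not part of the upstream record): a small worked example of degree() above --
# degree(torch.tensor([0, 0, 2])) returns tensor([2., 0., 1.]), since two entries hit node 0,
# none hit node 1, and one hits node 2.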
class LayerNormNew(torch.nn.Module):
"""Applies layer normalization over each individual example in a batch
of node features as described in the `"Layer Normalization"
<https://arxiv.org/abs/1607.06450>`_ paper
.. math::
\\mathbf{x}^{\\prime}_i = \\frac{\\mathbf{x} -
\\textrm{E}[\\mathbf{x}]}{\\sqrt{\\textrm{Var}[\\mathbf{x}] + \\epsilon}}
\\odot \\gamma + \\beta
The mean and standard-deviation are calculated across all nodes and all
node channels separately for each object in a mini-batch.
Args:
in_channels (int): Size of each input sample.
eps (float, optional): A value added to the denominator for numerical
stability. (default: :obj:`1e-5`)
affine (bool, optional): If set to :obj:`True`, this module has
learnable affine parameters :math:`\\gamma` and :math:`\\beta`.
(default: :obj:`True`)
"""
def __init__(self, in_channels, eps=1e-05, affine=True):
super(LayerNormNew, self).__init__()
self.in_channels = in_channels
self.eps = eps
if affine:
self.weight = Parameter(torch.Tensor([in_channels]))
self.bias = Parameter(torch.Tensor([in_channels]))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
ones(self.weight)
zeros(self.bias)
def __repr__(self):
return f'{self.__class__.__name__}({self.in_channels})'
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
camus1337/pytorch_geometric
|
LayerNorm
| false
| 6,382
|
[
"MIT"
] | 1
|
38514197a327541eb47abb69d4ab224910852605
|
https://github.com/camus1337/pytorch_geometric/tree/38514197a327541eb47abb69d4ab224910852605
|
MultiLayeredConv1d
|
import torch
class MultiLayeredConv1d(torch.nn.Module):
"""Multi-layered conv1d for Transformer block.
    This is a module of multi-layered conv1d designed to replace the positionwise feed-forward network
    in a Transformer block, which is introduced in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
.. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
https://arxiv.org/pdf/1905.09263.pdf
"""
def __init__(self, in_chans: 'int', hidden_chans: 'int', kernel_size:
'int', dropout_rate: 'float'):
super(MultiLayeredConv1d, self).__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Conv1d(hidden_chans, in_chans, 1, stride=1,
padding=(1 - 1) // 2)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Calculate forward propagation.
Args:
x (Tensor): Batch of input tensors (B, *, in_chans).
Returns:
Tensor: Batch of output tensors (B, *, hidden_chans)
"""
x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_chans': 4, 'hidden_chans': 4, 'kernel_size': 4,
'dropout_rate': 0.5}]
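# Editorial sketch (not part of the upstream record): a hedged usage example built from
# get_inputs()/get_init_inputs() above. The 2-D input is treated by Conv1d as an unbatched
# (channels, length) tensor, which assumes a PyTorch version that accepts unbatched conv input.
if __name__ == '__main__':
    m = MultiLayeredConv1d(in_chans=4, hidden_chans=4, kernel_size=4, dropout_rate=0.5)
    y = m(torch.rand([4, 4]))
    print(y.shape)  # expected torch.Size([4, 3]): padding=(4-1)//2 shortens the length from 4 to 3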
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 3
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 3
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 3), (12, 3, 1))
buf1 = reinterpret_tensor(buf0, (4, 3), (3, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 3), (3, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(12)](buf1,
primals_3, buf4, 12, XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 3
), (0, 3, 1), 0), primals_4, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf2, (1, 4, 3), (12, 3, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(12)](buf3, primals_5, 12,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
return reinterpret_tensor(buf3, (4, 3), (3, 1), 0
), primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4), (
16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 3), (12, 3, 1), 0), buf4
class MultiLayeredConv1dNew(torch.nn.Module):
"""Multi-layered conv1d for Transformer block.
    This is a module of multi-layered conv1d designed to replace the positionwise feed-forward network
    in a Transformer block, which is introduced in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
.. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
https://arxiv.org/pdf/1905.09263.pdf
"""
def __init__(self, in_chans: 'int', hidden_chans: 'int', kernel_size:
'int', dropout_rate: 'float'):
super(MultiLayeredConv1dNew, self).__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Conv1d(hidden_chans, in_chans, 1, stride=1,
padding=(1 - 1) // 2)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, input_0):
primals_2 = self.w_1.weight
primals_3 = self.w_1.bias
primals_4 = self.w_2.weight
primals_5 = self.w_2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
carankt/FastSpeech2-1
|
MultiLayeredConv1d
| false
| 6,383
|
[
"Apache-2.0"
] | 1
|
42c06e4fbdf741a0719154d1cb4617b7d3f15a5c
|
https://github.com/carankt/FastSpeech2-1/tree/42c06e4fbdf741a0719154d1cb4617b7d3f15a5c
|
DepthConv2d
|
import torch
import torch.nn as nn
class DepthConv2d(nn.Module):
def __init__(self, input_channel, hidden_channel, kernel, padding,
dilation=1):
super(DepthConv2d, self).__init__()
self.conv2d = nn.Conv2d(input_channel, hidden_channel, 1)
self.padding = padding
self.dconv2d = nn.Conv2d(hidden_channel, hidden_channel, kernel,
dilation=dilation, groups=hidden_channel, padding=self.padding)
self.res_out = nn.Conv2d(hidden_channel, input_channel, 1)
self.nonlinearity1 = nn.PReLU()
self.nonlinearity2 = nn.PReLU()
self.reg1 = nn.GroupNorm(1, hidden_channel, eps=1e-08)
self.reg2 = nn.GroupNorm(1, hidden_channel, eps=1e-08)
def forward(self, input):
output = self.reg1(self.nonlinearity1(self.conv2d(input)))
output = self.reg2(self.nonlinearity2(self.dconv2d(output)))
residual = self.res_out(output)
return residual
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channel': 4, 'hidden_channel': 4, 'kernel': 4,
'padding': 4}]
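# Editorial sketch (not part of the upstream record): a minimal forward pass using the shapes
# from get_inputs()/get_init_inputs(). With kernel=4 and padding=4 the depthwise conv grows the
# spatial size from 4x4 to 9x9, matching the (4, 4, 9, 9) buffers asserted in the optimized code.
if __name__ == '__main__':
    block = DepthConv2d(input_channel=4, hidden_channel=4, kernel=4, padding=4)
    res = block(torch.rand([4, 4, 4, 4]))
    print(res.shape)  # expected: torch.Size([4, 4, 9, 9])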
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_convolution_native_group_norm_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x0 = xindex
r2 = rindex // 16
tmp0 = tl.load(in_out_ptr0 + (r3 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp32 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr3 + r2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tl.where(xmask, tmp9, 0)
tmp12 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp17 = tmp16.to(tl.float32)
tmp18 = tmp15 / tmp17
tmp19 = tmp9 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.where(xmask, tmp21, 0)
tmp24 = tl.sum(tmp23, 1)[:, None]
tmp25 = tmp8 - tmp18
tmp26 = 64.0
tmp27 = tmp24 / tmp26
tmp28 = 1e-08
tmp29 = tmp27 + tmp28
tmp30 = libdevice.rsqrt(tmp29)
tmp31 = tmp25 * tmp30
tmp33 = tmp31 * tmp32
tmp35 = tmp33 + tmp34
tl.store(in_out_ptr0 + (r3 + 64 * x0), tmp2, xmask)
tl.store(out_ptr2 + (r3 + 64 * x0), tmp35, xmask)
tl.store(out_ptr3 + x0, tmp30, xmask)
tl.store(out_ptr0 + x0, tmp18, xmask)
@triton.jit
def triton_per_fused_convolution_native_group_norm_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
rnumel = 324
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r3 = rindex
x0 = xindex
r2 = rindex // 81
tmp0 = tl.load(in_out_ptr0 + (r3 + 324 * x0), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_last', other=0.0
)
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp32 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp34 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tl.where(rmask, tmp9, 0)
tmp12 = tl.broadcast_to(tmp9, [RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp16 = tl.full([1], 324, tl.int32)
tmp17 = tmp16.to(tl.float32)
tmp18 = tmp15 / tmp17
tmp19 = tmp9 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tl.broadcast_to(tmp20, [RBLOCK])
tmp23 = tl.where(rmask, tmp21, 0)
tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tmp25 = tmp8 - tmp18
tmp26 = 324.0
tmp27 = tmp24 / tmp26
tmp28 = 1e-08
tmp29 = tmp27 + tmp28
tmp30 = libdevice.rsqrt(tmp29)
tmp31 = tmp25 * tmp30
tmp33 = tmp31 * tmp32
tmp35 = tmp33 + tmp34
tl.store(in_out_ptr0 + (r3 + 324 * x0), tmp2, rmask)
tl.store(out_ptr2 + (r3 + 324 * x0), tmp35, rmask)
tl.store(out_ptr3 + x0, tmp30, None)
tl.store(out_ptr0 + x0, tmp18, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_convolution_native_group_norm_0[grid(4)](buf1,
primals_2, primals_4, primals_5, primals_6, buf2, buf5, buf6, 4,
64, XBLOCK=1, num_warps=2, num_stages=1)
del primals_2
del primals_6
buf7 = extern_kernels.convolution(buf5, primals_7, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf7, (4, 4, 9, 9), (324, 81, 9, 1))
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf12 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32
)
buf13 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_convolution_native_group_norm_1[grid(4)](buf8,
primals_8, primals_9, primals_10, primals_11, buf9, buf12,
buf13, 4, 324, num_warps=4, num_stages=1)
del primals_11
del primals_8
buf14 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 9, 9), (324, 81, 9, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_2[grid(1296)](buf15, primals_13, 1296,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
return (buf15, primals_1, primals_3, primals_4, primals_5, primals_7,
primals_9, primals_10, primals_12, buf1, buf5, reinterpret_tensor(
buf2, (4, 1), (1, 1), 0), reinterpret_tensor(buf6, (4, 1), (1, 1),
0), buf8, buf12, reinterpret_tensor(buf9, (4, 1), (1, 1), 0),
reinterpret_tensor(buf13, (4, 1), (1, 1), 0))
class DepthConv2dNew(nn.Module):
def __init__(self, input_channel, hidden_channel, kernel, padding,
dilation=1):
super(DepthConv2dNew, self).__init__()
self.conv2d = nn.Conv2d(input_channel, hidden_channel, 1)
self.padding = padding
self.dconv2d = nn.Conv2d(hidden_channel, hidden_channel, kernel,
dilation=dilation, groups=hidden_channel, padding=self.padding)
self.res_out = nn.Conv2d(hidden_channel, input_channel, 1)
self.nonlinearity1 = nn.PReLU()
self.nonlinearity2 = nn.PReLU()
self.reg1 = nn.GroupNorm(1, hidden_channel, eps=1e-08)
self.reg2 = nn.GroupNorm(1, hidden_channel, eps=1e-08)
def forward(self, input_0):
primals_1 = self.conv2d.weight
primals_2 = self.conv2d.bias
primals_7 = self.dconv2d.weight
primals_5 = self.dconv2d.bias
primals_12 = self.res_out.weight
primals_6 = self.res_out.bias
primals_4 = self.nonlinearity1.weight
primals_9 = self.nonlinearity2.weight
primals_8 = self.reg1.weight
primals_10 = self.reg1.bias
primals_11 = self.reg2.weight
primals_13 = self.reg2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
|
c-ma13/sepTFNet
|
DepthConv2d
| false
| 6,384
|
[
"MIT"
] | 1
|
a06c89c080f9449ac2e5090f80d9645deea7f23a
|
https://github.com/c-ma13/sepTFNet/tree/a06c89c080f9449ac2e5090f80d9645deea7f23a
|
SequenceQuantizerSoftEMA
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda
class SequenceQuantizerSoftEMA(nn.Module):
def __init__(self, codebook_size, d_model, l1_cost=1000, entropy_cost=
5e-05, num_samples=10, temp=1.0, epsilon=1e-05, padding_idx=None):
super(SequenceQuantizerSoftEMA, self).__init__()
self.d_model = d_model
self.codebook_size = codebook_size
self.padding_idx = padding_idx
self.codebook = nn.Parameter(torch.FloatTensor(self.codebook_size,
self.d_model), requires_grad=True)
torch.nn.init.xavier_uniform_(self.codebook)
self.l1_cost = l1_cost
self.entropy_cost = entropy_cost
self.num_samples = num_samples
self.temp = temp
self._epsilon = epsilon
def entropy(self, tensor):
return torch.mean(torch.sum(-1 * torch.matmul(F.log_softmax(tensor,
dim=1), tensor.t()), dim=1))
def forward(self, inputs, l1_cost=None, entropy_cost=None, temp=None):
if l1_cost is None:
l1_cost = self.l1_cost
if entropy_cost is None:
entropy_cost = self.entropy_cost
if temp is None:
temp = self.temp
input_shape = inputs.size()
flat_input = inputs.reshape(-1, self.d_model)
norm_C = self.codebook / self.codebook.norm(2, dim=1)[:, None]
flat_input = flat_input / flat_input.norm(2, dim=1)[:, None]
distances = F.softmax(torch.matmul(flat_input, norm_C.t()), dim=1)
reconstruction = torch.matmul(distances, norm_C).view(input_shape)
l1_loss = nn.L1Loss()
loss = l1_cost * l1_loss(distances, torch.zeros_like(distances)
) + entropy_cost * self.entropy(distances)
return reconstruction, loss
def cluster(self, inputs):
input_shape = inputs.size()
inputs.dim()
flat_input = inputs.reshape(-1, self.d_model)
flat_input = flat_input / flat_input.norm(2, dim=1)[:, None]
codebook = self.codebook / self.codebook.norm(2, dim=1)[:, None]
distances = F.softmax(torch.matmul(flat_input, codebook.t()).
reshape(-1, self.output_nheads, codebook.shape[0]), dim=2)
reconstruction = torch.matmul(distances, codebook).view(input_shape)
encoding_indices = torch.argmax(distances, dim=1).reshape(-1, self.
output_nheads)
return reconstruction, encoding_indices, distances
def set_codebook(self, new_codebook):
self.codebook.copy_(new_codebook)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'codebook_size': 4, 'd_model': 4}]
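# Editorial sketch (not part of the upstream record): forward() returns a reconstruction with
# the input shape plus a scalar regularization loss (L1 on the soft assignments plus an entropy
# term). Note that cluster() references self.output_nheads, which this class never defines, so
# only forward() is exercised here.
if __name__ == '__main__':
    vq = SequenceQuantizerSoftEMA(codebook_size=4, d_model=4)
    recon, loss = vq(torch.rand([4, 4, 4, 4]))
    print(recon.shape, loss.item())  # reconstruction keeps the input shape; loss is a scalar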
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_red_fused__softmax_abs_mean_sub_4(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp17 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.0
tmp10 = tmp7 >= tmp9
tmp11 = 1.0
tmp12 = -1.0
tmp13 = tl.where(tmp10, tmp11, tmp12)
tmp14 = tmp0 * tmp13
tmp15 = tl_math.abs(tmp8)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = _tmp17 + tmp16
_tmp17 = tl.where(rmask, tmp18, _tmp17)
tl.store(out_ptr0 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp8, rmask)
tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp14, rmask
)
tmp17 = tl.sum(_tmp17, 1)[:, None]
tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = 0.0
tmp17 = tmp15 >= tmp16
tmp18 = 1.0
tmp19 = -1.0
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp20 * tmp15
tmp22 = tmp8 / tmp21
tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused__log_softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_per_fused_mul_sum_7(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_per_fused_abs_add_mean_mul_8(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, 1])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp6 = 256.0
tmp7 = tmp5 / tmp6
tmp8 = 1000.0
tmp9 = tmp7 * tmp8
tmp10 = 64.0
tmp11 = tmp3 / tmp10
tmp12 = 5e-05
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_linalg_vector_norm_0[grid(4)](primals_2, buf0, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_2, buf0, buf1, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_div_2[grid(256)](primals_1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf3)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(256)](buf3, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = buf3
del buf3
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((), (), torch.float32)
triton_red_fused__softmax_abs_mean_sub_4[grid(1)](buf4, buf5, buf8,
buf7, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf5, buf1, out=buf6)
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(256)](buf8, buf4, buf9, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf4
buf10 = buf8
del buf8
triton_poi_fused__log_softmax_6[grid(256)](buf9, buf10, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(buf5, (4, 64), (1, 4),
0), out=buf11)
buf12 = empty_strided_cuda((64,), (1,), torch.float32)
triton_per_fused_mul_sum_7[grid(64)](buf11, buf12, 64, 64, XBLOCK=8,
num_warps=4, num_stages=1)
del buf11
buf13 = empty_strided_cuda((), (), torch.float32)
buf14 = buf13
del buf13
triton_per_fused_abs_add_mean_mul_8[grid(1)](buf14, buf12, buf7, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
del buf12
del buf7
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf14, reinterpret_tensor(buf0, (4, 1), (1, 1), 0
), buf1, buf2, buf5, buf10
class SequenceQuantizerSoftEMANew(nn.Module):
def __init__(self, codebook_size, d_model, l1_cost=1000, entropy_cost=
5e-05, num_samples=10, temp=1.0, epsilon=1e-05, padding_idx=None):
super(SequenceQuantizerSoftEMANew, self).__init__()
self.d_model = d_model
self.codebook_size = codebook_size
self.padding_idx = padding_idx
self.codebook = nn.Parameter(torch.FloatTensor(self.codebook_size,
self.d_model), requires_grad=True)
torch.nn.init.xavier_uniform_(self.codebook)
self.l1_cost = l1_cost
self.entropy_cost = entropy_cost
self.num_samples = num_samples
self.temp = temp
self._epsilon = epsilon
def entropy(self, tensor):
return torch.mean(torch.sum(-1 * torch.matmul(F.log_softmax(tensor,
dim=1), tensor.t()), dim=1))
def cluster(self, inputs):
input_shape = inputs.size()
inputs.dim()
flat_input = inputs.reshape(-1, self.d_model)
flat_input = flat_input / flat_input.norm(2, dim=1)[:, None]
codebook = self.codebook / self.codebook.norm(2, dim=1)[:, None]
distances = F.softmax(torch.matmul(flat_input, codebook.t()).
reshape(-1, self.output_nheads, codebook.shape[0]), dim=2)
reconstruction = torch.matmul(distances, codebook).view(input_shape)
encoding_indices = torch.argmax(distances, dim=1).reshape(-1, self.
output_nheads)
return reconstruction, encoding_indices, distances
def set_codebook(self, new_codebook):
self.codebook.copy_(new_codebook)
def forward(self, input_0):
primals_2 = self.codebook
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
|
brcsomnath/SemAE
|
SequenceQuantizerSoftEMA
| false
| 6,385
|
[
"MIT"
] | 1
|
8da5de73a5b334c6cb0b22eadaaacc35e98126ed
|
https://github.com/brcsomnath/SemAE/tree/8da5de73a5b334c6cb0b22eadaaacc35e98126ed
|
BertAttention
|
from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
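# Editorial sketch (not part of the upstream record): the attention mask is additive (it is
# summed onto the raw scores), so an all-zero mask keeps every position visible. This relies on
# the _mock_config helper imported at the top of this record.
if __name__ == '__main__':
    config = _mock_config(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)
    attn = BertAttention(config)
    out = attn(torch.rand([4, 4, 4]), torch.zeros([4, 4, 4]))
    print(out.shape)  # expected: torch.Size([4, 4, 4])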
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = float('-inf')
tmp27 = tmp2 == tmp26
tmp28 = tmp27 == 0
tmp29 = tmp28.to(tl.int64)
tmp30 = tmp29 != 0
tmp31 = tmp5 == tmp26
tmp32 = tmp31 == 0
tmp33 = tmp32.to(tl.int64)
tmp34 = tmp33 != 0
tmp35 = tmp30 | tmp34
tmp36 = tmp9 == tmp26
tmp37 = tmp36 == 0
tmp38 = tmp37.to(tl.int64)
tmp39 = tmp38 != 0
tmp40 = tmp35 | tmp39
tmp41 = tmp13 == tmp26
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = tmp43 != 0
tmp45 = tmp40 | tmp44
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
tl.store(out_ptr2 + x2, tmp45, xmask)
@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_out_ptr0 + x4, xmask)
tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp1, tmp10, tmp9)
tl.store(in_out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_mean_pow_sub_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf8
del primals_8
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf10, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_mean_pow_sub_5[grid(16)](buf13, primals_3,
buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_sub_6[grid(64)](primals_11,
buf13, primals_3, buf14, buf15, primals_12, buf16, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf14
del buf15
del primals_12
return buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, (
16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttentionNew(nn.Module):
def __init__(self, config):
super(BertAttentionNew, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_0, input_1):
primals_1 = self.self.query.weight
primals_2 = self.self.query.bias
primals_4 = self.self.key.weight
primals_5 = self.self.key.bias
primals_6 = self.self.value.weight
primals_7 = self.self.value.bias
primals_9 = self.output.dense.weight
primals_10 = self.output.dense.bias
primals_11 = self.output.LayerNorm.weight
primals_12 = self.output.LayerNorm.bias
primals_3 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
|
caldoe/BERT-NL2SPARQL
|
BertAttention
| false
| 6,386
|
[
"MIT"
] | 1
|
2e09c1aeffc855bc7f1dc8c182e21153b2bc73a8
|
https://github.com/caldoe/BERT-NL2SPARQL/tree/2e09c1aeffc855bc7f1dc8c182e21153b2bc73a8
|
CTLoss
|
import torch
import torch.nn as nn
import torch.onnx
def _neg_loss(preds, gt):
pos_inds = gt.eq(1)
neg_inds = gt.lt(1)
neg_weights = torch.pow(1 - gt[neg_inds], 4)
loss = 0
for pred in preds:
pos_pred = pred[pos_inds]
neg_pred = pred[neg_inds]
pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)
neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2
) * neg_weights
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if pos_pred.nelement() == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
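# Editorial note (not part of the upstream record): _neg_loss above is the CornerNet-style
# focal loss, roughly
#   L = -(1/N_pos) * [ sum_pos (1 - p)^2 * log(p) + sum_neg (1 - y)^4 * p^2 * log(1 - p) ],
# where p is the predicted heat, y the ground-truth heat (typically a Gaussian bump), and
# N_pos the number of exact positives (y == 1).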
def _regr_loss(regr, gt_regr, mask):
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr)
regr = regr[mask]
gt_regr = gt_regr[mask]
regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)
regr_loss = regr_loss / (num + 0.0001)
return regr_loss
def _sigmoid(x):
x = torch.clamp(x.sigmoid_(), min=0.0001, max=1 - 0.0001)
return x
class CTLoss(nn.Module):
def __init__(self, regr_weight=1, focal_loss=_neg_loss):
super(CTLoss, self).__init__()
self.regr_weight = regr_weight
self.focal_loss = focal_loss
self.regr_loss = _regr_loss
def forward(self, outs, targets):
stride = 9
t_heats = outs[0::stride]
l_heats = outs[1::stride]
b_heats = outs[2::stride]
r_heats = outs[3::stride]
ct_heats = outs[4::stride]
t_regrs = outs[5::stride]
l_regrs = outs[6::stride]
b_regrs = outs[7::stride]
r_regrs = outs[8::stride]
gt_t_heat = targets[0]
gt_l_heat = targets[1]
gt_b_heat = targets[2]
gt_r_heat = targets[3]
gt_ct_heat = targets[4]
gt_mask = targets[5]
gt_t_regr = targets[6]
gt_l_regr = targets[7]
gt_b_regr = targets[8]
gt_r_regr = targets[9]
focal_loss = 0
t_heats = [_sigmoid(t) for t in t_heats]
l_heats = [_sigmoid(l) for l in l_heats]
b_heats = [_sigmoid(b) for b in b_heats]
r_heats = [_sigmoid(r) for r in r_heats]
ct_heats = [_sigmoid(ct) for ct in ct_heats]
focal_loss += self.focal_loss(t_heats, gt_t_heat)
focal_loss += self.focal_loss(l_heats, gt_l_heat)
focal_loss += self.focal_loss(b_heats, gt_b_heat)
focal_loss += self.focal_loss(r_heats, gt_r_heat)
focal_loss += self.focal_loss(ct_heats, gt_ct_heat)
regr_loss = 0
for t_regr, l_regr, b_regr, r_regr in zip(t_regrs, l_regrs, b_regrs,
r_regrs):
regr_loss += self.regr_loss(t_regr, gt_t_regr, gt_mask)
regr_loss += self.regr_loss(l_regr, gt_l_regr, gt_mask)
regr_loss += self.regr_loss(b_regr, gt_b_regr, gt_mask)
regr_loss += self.regr_loss(r_regr, gt_r_regr, gt_mask)
regr_loss = self.regr_weight * regr_loss
loss = (focal_loss + regr_loss) / len(t_heats)
return loss.unsqueeze(0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([10, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
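# Editorial sketch (not part of the upstream record): CTLoss slices `outs` with a stride of 9
# (t/l/b/r/ct heats plus four regressions). With the get_inputs() shapes below, only the first
# four heat slots exist, so the center-heat and regression terms contribute nothing here.
if __name__ == '__main__':
    criterion = CTLoss()
    loss = criterion(torch.rand([4, 4, 4, 4]), torch.rand([10, 4, 4, 4]))
    print(loss.shape)  # expected: torch.Size([1])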
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp9 = tl.load(in_ptr0 + x0, xmask)
tmp0 = tl.full([1], 0, tl.int64)
tmp1 = tmp0 == tmp0
tmp2 = tl.full([1], 0, tl.int32)
tmp3 = tmp2 == tmp2
tmp4 = tl.load(in_ptr0 + x0, tmp1 & xmask, other=0.0)
tmp5 = tl.sigmoid(tmp4)
tmp6 = tl.where(tmp3, tmp5, tmp4)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp1, tmp6, tmp7)
tmp10 = tl.where(tmp1, tmp8, tmp9)
tmp11 = 0.0001
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp13 = 0.9999
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x0 = xindex % 64
x2 = xindex
tmp35 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = (-1 + x1) % 9
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 == tmp4
tmp6 = tmp2 & tmp5
tmp7 = triton_helpers.div_floor_integer(-1 + x1, 9)
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = tmp7 == tmp8
tmp10 = tmp1 == tmp4
tmp11 = tmp10 & tmp6
tmp12 = tmp8 == tmp8
tmp13 = tl.load(in_ptr0 + x0, tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tl.sigmoid(tmp13)
tmp15 = tl.where(tmp12, tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp11, tmp15, tmp16)
tmp18 = tl.load(in_ptr0 + (64 + x0), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tmp20 = tl.sigmoid(tmp19)
tmp21 = tl.where(tmp9, tmp14, tmp13)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp11, tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (64 + x0 + 576 * triton_helpers.
div_floor_integer(-1 + x1, 9)), tmp6 & xmask, other=0.0)
tmp25 = tl.where(tmp10, tmp23, tmp24)
tmp26 = tl.where(tmp9, tmp20, tmp25)
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp6, tmp26, tmp27)
tmp29 = tmp0 == tmp4
tmp30 = tl.load(in_ptr0 + x0, tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp31 = tl.sigmoid(tmp30)
tmp32 = tl.where(tmp12, tmp31, tmp30)
tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
tmp34 = tl.where(tmp29, tmp32, tmp33)
tmp36 = tl.where(tmp29, tmp34, tmp35)
tmp37 = tl.where(tmp6, tmp28, tmp36)
tl.store(out_ptr0 + x2, tmp37, xmask)
@triton.jit
def triton_poi_fused_clamp_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp1 = 0.0001
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 0.9999
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_clamp_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp12 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp0 = tl.full([1], 2, tl.int64)
tmp1 = tmp0 >= tmp0
tmp2 = tl.full([1], 0, tl.int64)
tmp3 = tmp2 == tmp2
tmp4 = tmp1 & tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = tmp5 == tmp5
tmp7 = tl.load(in_ptr0 + (128 + x0), tmp4 & xmask, other=0.0)
tmp8 = tl.sigmoid(tmp7)
tmp9 = tl.where(tmp6, tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = 0.0001
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = 0.9999
tmp17 = triton_helpers.minimum(tmp15, tmp16)
tl.store(out_ptr0 + x0, tmp17, xmask)
@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x0 = xindex % 64
x2 = xindex
tmp50 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = (-3 + x1) % 9
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 == tmp4
tmp6 = tmp2 & tmp5
tmp7 = triton_helpers.div_floor_integer(-3 + x1, 9)
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = tmp7 == tmp8
tmp10 = tl.full([1], 2, tl.int64)
tmp11 = tmp1 >= tmp10
tmp12 = tl.full([1], 1, tl.int64)
tmp13 = tmp12 == tmp4
tmp14 = tmp11 & tmp13
tmp15 = tmp14 & tmp6
tmp16 = tmp8 == tmp8
tmp17 = tl.load(in_ptr0 + (128 + x0), tmp15 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tl.sigmoid(tmp17)
tmp19 = tl.where(tmp16, tmp18, tmp17)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (192 + x0), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.sigmoid(tmp23)
tmp25 = 3 + 9 * triton_helpers.div_floor_integer(-3 + x1, 9)
tmp26 = tmp25 >= tmp10
tmp27 = tmp26 & tmp13
tmp28 = tmp27 & tmp6
tmp29 = tl.load(in_ptr0 + (128 + x0), tmp28 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp30 = tl.sigmoid(tmp29)
tmp31 = tl.where(tmp9, tmp30, tmp29)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp28, tmp31, tmp32)
tmp34 = tl.load(in_ptr0 + (192 + x0 + 576 * triton_helpers.
div_floor_integer(-3 + x1, 9)), tmp6 & xmask, other=0.0)
tmp35 = tl.where(tmp27, tmp33, tmp34)
tmp36 = tl.where(tmp9, tmp24, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp6, tmp36, tmp37)
tmp39 = tmp0 >= tmp10
tmp40 = (-2 + x1) % 9
tmp41 = tmp40 == tmp4
tmp42 = tmp39 & tmp41
tmp43 = triton_helpers.div_floor_integer(-2 + x1, 9)
tmp44 = tmp43 == tmp8
tmp45 = tl.load(in_ptr0 + (128 + x0), tmp42 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp46 = tl.sigmoid(tmp45)
tmp47 = tl.where(tmp44, tmp46, tmp45)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp42, tmp47, tmp48)
tmp51 = tl.where(tmp42, tmp49, tmp50)
tmp52 = tl.where(tmp6, tmp38, tmp51)
tl.store(out_ptr0 + x2, tmp52, xmask)
tl.store(out_ptr1 + x2, tmp52, xmask)
@triton.jit
def triton_poi_fused_clamp_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp1 = 0.0001
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 0.9999
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (10, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_sigmoid_1[grid(256)](arg0_1, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clamp_2[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clamp_3[grid(64)](buf1, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_sigmoid_4[grid(256)](buf1, buf4, arg0_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf1
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clamp_5[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf0, reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 0
), buf2, buf3, buf5, reinterpret_tensor(buf4, (0, 4, 4, 4), (576,
16, 4, 1), 256), reinterpret_tensor(buf4, (0, 4, 4, 4), (576, 16, 4,
1), 256), reinterpret_tensor(buf4, (0, 4, 4, 4), (576, 16, 4, 1), 256
), reinterpret_tensor(buf4, (0, 4, 4, 4), (576, 16, 4, 1), 256
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 64
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 128
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 192
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 256
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 320
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 384
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 448
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 512
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 576)
def _neg_loss(preds, gt):
pos_inds = gt.eq(1)
neg_inds = gt.lt(1)
neg_weights = torch.pow(1 - gt[neg_inds], 4)
loss = 0
for pred in preds:
pos_pred = pred[pos_inds]
neg_pred = pred[neg_inds]
pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)
neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2
) * neg_weights
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if pos_pred.nelement() == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _regr_loss(regr, gt_regr, mask):
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr)
regr = regr[mask]
gt_regr = gt_regr[mask]
    regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, reduction='sum')
regr_loss = regr_loss / (num + 0.0001)
return regr_loss
def _sigmoid(x):
x = torch.clamp(x.sigmoid_(), min=0.0001, max=1 - 0.0001)
return x
class CTLossNew(nn.Module):
def __init__(self, regr_weight=1, focal_loss=_neg_loss):
super(CTLossNew, self).__init__()
self.regr_weight = regr_weight
self.focal_loss = focal_loss
self.regr_loss = _regr_loss
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
c464851257/extremenet-lite
|
CTLoss
| false
| 6,387
|
[
"BSD-3-Clause"
] | 1
|
331446f2c5d9524d46d2b33823eff02416f43052
|
https://github.com/c464851257/extremenet-lite/tree/331446f2c5d9524d46d2b33823eff02416f43052
|
upsampleBlock
|
import torch
import torch.nn as nn
def swish(x):
return x * torch.sigmoid(x)
class upsampleBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(upsampleBlock, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1
)
self.shuffler = nn.PixelShuffle(2)
def forward(self, x):
return swish(self.shuffler(self.conv(x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
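# A minimal usage sketch, not taken from the original repo; the 4-channel 4x4 input
# below is an assumption borrowed from get_inputs().
if __name__ == "__main__":
    _block = upsampleBlock(in_channels=4, out_channels=4)
    _y = _block(torch.rand(4, 4, 4, 4))
    print(_y.shape)  # PixelShuffle(2) trades 4 channels for a 2x upscale: torch.Size([4, 1, 8, 8])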
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (x1 // 2) + 16 * (x0 % 2) + 32 * (x1 % 2) +
64 * x2 + x0 // 2), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 1, 8, 8), (64, 64, 8, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](buf1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1
def swish(x):
return x * torch.sigmoid(x)
class upsampleBlockNew(nn.Module):
def __init__(self, in_channels, out_channels):
super(upsampleBlockNew, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1
)
self.shuffler = nn.PixelShuffle(2)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
carl-zjr/super-resolution-reconstruction
|
upsampleBlock
| false
| 6,388
|
[
"Apache-2.0"
] | 1
|
37b5b42ea6e8864c12a93a7e90d3bf0920f502d4
|
https://github.com/carl-zjr/super-resolution-reconstruction/tree/37b5b42ea6e8864c12a93a7e90d3bf0920f502d4
|
SeparableConvBlock
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
class SeparableConvBlock(nn.Module):
def __init__(self, inplanes, planes):
super(SeparableConvBlock, self).__init__()
self.depthwise_conv = nn.Conv2d(inplanes, inplanes, kernel_size=3,
stride=1, padding=1, groups=inplanes, bias=False)
self.pointwise_conv = nn.Conv2d(inplanes, planes, kernel_size=1,
stride=1, padding=0, bias=True)
def forward(self, x):
x = self.depthwise_conv(x)
x = self.pointwise_conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
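# A minimal usage sketch, not taken from the original repo; the channel counts and the
# 16x16 spatial size below are assumptions.
if __name__ == "__main__":
    _block = SeparableConvBlock(inplanes=4, planes=8)
    _y = _block(torch.rand(2, 4, 16, 16))
    print(_y.shape)  # depthwise 3x3 + pointwise 1x1 keep the spatial size: torch.Size([2, 8, 16, 16])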
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf2, primals_4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
return buf2, primals_1, primals_2, primals_3, buf0
class SeparableConvBlockNew(nn.Module):
def __init__(self, inplanes, planes):
super(SeparableConvBlockNew, self).__init__()
self.depthwise_conv = nn.Conv2d(inplanes, inplanes, kernel_size=3,
stride=1, padding=1, groups=inplanes, bias=False)
self.pointwise_conv = nn.Conv2d(inplanes, planes, kernel_size=1,
stride=1, padding=0, bias=True)
def forward(self, input_0):
primals_1 = self.depthwise_conv.weight
primals_3 = self.pointwise_conv.weight
primals_4 = self.pointwise_conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
carol007/pytorch-ImageNet-CIFAR-COCO-VOC-training
|
SeparableConvBlock
| false
| 6,389
|
[
"MIT"
] | 1
|
e8b37046e6fbe914f6a68bbde1fe419c46373c1d
|
https://github.com/carol007/pytorch-ImageNet-CIFAR-COCO-VOC-training/tree/e8b37046e6fbe914f6a68bbde1fe419c46373c1d
|
GlobalChannelLayerNorm
|
import torch
import torch.nn as nn
class GlobalChannelLayerNorm(nn.Module):
"""
Global channel layer normalization
"""
def __init__(self, dim, eps=1e-05, elementwise_affine=True):
super(GlobalChannelLayerNorm, self).__init__()
self.eps = eps
self.normalized_dim = dim
self.elementwise_affine = elementwise_affine
if elementwise_affine:
self.beta = nn.Parameter(torch.zeros(dim, 1))
self.gamma = nn.Parameter(torch.ones(dim, 1))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
"""
x: N x C x T
"""
if x.dim() != 3:
            raise RuntimeError('{} accepts a 3D tensor as input'.format(
                type(self).__name__))
mean = torch.mean(x, (1, 2), keepdim=True)
var = torch.mean((x - mean) ** 2, (1, 2), keepdim=True)
if self.elementwise_affine:
x = self.gamma * (x - mean) / torch.sqrt(var + self.eps
) + self.beta
else:
x = (x - mean) / torch.sqrt(var + self.eps)
return x
def extra_repr(self):
return (
'{normalized_dim}, eps={eps}, elementwise_affine={elementwise_affine}'
.format(**self.__dict__))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
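# A minimal usage sketch, not taken from the original repo; the (N, C, T) = (2, 4, 10)
# shape below is an assumption.
if __name__ == "__main__":
    _gln = GlobalChannelLayerNorm(dim=4)
    _y = _gln(torch.rand(2, 4, 10))
    print(_y.shape, _y.mean().item())  # shape is preserved; the per-sample mean is ~0 after normalization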
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp17 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tmp7 = tmp0 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp12 / tmp5
tmp14 = 1e-05
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp18 = tmp17 * tmp7
tmp19 = tmp18 / tmp16
tmp21 = tmp19 + tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp16, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0[grid(4)](buf1,
buf3, primals_1, primals_2, primals_3, buf4, 4, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_2
del primals_3
return buf4, primals_1, buf1, buf3
class GlobalChannelLayerNormNew(nn.Module):
"""
Global channel layer normalization
"""
def __init__(self, dim, eps=1e-05, elementwise_affine=True):
super(GlobalChannelLayerNormNew, self).__init__()
self.eps = eps
self.normalized_dim = dim
self.elementwise_affine = elementwise_affine
if elementwise_affine:
self.beta = nn.Parameter(torch.zeros(dim, 1))
self.gamma = nn.Parameter(torch.ones(dim, 1))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def extra_repr(self):
return (
'{normalized_dim}, eps={eps}, elementwise_affine={elementwise_affine}'
.format(**self.__dict__))
def forward(self, input_0):
primals_2 = self.beta
primals_3 = self.gamma
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
c-ma13/sepTFNet
|
GlobalChannelLayerNorm
| false
| 6,390
|
[
"MIT"
] | 1
|
a06c89c080f9449ac2e5090f80d9645deea7f23a
|
https://github.com/c-ma13/sepTFNet/tree/a06c89c080f9449ac2e5090f80d9645deea7f23a
|
HighwayNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class HighwayNetwork(nn.Module):
def __init__(self, size):
super().__init__()
self.W1 = nn.Linear(size, size)
self.W2 = nn.Linear(size, size)
self.W1.bias.data.fill_(0.0)
def forward(self, x):
x1 = self.W1(x)
x2 = self.W2(x)
g = torch.sigmoid(x2)
y = g * F.relu(x1) + (1.0 - g) * x
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': 4}]
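# A minimal usage sketch, not taken from the original repo; the (3, 5, 4) input shape
# below is an assumption.
if __name__ == "__main__":
    _hw = HighwayNetwork(size=4)
    _y = _hw(torch.rand(3, 5, 4))
    print(_y.shape)  # the gate g blends relu(W1 x) with x, so the shape is unchanged: torch.Size([3, 5, 4])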
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp8 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_0[grid(256)](buf1, buf0,
primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_3, buf0, buf1
class HighwayNetworkNew(nn.Module):
def __init__(self, size):
super().__init__()
self.W1 = nn.Linear(size, size)
self.W2 = nn.Linear(size, size)
self.W1.bias.data.fill_(0.0)
def forward(self, input_0):
primals_1 = self.W1.weight
primals_2 = self.W1.bias
primals_4 = self.W2.weight
primals_5 = self.W2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
cassiavb/Tacotron
|
HighwayNetwork
| false
| 6,391
|
[
"MIT"
] | 1
|
946408f8cd7b5fe9c53931c631267ba2a723910d
|
https://github.com/cassiavb/Tacotron/tree/946408f8cd7b5fe9c53931c631267ba2a723910d
|
LevelVariabilityLoss
|
import torch
import torch.nn as nn
class LevelVariabilityLoss(nn.Module):
"""Computes the variability penalty for the level.
levels: levels obtained from exponential smoothing component of ESRNN.
tensor with shape (batch, n_time).
level_variability_penalty: float.
return: level_var_loss
"""
def __init__(self, level_variability_penalty):
super(LevelVariabilityLoss, self).__init__()
self.level_variability_penalty = level_variability_penalty
def forward(self, levels):
assert levels.shape[1] > 2
level_prev = torch.log(levels[:, :-1])
level_next = torch.log(levels[:, 1:])
log_diff_of_levels = torch.sub(level_prev, level_next)
log_diff_prev = log_diff_of_levels[:, :-1]
log_diff_next = log_diff_of_levels[:, 1:]
diff = torch.sub(log_diff_prev, log_diff_next)
level_var_loss = diff ** 2
level_var_loss = level_var_loss.mean() * self.level_variability_penalty
return level_var_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'level_variability_penalty': 4}]
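# A minimal sketch, not taken from the original repo; the constant-growth level series
# below is an assumption chosen so that the penalty should vanish.
if __name__ == "__main__":
    _loss_fn = LevelVariabilityLoss(level_variability_penalty=4)
    _levels = torch.tensor([[1.0, 2.0, 4.0, 8.0]])  # (batch, n_time), log-linear growth
    print(_loss_fn(_levels))  # second differences of the log-levels are ~0, so the loss is ~0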
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 32
r1 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.log(tmp2)
tmp4 = tmp1 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp3 - tmp6
tmp8 = tmp4 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 128.0
tmp14 = tmp12 / tmp13
tmp15 = 4.0
tmp16 = tmp14 * tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_pow_sub_0[grid(1)](buf1, arg0_1, 1, 128,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class LevelVariabilityLossNew(nn.Module):
"""Computes the variability penalty for the level.
levels: levels obtained from exponential smoothing component of ESRNN.
tensor with shape (batch, n_time).
level_variability_penalty: float.
return: level_var_loss
"""
def __init__(self, level_variability_penalty):
super(LevelVariabilityLossNew, self).__init__()
self.level_variability_penalty = level_variability_penalty
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
cchallu/esrnn
|
LevelVariabilityLoss
| false
| 6,392
|
[
"MIT"
] | 1
|
543ca365c70be2775a4b5863820b246071ccde3c
|
https://github.com/cchallu/esrnn/tree/543ca365c70be2775a4b5863820b246071ccde3c
|
MultiHeadedAttention
|
import math
import torch
import numpy as np
from typing import Optional
from torch import nn
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer
    :param int n_head: the number of heads
:param int n_feat: the number of features
:param float dropout_rate: dropout rate
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
super(MultiHeadedAttention, self).__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]'=None) ->torch.Tensor:
"""Compute 'Scaled Dot Product Attention'
:param torch.Tensor query: (batch, time1, size)
:param torch.Tensor key: (batch, time2, size)
:param torch.Tensor value: (batch, time2, size)
:param torch.Tensor mask: (batch, time1, time2)
:param torch.nn.Dropout dropout:
        :return torch.Tensor: attended and transformed `value` (batch, time1, d_model)
weighted by the query dot key attention (batch, head, time1, time2)
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
mask = mask
scores = scores.masked_fill_(mask, -np.inf)
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, v)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'n_feat': 4, 'dropout_rate': 0.5}]
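# A minimal self-attention usage sketch, not taken from the original repo; the head
# count, feature size and sequence length below are assumptions.
if __name__ == "__main__":
    _mha = MultiHeadedAttention(n_head=2, n_feat=8, dropout_rate=0.0)
    _x = torch.rand(3, 5, 8)  # (batch, time, size)
    print(_mha(_x, _x, _x).shape)  # torch.Size([3, 5, 8])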
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_per_fused_1(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = float('-inf')
tmp12 = tmp0 == tmp11
tmp13 = tmp12 == 0
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp14 != 0
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = triton_helpers.any(tmp18, 1)[:, None]
tmp20 = tmp19 == 0
tmp21 = tmp6 / tmp10
tmp22 = 0.0
tmp23 = tl.where(tmp20, tmp22, tmp21)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp23, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 16)](buf0, primals_3, buf3, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 16)](buf1, primals_5, buf4, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_per_fused_1[grid(256)](buf5, buf9, 256, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf5
buf10 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf1
triton_poi_fused_2[grid(16, 16)](buf2, primals_8, buf10, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 16, 16), (256, 16,
1), 0), reinterpret_tensor(buf10, (16, 16, 1), (16, 1, 0), 0),
out=buf11)
buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(64, 4)](buf11, buf12, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_11
return reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf9, reinterpret_tensor(buf10, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0
), reinterpret_tensor(buf12, (64, 4), (4, 1), 0), primals_10
class MultiHeadedAttentionNew(nn.Module):
"""Multi-Head Attention layer
    :param int n_head: the number of heads
:param int n_feat: the number of features
:param float dropout_rate: dropout rate
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
super(MultiHeadedAttentionNew, self).__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self, input_0, input_1, input_2):
primals_2 = self.linear_q.weight
primals_3 = self.linear_q.bias
primals_4 = self.linear_k.weight
primals_5 = self.linear_k.bias
primals_7 = self.linear_v.weight
primals_8 = self.linear_v.bias
primals_10 = self.linear_out.weight
primals_11 = self.linear_out.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
carankt/FastSpeech2-1
|
MultiHeadedAttention
| false
| 6,393
|
[
"Apache-2.0"
] | 1
|
42c06e4fbdf741a0719154d1cb4617b7d3f15a5c
|
https://github.com/carankt/FastSpeech2-1/tree/42c06e4fbdf741a0719154d1cb4617b7d3f15a5c
|
MaskedInstanceNorm1d
|
import torch
import torch.cuda
from torch import nn
import torch.utils.data
import torch.optim
class MaskedInstanceNorm1d(nn.Module):
"""Instance norm + masking."""
MAX_CNT = 100000.0
def __init__(self, d_channel: 'int', unbiased: 'bool'=True, affine:
'bool'=False):
super().__init__()
self.d_channel = d_channel
self.unbiased = unbiased
self.affine = affine
if self.affine:
gamma = torch.ones(d_channel, dtype=torch.float)
beta = torch.zeros_like(gamma)
self.register_parameter('gamma', nn.Parameter(gamma))
self.register_parameter('beta', nn.Parameter(beta))
def forward(self, x: 'torch.Tensor', x_mask: 'torch.Tensor'
) ->torch.Tensor:
"""`x`: [B,C,T], `x_mask`: [B,T] => [B,C,T]."""
x_mask = x_mask.unsqueeze(1).type_as(x)
cnt = x_mask.sum(dim=-1, keepdim=True)
cnt_for_mu = cnt.clamp(1.0, self.MAX_CNT)
mu = (x * x_mask).sum(dim=-1, keepdim=True) / cnt_for_mu
sigma = (x - mu) ** 2
        cnt_for_sigma = (cnt - int(self.unbiased)).clamp(1.0, self.MAX_CNT)
        sigma = (sigma * x_mask).sum(dim=-1, keepdim=True) / cnt_for_sigma
sigma = (sigma + 1e-08).sqrt()
y = (x - mu) / sigma
if self.affine:
gamma = self.gamma.unsqueeze(0).unsqueeze(-1)
beta = self.beta.unsqueeze(0).unsqueeze(-1)
y = y * gamma + beta
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_channel': 4}]
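# A minimal usage sketch, not taken from the original repo; it uses the documented
# (B, C, T) / (B, T) shapes, and the concrete sizes below are assumptions.
if __name__ == "__main__":
    _norm = MaskedInstanceNorm1d(d_channel=4)
    _x = torch.rand(2, 4, 6)
    _mask = torch.ones(2, 6)
    _mask[1, 4:] = 0  # the second item only has 4 valid frames
    print(_norm(_x, _mask).shape)  # torch.Size([2, 4, 6])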
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.cuda
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sqrt_sub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x0 = xindex % 16
x2 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 * x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0 + 64 * x2), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0 + 64 * x2), xmask, eviction_policy
='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tmp1 + tmp4
tmp16 = tmp15 + tmp8
tmp17 = tmp16 + tmp12
tmp18 = 1.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = 100000.0
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tmp14 / tmp21
tmp23 = tmp0 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tmp24 * tmp1
tmp26 = tmp3 - tmp22
tmp27 = tmp26 * tmp26
tmp28 = tmp27 * tmp4
tmp29 = tmp25 + tmp28
tmp30 = tmp7 - tmp22
tmp31 = tmp30 * tmp30
tmp32 = tmp31 * tmp8
tmp33 = tmp29 + tmp32
tmp34 = tmp11 - tmp22
tmp35 = tmp34 * tmp34
tmp36 = tmp35 * tmp12
tmp37 = tmp33 + tmp36
tmp38 = tmp17 - tmp18
tmp39 = triton_helpers.maximum(tmp38, tmp18)
tmp40 = triton_helpers.minimum(tmp39, tmp20)
tmp41 = tmp37 / tmp40
tmp42 = 1e-08
tmp43 = tmp41 + tmp42
tmp44 = libdevice.sqrt(tmp43)
tl.store(out_ptr0 + x4, tmp22, xmask)
tl.store(in_out_ptr0 + x4, tmp44, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_sqrt_sub_sum_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 256
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + x5, tmp4, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256),
torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256),
torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_clamp_div_mul_pow_sqrt_sub_sum_0[grid(256)](buf2,
arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_add_clamp_div_sqrt_sub_sum_1[grid(1024)](arg1_1,
buf0, buf2, buf3, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
del buf0
del buf2
return buf3,
class MaskedInstanceNorm1dNew(nn.Module):
"""Instance norm + masking."""
MAX_CNT = 100000.0
def __init__(self, d_channel: 'int', unbiased: 'bool'=True, affine:
'bool'=False):
super().__init__()
self.d_channel = d_channel
self.unbiased = unbiased
self.affine = affine
if self.affine:
gamma = torch.ones(d_channel, dtype=torch.float)
beta = torch.zeros_like(gamma)
self.register_parameter('gamma', nn.Parameter(gamma))
self.register_parameter('beta', nn.Parameter(beta))
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
carolmanderson/NeMo
|
MaskedInstanceNorm1d
| false
| 6,394
|
[
"Apache-2.0"
] | 1
|
be7114e2d983af751e1af4119465c626682747b7
|
https://github.com/carolmanderson/NeMo/tree/be7114e2d983af751e1af4119465c626682747b7
|
MaxPool2d
|
import torch
from typing import *
from torch import nn
class MaxPool2d(nn.Module):
def __init__(self, kernel_size, **kwargs):
super().__init__()
stride = kwargs.setdefault('stride', kernel_size)
padding = kwargs.setdefault('padding', 0)
dilation = kwargs.setdefault('dilation', 1)
return_indices = kwargs.setdefault('return_indices', False)
ceil_mode = kwargs.setdefault('ceil_mode', False)
self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=
padding, dilation=dilation, return_indices=return_indices,
ceil_mode=ceil_mode)
def forward(self, x):
*batch, height, width, channels = x.shape
x = x.view(-1, height, width, channels)
x = torch.einsum('nhwc->nchw', [x])
x = self.pool(x)
x = torch.einsum('nchw->nhwc', [x])
_, new_height, new_width, _ = x.shape
x = x.contiguous()
x = x.view(*batch, new_height, new_width, channels)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4}]
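# A minimal usage sketch, not taken from the original repo; the channels-last
# (N, H, W, C) input below matches get_inputs() and is an assumption.
if __name__ == "__main__":
    _pool = MaxPool2d(kernel_size=4)
    print(_pool(torch.rand(4, 4, 4, 4)).shape)  # pooled to torch.Size([4, 1, 1, 4])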
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from typing import *
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp25 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
tmp27 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
tmp29 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + x2, tmp30, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 1, 1, 4), (4, 4, 4, 1), 0),
class MaxPool2dNew(nn.Module):
def __init__(self, kernel_size, **kwargs):
super().__init__()
stride = kwargs.setdefault('stride', kernel_size)
padding = kwargs.setdefault('padding', 0)
dilation = kwargs.setdefault('dilation', 1)
return_indices = kwargs.setdefault('return_indices', False)
ceil_mode = kwargs.setdefault('ceil_mode', False)
self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=
padding, dilation=dilation, return_indices=return_indices,
ceil_mode=ceil_mode)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
cbarrick/csb
|
MaxPool2d
| false
| 6,395
|
[
"MIT"
] | 1
|
0368036ddb7594c0b6e7cdc704aeec918786e58a
|
https://github.com/cbarrick/csb/tree/0368036ddb7594c0b6e7cdc704aeec918786e58a
|
DeepNeuralNet
|
import torch
class DeepNeuralNet(torch.nn.Module):
"""
This is a six-layer neural network.
This is the default network for initializing sigma and center parameters
"""
def __init__(self, n_feature, n_hidden1, n_hidden2, n_hidden3,
n_hidden4, n_hidden5, n_hidden6, n_output):
"""
Initialization
:param n_feature: Feature number
        :param n_hidden1..n_hidden6: the number of neurons in each hidden layer
:param n_output: output number
"""
super(DeepNeuralNet, self).__init__()
self.fc1 = torch.nn.Linear(n_feature, n_hidden1)
self.fc2 = torch.nn.Linear(n_hidden1, n_hidden2)
self.fc3 = torch.nn.Linear(n_hidden2, n_hidden3)
self.fc4 = torch.nn.Linear(n_hidden3, n_hidden4)
self.fc5 = torch.nn.Linear(n_hidden4, n_hidden5)
self.fc6 = torch.nn.Linear(n_hidden5, n_hidden6)
self.predict = torch.nn.Linear(n_hidden6, n_output)
def forward(self, x):
x = torch.relu(self.fc1(x))
x = torch.relu(self.fc2(x))
x = torch.relu(self.fc3(x))
x = torch.relu(self.fc4(x))
x = torch.relu(self.fc5(x))
x = torch.relu(self.fc6(x))
x = self.predict(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_feature': 4, 'n_hidden1': 4, 'n_hidden2': 4,
'n_hidden3': 4, 'n_hidden4': 4, 'n_hidden5': 4, 'n_hidden6': 4,
'n_output': 4}]
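# A minimal usage sketch, not taken from the original repo; the layer widths and the
# batch of 10 samples below are assumptions.
if __name__ == "__main__":
    _net = DeepNeuralNet(4, 4, 4, 4, 4, 4, 4, 2)
    print(_net(torch.rand(10, 4)).shape)  # last dimension maps n_feature -> n_output: torch.Size([10, 2])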
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf18, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf17, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf5,
primals_7, buf16, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf7,
primals_9, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf8)
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf9,
primals_11, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf10)
buf11 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf10
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf11,
primals_13, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
buf12 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_15, reinterpret_tensor(buf11, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_15
return (reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1),
0), reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
reinterpret_tensor(buf9, (64, 4), (4, 1), 0), reinterpret_tensor(
buf11, (64, 4), (4, 1), 0), primals_14, buf13, primals_12, buf14,
primals_10, buf15, primals_8, buf16, primals_6, buf17, primals_4, buf18
)
class DeepNeuralNetNew(torch.nn.Module):
"""
This is a six-layer neural network.
This is the default network for initializing sigma and center parameters
"""
def __init__(self, n_feature, n_hidden1, n_hidden2, n_hidden3,
n_hidden4, n_hidden5, n_hidden6, n_output):
"""
Initialization
:param n_feature: Feature number
        :param n_hidden1..n_hidden6: the number of neurons in each hidden layer
:param n_output: output number
"""
super(DeepNeuralNetNew, self).__init__()
self.fc1 = torch.nn.Linear(n_feature, n_hidden1)
self.fc2 = torch.nn.Linear(n_hidden1, n_hidden2)
self.fc3 = torch.nn.Linear(n_hidden2, n_hidden3)
self.fc4 = torch.nn.Linear(n_hidden3, n_hidden4)
self.fc5 = torch.nn.Linear(n_hidden4, n_hidden5)
self.fc6 = torch.nn.Linear(n_hidden5, n_hidden6)
self.predict = torch.nn.Linear(n_hidden6, n_output)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_12 = self.fc6.weight
primals_13 = self.fc6.bias
primals_14 = self.predict.weight
primals_15 = self.predict.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
|
cassberk/xps_peakfit
|
DeepNeuralNet
| false
| 6,396
|
[
"MIT"
] | 1
|
bbdd62dbfc4d64ec2af0c509361de81b0762bd41
|
https://github.com/cassberk/xps_peakfit/tree/bbdd62dbfc4d64ec2af0c509361de81b0762bd41
|
ConvReLUNorm
|
import torch
import torch.cuda
import torch.utils.data
import torch.optim
class ConvReLUNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0):
super(ConvReLUNorm, self).__init__()
self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, padding=kernel_size // 2)
self.norm = torch.nn.LayerNorm(out_channels)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, signal):
out = torch.nn.functional.relu(self.conv(signal))
out = self.norm(out.transpose(1, 2)).transpose(1, 2)
return self.dropout(out)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
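# Hedged usage sketch (an illustrative addition, not part of the original
# source): it assumes the ConvReLUNorm class defined above, and the batch,
# channel and time sizes below are made up. It shows that Conv1d -> ReLU ->
# LayerNorm over channels keeps the time length when kernel_size is odd.
if __name__ == '__main__':
    m = ConvReLUNorm(in_channels=4, out_channels=4, kernel_size=3, dropout=0.1)
    signal = torch.randn(2, 4, 16)  # (batch, channels, time)
    out = m(signal)
    print(out.shape)  # torch.Size([2, 4, 16])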
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.cuda
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + x2, tmp13, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + y3, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x2 + 4 * y3), tmp10, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](buf1, buf2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(16, 4)](buf1, buf2, buf3,
primals_4, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=16,
num_warps=1, num_stages=1)
del buf2
del buf3
del primals_5
return reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0
), primals_1, primals_3, primals_4, buf1
class ConvReLUNormNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0):
super(ConvReLUNormNew, self).__init__()
self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, padding=kernel_size // 2)
self.norm = torch.nn.LayerNorm(out_channels)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.norm.weight
primals_5 = self.norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
carolmanderson/NeMo
|
ConvReLUNorm
| false
| 6,397
|
[
"Apache-2.0"
] | 1
|
be7114e2d983af751e1af4119465c626682747b7
|
https://github.com/carolmanderson/NeMo/tree/be7114e2d983af751e1af4119465c626682747b7
|
SineLayer
|
import torch
import numpy as np
import torch.nn as nn
class SineLayer(nn.Module):
def __init__(self, in_features, out_features, bias=True, is_first=False,
omega_0=30):
super().__init__()
self.omega_0 = omega_0
self.is_first = is_first
self.in_features = in_features
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.init_weights()
def init_weights(self):
with torch.no_grad():
if self.is_first:
self.linear.weight.uniform_(-1 / self.in_features, 1 / self
.in_features)
else:
self.linear.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward(self, input):
return torch.sin(self.omega_0 * self.linear(input))
def forward_with_intermediate(self, input):
intermediate = self.omega_0 * self.linear(input)
return torch.sin(intermediate), intermediate
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
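# Hedged usage sketch (an illustrative addition, not from the source repo):
# it assumes the SineLayer class above and made-up sizes. It shows the
# sin(omega_0 * (Wx + b)) activation and the pre-activation returned by
# forward_with_intermediate.
if __name__ == '__main__':
    layer = SineLayer(in_features=2, out_features=8, is_first=True)
    coords = torch.rand(16, 2) * 2 - 1  # coordinates in [-1, 1]
    out = layer(coords)  # shape (16, 8), values in [-1, 1]
    out2, pre = layer.forward_with_intermediate(coords)
    assert torch.allclose(out2, torch.sin(pre))
    print(out.shape)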
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 30.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.sin(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sin_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class SineLayerNew(nn.Module):
def __init__(self, in_features, out_features, bias=True, is_first=False,
omega_0=30):
super().__init__()
self.omega_0 = omega_0
self.is_first = is_first
self.in_features = in_features
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.init_weights()
def init_weights(self):
with torch.no_grad():
if self.is_first:
self.linear.weight.uniform_(-1 / self.in_features, 1 / self
.in_features)
else:
self.linear.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward_with_intermediate(self, input):
intermediate = self.omega_0 * self.linear(input)
return torch.sin(intermediate), intermediate
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ccxiaotoancai/Anim-NeRF
|
SineLayer
| false
| 6,398
|
[
"MIT"
] | 1
|
1342a9e2d02411a09acecac40ac325f38708b9c9
|
https://github.com/ccxiaotoancai/Anim-NeRF/tree/1342a9e2d02411a09acecac40ac325f38708b9c9
|
Generator
|
import torch
from torch import nn
import torch.nn.functional as F
class Generator(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.sigmoid(self.fc3(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
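# Hedged usage sketch (an illustrative addition): it assumes the Generator
# class above; the latent, hidden and output sizes are made up. Random noise
# is mapped through two ReLU layers and a sigmoid output, so values lie in
# (0, 1).
if __name__ == '__main__':
    g = Generator(input_size=100, hidden_size=128, output_size=784)
    z = torch.randn(8, 100)
    fake = g(z)
    print(fake.shape, float(fake.min()), float(fake.max()))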
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_sigmoid_1[grid(256)](buf5, primals_7, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7
class GeneratorNew(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, output_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
cclaypool/pytorch-dcgan
|
Generator
| false
| 6,399
|
[
"MIT"
] | 1
|
a2096daf7bb75bf95e189bb3d2f820c51147b61c
|
https://github.com/cclaypool/pytorch-dcgan/tree/a2096daf7bb75bf95e189bb3d2f820c51147b61c
|
Generator
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Generator(nn.Module):
def __init__(self, dim, hidden_dim, y_dim, sigma=0.02):
super(Generator, self).__init__()
input_dim = dim
hidden_size = hidden_dim
self.fc1 = nn.Linear(input_dim, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, y_dim)
nn.init.normal_(self.fc1.weight, std=sigma)
nn.init.constant_(self.fc1.bias, 0)
nn.init.normal_(self.fc2.weight, std=sigma)
nn.init.constant_(self.fc2.bias, 0)
nn.init.normal_(self.fc3.weight, std=sigma)
nn.init.constant_(self.fc3.bias, 0)
def forward(self, noise):
gen_input = noise
output = F.elu(self.fc1(gen_input))
output = F.elu(self.fc2(output))
output = self.fc3(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'hidden_dim': 4, 'y_dim': 4}]
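# Hedged usage sketch (an illustrative addition): it assumes the Generator
# class above; the dimensions are made up. The ELU-activated MLP maps noise
# of size `dim` to an output of size `y_dim`, with weights initialised from
# N(0, sigma).
if __name__ == '__main__':
    g = Generator(dim=8, hidden_dim=32, y_dim=2, sigma=0.02)
    noise = torch.randn(16, 8)
    y = g(noise)
    print(y.shape)  # torch.Size([16, 2])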
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_elu_0[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
), primals_6, primals_4
class GeneratorNew(nn.Module):
def __init__(self, dim, hidden_dim, y_dim, sigma=0.02):
super(GeneratorNew, self).__init__()
input_dim = dim
hidden_size = hidden_dim
self.fc1 = nn.Linear(input_dim, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, y_dim)
nn.init.normal_(self.fc1.weight, std=sigma)
nn.init.constant_(self.fc1.bias, 0)
nn.init.normal_(self.fc2.weight, std=sigma)
nn.init.constant_(self.fc2.bias, 0)
nn.init.normal_(self.fc3.weight, std=sigma)
nn.init.constant_(self.fc3.bias, 0)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
ccha23/miml
|
Generator
| false
| 6,400
|
[
"MIT"
] | 1
|
6a41de1c0bb41d38e3cdc6e9c27363215b7729b9
|
https://github.com/ccha23/miml/tree/6a41de1c0bb41d38e3cdc6e9c27363215b7729b9
|
StochasticPool2d
|
import torch
import torch.nn.functional as F
class StochasticPool2d(torch.nn.Module):
def __init__(self, kernel_size=2, stride=2, padding=0):
super(StochasticPool2d, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.grid_size = kernel_size
self.padding = torch.nn.ConstantPad2d((0, 1, 0, 1), 0)
def forward(self, x, s3pool_flag=False):
if s3pool_flag or self.training:
h, w = x.shape[-2:]
n_h = h // self.grid_size
n_w = w // self.grid_size
n_h = int(n_h)
n_w = int(n_w)
x = self.padding(x)
x = F.max_pool2d(x, self.kernel_size, 1)
w_indices = []
h_indices = []
for i in range(n_w):
position_offset = self.grid_size * i
if i + 1 < n_w:
max_range = self.grid_size
else:
max_range = w - position_offset
if not self.training:
w_index = torch.LongTensor([0])
else:
w_index = torch.LongTensor(1).random_(0, max_range)
w_indices.append(torch.add(w_index, position_offset))
for j in range(n_h):
position_offset = self.grid_size * j
if j + 1 < n_h:
max_range = self.grid_size
else:
max_range = h - position_offset
if not self.training:
h_index = torch.LongTensor([0])
else:
h_index = torch.LongTensor(1).random_(0, max_range)
h_indices.append(torch.add(h_index, position_offset))
h_indices = torch.cat(h_indices, dim=0)
w_indices = torch.cat(w_indices, dim=0)
output = x[:, :, h_indices][:, :, :, w_indices]
            None  # no-op statement kept from the original source
else:
output = F.avg_pool2d(x, self.kernel_size, self.stride)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
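# Hedged usage sketch (an illustrative addition): it assumes the
# StochasticPool2d class above. In training mode the pooling samples one
# offset per grid cell (S3Pool-style); in eval mode it falls back to plain
# average pooling. Both paths halve the spatial size for the default 2x2
# kernel.
if __name__ == '__main__':
    pool = StochasticPool2d(kernel_size=2, stride=2)
    x = torch.rand(1, 3, 8, 8)
    pool.train()
    print(pool(x).shape)  # torch.Size([1, 3, 4, 4])
    pool.eval()
    print(pool(x).shape)  # torch.Size([1, 3, 4, 4])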
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class StochasticPool2dNew(torch.nn.Module):
def __init__(self, kernel_size=2, stride=2, padding=0):
super(StochasticPool2dNew, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.grid_size = kernel_size
self.padding = torch.nn.ConstantPad2d((0, 1, 0, 1), 0)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
cclauss/DL4AGX
|
StochasticPool2d
| false
| 6,401
|
[
"Apache-2.0"
] | 1
|
b4d73f6c39b0428e32ce5656352800cc7e2cfb22
|
https://github.com/cclauss/DL4AGX/tree/b4d73f6c39b0428e32ce5656352800cc7e2cfb22
|
GKDLoss
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.nn.functional as F
class GKDLoss(nn.Module):
"""Knowledge Distillation Loss"""
def __init__(self, T):
super().__init__()
self.t = T
def forward(self, stu_pred, tea_pred, label):
stu_pred_log_softmax = F.log_softmax(stu_pred / self.t, dim=1)
tea_pred_softmax = F.softmax(tea_pred / self.t, dim=1)
tea_pred_argmax = torch.argmax(tea_pred_softmax, dim=1)
mask = torch.eq(label, tea_pred_argmax).float()
count = mask[mask == 1].size(0)
mask = mask.unsqueeze(-1)
only_correct_sample_stu_pred_log_softmax = stu_pred_log_softmax.mul(
mask)
only_correct_sample_tea_pred_softmax = tea_pred_softmax.mul(mask)
only_correct_sample_tea_pred_softmax[
only_correct_sample_tea_pred_softmax == 0.0] = 1.0
loss = F.kl_div(only_correct_sample_stu_pred_log_softmax,
only_correct_sample_tea_pred_softmax, reduction='sum'
) * self.t ** 2 / count
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'T': 4}]
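# Hedged usage sketch (an illustrative addition): it assumes the GKDLoss
# class above; batch size and class count are made up. Only samples whose
# label matches the teacher's argmax contribute to the KL term, scaled by
# T^2 / count, so the labels below are chosen to make every sample count.
if __name__ == '__main__':
    criterion = GKDLoss(T=4)
    stu_logits = torch.randn(8, 10)
    tea_logits = torch.randn(8, 10)
    labels = tea_logits.argmax(dim=1)  # teacher is "correct" on every sample
    loss = criterion(stu_logits, tea_logits, labels)
    print(float(loss))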
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_argmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x2, tmp46, xmask)
@triton.jit
def triton_poi_fused__to_copy_eq_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp1.to(tl.float32)
tmp3 = tmp0 == tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = 1.0
tmp6 = tmp4 == tmp5
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused__log_softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
triton_poi_fused_argmax_2[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf0
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused__to_copy_eq_3[grid(256)](arg2_1, buf2, buf3, buf4,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg2_1
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_4[grid(256)](arg0_1, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_5[grid(256)](buf5, buf6, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf5
return buf3, buf4, buf6, buf1
class GKDLossNew(nn.Module):
"""Knowledge Distillation Loss"""
def __init__(self, T):
super().__init__()
self.t = T
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
carol007/pytorch-ImageNet-CIFAR-COCO-VOC-training
|
GKDLoss
| false
| 6,402
|
[
"MIT"
] | 1
|
e8b37046e6fbe914f6a68bbde1fe419c46373c1d
|
https://github.com/carol007/pytorch-ImageNet-CIFAR-COCO-VOC-training/tree/e8b37046e6fbe914f6a68bbde1fe419c46373c1d
|
makeStyle
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class makeStyle(nn.Module):
def __init__(self):
super().__init__()
self.flatten = nn.Flatten()
def forward(self, x0):
style = F.avg_pool2d(x0, kernel_size=(x0.shape[-2], x0.shape[-1]))
style = self.flatten(style)
style = style / torch.sum(style ** 2, axis=1, keepdim=True) ** 0.5
return style
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
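# Hedged usage sketch (an illustrative addition): it assumes the makeStyle
# class above; the feature-map size is made up. Global average pooling
# followed by L2 normalisation yields one unit-norm style vector per sample.
if __name__ == '__main__':
    style_head = makeStyle()
    feats = torch.rand(2, 32, 28, 28)
    style = style_head(feats)
    print(style.shape, style.norm(dim=1))  # (2, 32), norms ~ 1.0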
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x0, tmp32, xmask)
@triton.jit
def triton_poi_fused_div_pow_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_pow_sum_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
return buf1,
class makeStyleNew(nn.Module):
def __init__(self):
super().__init__()
self.flatten = nn.Flatten()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
cellimnet/scellseg-publish
|
makeStyle
| false
| 6,403
|
[
"BSD-3-Clause"
] | 1
|
03bfbae11fedcf430c40419c9afadf55cbd3034d
|
https://github.com/cellimnet/scellseg-publish/tree/03bfbae11fedcf430c40419c9afadf55cbd3034d
|
LocalMLP
|
import torch
from torch import nn
import torch.nn.functional as F
class LocalMLP(nn.Module):
def __init__(self, dim_in: 'int', use_norm: 'bool'=True):
"""a Local 1 layer MLP
:param dim_in: feat in size
:type dim_in: int
:param use_norm: if to apply layer norm, defaults to True
:type use_norm: bool, optional
"""
super().__init__()
self.linear = nn.Linear(dim_in, dim_in, bias=not use_norm)
self.use_norm = use_norm
if use_norm:
self.norm = nn.LayerNorm(dim_in)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""forward of the module
:param x: input tensor (..., dim_in)
:type x: torch.Tensor
:return: output tensor (..., dim_in)
:rtype: torch.Tensor
"""
x = self.linear(x)
if hasattr(self, 'norm'):
x = self.norm(x)
x = F.relu(x, inplace=True)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4}]
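# Hedged usage sketch (an illustrative addition): it assumes the LocalMLP
# class above; the feature size is made up. With use_norm=True the linear
# layer has no bias and is followed by LayerNorm and ReLU, preserving the
# last dimension.
if __name__ == '__main__':
    mlp = LocalMLP(dim_in=16, use_norm=True)
    x = torch.randn(4, 10, 16)
    y = mlp(x)
    print(y.shape, bool((y >= 0).all()))  # torch.Size([4, 10, 16]) True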
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(out_ptr0 + x2, tmp10, xmask)
tl.store(out_ptr1 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](buf0, buf1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_native_layer_norm_relu_threshold_backward_1[grid(256)
](buf0, buf1, buf2, primals_3, primals_4, buf3, buf4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del buf2
del primals_4
return buf3, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf0, buf4
class LocalMLPNew(nn.Module):
def __init__(self, dim_in: 'int', use_norm: 'bool'=True):
"""a Local 1 layer MLP
:param dim_in: feat in size
:type dim_in: int
:param use_norm: if to apply layer norm, defaults to True
:type use_norm: bool, optional
"""
super().__init__()
self.linear = nn.Linear(dim_in, dim_in, bias=not use_norm)
self.use_norm = use_norm
if use_norm:
self.norm = nn.LayerNorm(dim_in)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_3 = self.norm.weight
primals_4 = self.norm.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
cdicle-motional/l5kit
|
LocalMLP
| false
| 6,404
|
[
"Apache-2.0"
] | 1
|
4dc4ee5391479bb71f0b373f39c316f9eef5a961
|
https://github.com/cdicle-motional/l5kit/tree/4dc4ee5391479bb71f0b373f39c316f9eef5a961
|
MV_Softmax
|
from torch.nn import Module
import math
import torch
from torch.nn import functional as F
import torch._utils
from torch.nn import Parameter
from itertools import product as product
import torch.utils.data.distributed
class MV_Softmax(Module):
"""Implementation for "Mis-classified Vector Guided Softmax Loss for Face Recognition"
"""
def __init__(self, feat_dim, num_class, is_am, margin=0.35, mv_weight=
1.12, scale=32):
super(MV_Softmax, self).__init__()
self.weight = Parameter(torch.Tensor(feat_dim, num_class))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
self.margin = margin
self.mv_weight = mv_weight
self.scale = scale
self.is_am = is_am
self.cos_m = math.cos(margin)
self.sin_m = math.sin(margin)
self.threshold = math.cos(math.pi - margin)
self.mm = self.sin_m * margin
def forward(self, x, label):
kernel_norm = F.normalize(self.weight, dim=0)
cos_theta = torch.mm(x, kernel_norm)
batch_size = label.size(0)
gt = cos_theta[torch.arange(0, batch_size), label].view(-1, 1)
if self.is_am:
mask = cos_theta > gt - self.margin
final_gt = torch.where(gt > self.margin, gt - self.margin, gt)
else:
sin_theta = torch.sqrt(1.0 - torch.pow(gt, 2))
cos_theta_m = gt * self.cos_m - sin_theta * self.sin_m
mask = cos_theta > cos_theta_m
final_gt = torch.where(gt > 0.0, cos_theta_m, gt)
hard_example = cos_theta[mask]
cos_theta[mask] = self.mv_weight * hard_example + self.mv_weight - 1.0
cos_theta.scatter_(1, label.data.view(-1, 1), final_gt)
cos_theta *= self.scale
return cos_theta
def get_inputs():
return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'feat_dim': 4, 'num_class': 4, 'is_am': 4}]
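# Hedged usage sketch (an illustrative addition): it assumes the MV_Softmax
# class above; the feature and class sizes are made up. The head expects
# L2-normalised features plus integer labels and returns scaled,
# margin-adjusted logits.
if __name__ == '__main__':
    head = MV_Softmax(feat_dim=512, num_class=10, is_am=True)
    feats = F.normalize(torch.randn(8, 512), dim=1)
    labels = torch.randint(0, 10, (8,))
    logits = head(feats, labels)
    print(logits.shape)  # torch.Size([8, 10])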
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import math
import torch._utils
from torch.nn import Parameter
from itertools import product as product
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_arange_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_gt_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.full([XBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask,
'index out of bounds: 0 <= tmp5 < 4')
tmp7 = tl.load(in_ptr0 + (tmp5 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp8 = 0.35
tmp9 = tmp7 - tmp8
tmp10 = tmp0 > tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_gt_sub_where_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp7 = 0.35
tmp8 = tmp6 > tmp7
tmp9 = tmp6 - tmp7
tmp10 = tl.where(tmp8, tmp9, tmp6)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, buf0, out=buf1)
del buf0
buf2 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_arange_1[grid(4)](buf2, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_gt_sub_2[grid(16)](buf1, primals_3, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused_gt_sub_where_3[grid(4)](primals_3, buf1, buf4,
buf5, 4, XBLOCK=4, num_warps=1, num_stages=1)
return (buf1, buf3, buf5, primals_1, primals_3, buf2, buf4,
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0))
class MV_SoftmaxNew(Module):
"""Implementation for "Mis-classified Vector Guided Softmax Loss for Face Recognition"
"""
def __init__(self, feat_dim, num_class, is_am, margin=0.35, mv_weight=
1.12, scale=32):
super(MV_SoftmaxNew, self).__init__()
self.weight = Parameter(torch.Tensor(feat_dim, num_class))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
self.margin = margin
self.mv_weight = mv_weight
self.scale = scale
self.is_am = is_am
self.cos_m = math.cos(margin)
self.sin_m = math.sin(margin)
self.threshold = math.cos(math.pi - margin)
self.mm = self.sin_m * margin
def forward(self, input_0, input_1):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
cavalleria/FaceX-Zoo
|
MV_Softmax
| false
| 6,405
|
[
"Apache-2.0"
] | 1
|
c4bf8924f1858928f8cf83efabf8ad237c67f620
|
https://github.com/cavalleria/FaceX-Zoo/tree/c4bf8924f1858928f8cf83efabf8ad237c67f620
|
ShakeResNeXt
|
import math
import torch
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBottleNeck(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
super(ShakeBottleNeck, self).__init__()
self.equal_io = in_ch == out_ch
self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
stride=stride)
self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
return nn.Sequential(nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=
False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace=False), nn.
Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=
cardinary, bias=False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace
=False), nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
nn.BatchNorm2d(out_ch))
class ShakeResNeXt(nn.Module):
def __init__(self, depth, w_base, cardinary, label):
super(ShakeResNeXt, self).__init__()
n_units = (depth - 2) // 9
n_chs = [64, 128, 256, 1024]
self.n_chs = n_chs
self.in_ch = n_chs[0]
self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
self.fc_out = nn.Linear(n_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.n_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
layers = []
mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4
for i in range(n_units):
layers.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch,
cardinary, stride=stride))
self.in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'depth': 1, 'w_base': 4, 'cardinary': 4, 'label': 4}]
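# Hedged usage sketch (an illustrative addition): it assumes the ShakeResNeXt
# class above; depth=29, w_base=64, cardinary=4 follow the usual CIFAR-style
# Shake-Shake ResNeXt configuration and are assumptions, not values from this
# file. The final avg_pool2d(h, 8) expects an 8x8 feature map, i.e. 32x32
# inputs.
if __name__ == '__main__':
    net = ShakeResNeXt(depth=29, w_base=64, cardinary=4, label=10).eval()
    with torch.no_grad():
        logits = net(torch.randn(2, 3, 32, 32))
    print(logits.shape)  # torch.Size([2, 10])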
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 1024), (1024, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.avg_pool2d.default(buf1, [8, 8], [8, 8], [0,
0], False, True, None)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (16, 1024),
(1024, 1), 0), reinterpret_tensor(primals_4, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf4)
del primals_5
return buf4, primals_1, primals_3, buf1, reinterpret_tensor(buf3, (16,
1024), (1024, 1), 0), primals_4
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBottleNeck(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
super(ShakeBottleNeck, self).__init__()
self.equal_io = in_ch == out_ch
self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
stride=stride)
self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
return nn.Sequential(nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=
False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace=False), nn.
Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=
cardinary, bias=False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace
=False), nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
nn.BatchNorm2d(out_ch))
class ShakeResNeXtNew(nn.Module):
def __init__(self, depth, w_base, cardinary, label):
super(ShakeResNeXtNew, self).__init__()
n_units = (depth - 2) // 9
n_chs = [64, 128, 256, 1024]
self.n_chs = n_chs
self.in_ch = n_chs[0]
self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
self.fc_out = nn.Linear(n_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
layers = []
mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4
for i in range(n_units):
layers.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch,
cardinary, stride=stride))
self.in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def forward(self, input_0):
primals_1 = self.c_in.weight
primals_2 = self.c_in.bias
primals_4 = self.fc_out.weight
primals_5 = self.fc_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
cdtalley/AutoML
|
ShakeResNeXt
| false
| 6,406
|
[
"MIT"
] | 1
|
918cda6bb1bd55b4ca974bdcdd59e32b2e28399d
|
https://github.com/cdtalley/AutoML/tree/918cda6bb1bd55b4ca974bdcdd59e32b2e28399d
|
p_model
|
import torch
from torch import nn
import torch.nn.functional as F
class p_model(nn.Module):
"""
input: N * C * W * H
output: N * 1 * W * H
"""
def __init__(self):
super(p_model, self).__init__()
def forward(self, x):
n, c, w, h = x.size()
x = x.view(n, c, w * h).permute(0, 2, 1)
pooled = F.avg_pool1d(x, c)
return pooled.view(n, 1, w, h)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
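# Minimal usage sketch (not part of the original module): the avg_pool1d over the
# permuted tensor is just a channel-wise mean kept as a singleton channel dimension.
def _demo_p_model():
    x = torch.rand(2, 8, 5, 5)
    pooled = p_model()(x)
    assert pooled.shape == (2, 1, 5, 5)
    assert torch.allclose(pooled, x.mean(dim=1, keepdim=True), atol=1e-06)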
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32
)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0),
class p_modelNew(nn.Module):
"""
input: N * C * W * H
output: N * 1 * W * H
"""
def __init__(self):
super(p_modelNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
cenkcorapci/visual-fashion-item-search
|
p_model
| false
| 6,407
|
[
"MIT"
] | 1
|
47b93f97383c1b7f9ec23bb4ff66f90504db3da8
|
https://github.com/cenkcorapci/visual-fashion-item-search/tree/47b93f97383c1b7f9ec23bb4ff66f90504db3da8
|
ShakeResNet
|
import math
import torch
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBlock(nn.Module):
def __init__(self, in_ch, out_ch, stride=1):
super(ShakeBlock, self).__init__()
self.equal_io = in_ch == out_ch
        self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
            stride=stride)
self.branch1 = self._make_branch(in_ch, out_ch, stride)
self.branch2 = self._make_branch(in_ch, out_ch, stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, out_ch, stride=1):
return nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(in_ch,
out_ch, 3, padding=1, stride=stride, bias=False), nn.
BatchNorm2d(out_ch), nn.ReLU(inplace=False), nn.Conv2d(out_ch,
out_ch, 3, padding=1, stride=1, bias=False), nn.BatchNorm2d(out_ch)
)
class ShakeResNet(nn.Module):
def __init__(self, depth, w_base, label):
super(ShakeResNet, self).__init__()
n_units = (depth - 2) / 6
in_chs = [16, w_base, w_base * 2, w_base * 4]
self.in_chs = in_chs
self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])
self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)
self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)
self.fc_out = nn.Linear(in_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.in_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, in_ch, out_ch, stride=1):
layers = []
for i in range(int(n_units)):
layers.append(ShakeBlock(in_ch, out_ch, stride=stride))
in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'depth': 1, 'w_base': 4, 'label': 4}]
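# Minimal shape sketch for the degenerate config from get_init_inputs: depth=1 gives
# zero ShakeBlocks, so only c_in -> relu -> avg_pool2d -> fc_out are exercised.
def _demo_shake_resnet():
    net = ShakeResNet(depth=1, w_base=4, label=4)
    out = net(torch.rand(4, 3, 64, 64))
    assert out.shape == (256, 4)  # (4 * 16 * 8 * 8) / 16 rows after the flatten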
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.avg_pool2d.default(buf1, [8, 8], [8, 8], [0,
0], False, True, None)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (256, 16),
(16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf4)
del primals_5
return buf4, primals_1, primals_3, buf1, reinterpret_tensor(buf3, (256,
16), (16, 1), 0), primals_4
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBlock(nn.Module):
def __init__(self, in_ch, out_ch, stride=1):
super(ShakeBlock, self).__init__()
self.equal_io = in_ch == out_ch
        self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
            stride=stride)
self.branch1 = self._make_branch(in_ch, out_ch, stride)
self.branch2 = self._make_branch(in_ch, out_ch, stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, out_ch, stride=1):
return nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(in_ch,
out_ch, 3, padding=1, stride=stride, bias=False), nn.
BatchNorm2d(out_ch), nn.ReLU(inplace=False), nn.Conv2d(out_ch,
out_ch, 3, padding=1, stride=1, bias=False), nn.BatchNorm2d(out_ch)
)
class ShakeResNetNew(nn.Module):
def __init__(self, depth, w_base, label):
super(ShakeResNetNew, self).__init__()
n_units = (depth - 2) / 6
in_chs = [16, w_base, w_base * 2, w_base * 4]
self.in_chs = in_chs
self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])
self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)
self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)
self.fc_out = nn.Linear(in_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_layer(self, n_units, in_ch, out_ch, stride=1):
layers = []
for i in range(int(n_units)):
layers.append(ShakeBlock(in_ch, out_ch, stride=stride))
in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def forward(self, input_0):
primals_1 = self.c_in.weight
primals_2 = self.c_in.bias
primals_4 = self.fc_out.weight
primals_5 = self.fc_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
cdtalley/AutoML
|
ShakeResNet
| false
| 6,408
|
[
"MIT"
] | 1
|
918cda6bb1bd55b4ca974bdcdd59e32b2e28399d
|
https://github.com/cdtalley/AutoML/tree/918cda6bb1bd55b4ca974bdcdd59e32b2e28399d
|
LanguageModelCriterion
|
import torch
import torch.nn as nn
from torch.autograd import *
class LanguageModelCriterion(nn.Module):
def __init__(self):
super(LanguageModelCriterion, self).__init__()
def forward(self, input, target, mask):
target = target[:, :input.size(1)]
mask = mask[:, :input.size(1)]
output = -input.gather(2, target.unsqueeze(2)).squeeze(2) * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
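# Minimal sketch of the computation (assumes `input` already holds log-probabilities,
# which is what the gather-and-negate in forward corresponds to):
def _demo_lm_criterion():
    logp = torch.log_softmax(torch.rand(2, 3, 5), dim=-1)
    target = torch.randint(0, 5, (2, 3))
    mask = torch.ones(2, 3)
    loss = LanguageModelCriterion()(logp, target, mask)
    picked = logp.gather(2, target.unsqueeze(2)).squeeze(2)
    assert torch.allclose(loss, (-picked * mask).sum() / mask.sum())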
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr2 + r0, None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = tmp13 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg1_1, arg0_1,
arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class LanguageModelCriterionNew(nn.Module):
def __init__(self):
super(LanguageModelCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
chagmgang/object_relation_transformer
|
LanguageModelCriterion
| false
| 6,409
|
[
"MIT"
] | 1
|
04b88514f97232c12b576720e4b82226751c3c48
|
https://github.com/chagmgang/object_relation_transformer/tree/04b88514f97232c12b576720e4b82226751c3c48
|
BertSelfOutput
|
from _paritybench_helpers import _mock_config
import torch
from torch import nn
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob=
0.5)}]
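# Minimal usage sketch (relies on the _mock_config helper imported above; dropout is
# set to 0 and eval() is used so the call is deterministic given the initialization):
def _demo_bert_self_output():
    cfg = _mock_config(hidden_size=4, hidden_dropout_prob=0.0)
    layer = BertSelfOutput(cfg).eval()
    hidden_states, input_tensor = torch.rand(2, 3, 4), torch.rand(2, 3, 4)
    out = layer(hidden_states, input_tensor)
    assert out.shape == (2, 3, 4)  # LayerNorm(dense(hidden_states) + input_tensor)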
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + 2)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + 3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_0[grid(64)](buf0, primals_2, primals_4,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_sub_1[grid(256)](buf2, primals_2, primals_4,
buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_2
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_5,
buf2, primals_6, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
return buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfOutputNew(nn.Module):
def __init__(self, config):
super(BertSelfOutputNew, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_5 = self.LayerNorm.weight
primals_6 = self.LayerNorm.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
caldoe/BERT-NL2SPARQL
|
BertSelfOutput
| false
| 6,410
|
[
"MIT"
] | 1
|
2e09c1aeffc855bc7f1dc8c182e21153b2bc73a8
|
https://github.com/caldoe/BERT-NL2SPARQL/tree/2e09c1aeffc855bc7f1dc8c182e21153b2bc73a8
|
Norm
|
import torch
import torch.nn as nn
import torch.onnx
class Norm(nn.Module):
def __init__(self, emb_dim, eps=1e-06):
super().__init__()
self.size = emb_dim
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
"""
inputs:
            x: input of shape (batch size, sequence length, embedding dimensions)
        outputs: Scaled, normalized x
"""
norm = (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=
True) + self.eps)
norm = self.alpha * norm + self.bias
return norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'emb_dim': 4}]
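# Minimal sketch: at initialization (alpha=1, bias=0) the module standardizes the last
# dimension with the Bessel-corrected std, unlike nn.LayerNorm's biased variance.
def _demo_norm():
    x = torch.rand(2, 3, 8)
    out = Norm(emb_dim=8)(x)
    manual = (x - x.mean(-1, keepdim=True)) / (x.std(-1, keepdim=True) + 1e-06)
    assert torch.allclose(out, manual)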
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = 3.0
tmp24 = tmp22 / tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = 1e-06
tmp27 = tmp25 + tmp26
tmp28 = tmp11 / tmp27
tmp29 = tmp0 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class NormNew(nn.Module):
def __init__(self, emb_dim, eps=1e-06):
super().__init__()
self.size = emb_dim
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, input_0):
primals_2 = self.alpha
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
chandar-lab/CriticalGradientOptimization
|
Norm
| false
| 6,411
|
[
"MIT"
] | 1
|
1af4b1df40489991289bb50bb69859a00b2c97c6
|
https://github.com/chandar-lab/CriticalGradientOptimization/tree/1af4b1df40489991289bb50bb69859a00b2c97c6
|
RewardCriterion
|
import torch
import torch.nn as nn
from torch.autograd import *
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class RewardCriterion(nn.Module):
def __init__(self):
super(RewardCriterion, self).__init__()
def forward(self, input, seq, reward):
"""
        This function computes
            -log(y_t) * reward * mask_t (where mask_t zeroes out non-words in the sequence),
        summed over the sequence and normalized by the number of unmasked positions, given
        input = predicted log-probability log(y_t)
        sequence = predicted word index
reward = ...
"""
input = to_contiguous(input).view(-1)
reward = to_contiguous(reward).view(-1)
mask = (seq > 0).float()
mask = to_contiguous(torch.cat([mask.new(mask.size(0), 1).fill_(1),
mask[:, :-1]], 1)).view(-1)
output = -input * reward * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
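# Minimal sketch of the mask construction: position 0 always counts, and position t > 0
# counts only while the previous predicted word index is > 0.
def _demo_reward_criterion():
    logp = torch.rand(2, 3)
    seq = torch.tensor([[2.0, 0.0, 0.0], [1.0, 3.0, 2.0]])
    reward = torch.ones(2, 3)
    loss = RewardCriterion()(logp, seq, reward)
    mask = torch.tensor([1.0, 1.0, 0.0, 1.0, 1.0, 1.0])
    expected = (-logp.view(-1) * reward.view(-1) * mask).sum() / mask.sum()
    assert torch.allclose(loss, expected)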
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = -tmp0
tmp3 = tmp1 * tmp2
tmp4 = r0 % 4
tl.full([1, 1], 0, tl.int64)
tmp7 = tl.full([1, 1], 1, tl.int64)
tmp8 = tmp4 < tmp7
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = tmp4 >= tmp7
tl.full([1, 1], 4, tl.int64)
tmp15 = tl.load(in_ptr2 + tl.broadcast_to(4 * (r0 // 4) + (-1 + r0 % 4),
[XBLOCK, RBLOCK]), tmp12, eviction_policy='evict_last', other=0.0)
tmp16 = 0.0
tmp17 = tmp15 > tmp16
tmp18 = tmp17.to(tl.float32)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp8, tmp11, tmp20)
tmp22 = tmp3 * tmp21
tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
tmp25 = tl.sum(tmp23, 1)[:, None]
tmp26 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp28 = tl.sum(tmp26, 1)[:, None]
tmp29 = tmp25 / tmp28
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg0_1, arg1_1,
arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class RewardCriterionNew(nn.Module):
def __init__(self):
super(RewardCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
chagmgang/object_relation_transformer
|
RewardCriterion
| false
| 6,412
|
[
"MIT"
] | 1
|
04b88514f97232c12b576720e4b82226751c3c48
|
https://github.com/chagmgang/object_relation_transformer/tree/04b88514f97232c12b576720e4b82226751c3c48
|
DiceLoss
|
import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self, weight=None, size_average=True):
super(DiceLoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
inputs = torch.sigmoid(inputs)
inputs = inputs.view(-1)
targets = targets.view(-1)
intersection = (inputs * targets).sum()
dice = (2.0 * intersection + smooth) / (inputs.sum() + targets.sum(
) + smooth)
return 1 - dice
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
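# Minimal worked example: near-perfect predictions give a Dice coefficient close to 1,
# so the loss is close to 0 (smooth=1 keeps the ratio finite for empty masks).
def _demo_dice_loss():
    logits = torch.full((1, 1, 2, 2), 10.0)  # sigmoid(10) ~ 1.0 everywhere
    target = torch.ones(1, 1, 2, 2)
    assert DiceLoss()(logits, target).item() < 0.001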
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tl.broadcast_to(tmp1, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl.broadcast_to(tmp2, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 2.0
tmp14 = tmp6 * tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = tmp9 + tmp12
tmp18 = tmp17 + tmp15
tmp19 = tmp16 / tmp18
tmp20 = tmp15 - tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(nn.Module):
def __init__(self, weight=None, size_average=True):
super(DiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
chakerouari/UNET_segmetation
|
DiceLoss
| false
| 6,413
|
[
"MIT"
] | 1
|
a7d9e9ccd31595d482f620cbf9a625a486f5f0df
|
https://github.com/chakerouari/UNET_segmetation/tree/a7d9e9ccd31595d482f620cbf9a625a486f5f0df
|
LocalSubGraphLayer
|
import torch
from torch import nn
import torch.nn.functional as F
class LocalMLP(nn.Module):
def __init__(self, dim_in: 'int', use_norm: 'bool'=True):
"""a Local 1 layer MLP
:param dim_in: feat in size
:type dim_in: int
:param use_norm: if to apply layer norm, defaults to True
:type use_norm: bool, optional
"""
super().__init__()
self.linear = nn.Linear(dim_in, dim_in, bias=not use_norm)
self.use_norm = use_norm
if use_norm:
self.norm = nn.LayerNorm(dim_in)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""forward of the module
:param x: input tensor (..., dim_in)
:type x: torch.Tensor
:return: output tensor (..., dim_in)
:rtype: torch.Tensor
"""
x = self.linear(x)
if hasattr(self, 'norm'):
x = self.norm(x)
x = F.relu(x, inplace=True)
return x
class LocalSubGraphLayer(nn.Module):
def __init__(self, dim_in: 'int', dim_out: 'int') ->None:
"""Local subgraph layer
:param dim_in: input feat size
:type dim_in: int
:param dim_out: output feat size
:type dim_out: int
"""
super(LocalSubGraphLayer, self).__init__()
self.mlp = LocalMLP(dim_in)
self.linear_remap = nn.Linear(dim_in * 2, dim_out)
def forward(self, x: 'torch.Tensor', invalid_mask: 'torch.Tensor'
) ->torch.Tensor:
"""Forward of the model
:param x: input tensor
:tensor (B,N,P,dim_in)
:param invalid_mask: invalid mask for x
:tensor invalid_mask (B,N,P)
:return: output tensor (B,N,P,dim_out)
:rtype: torch.Tensor
"""
_, num_vectors, _ = x.shape
x = self.mlp(x)
masked_x = x.masked_fill(invalid_mask[..., None] > 0, float('-inf'))
x_agg = masked_x.max(dim=1, keepdim=True).values
x_agg = x_agg.repeat(1, num_vectors, 1)
x = torch.cat([x, x_agg], dim=-1)
x = self.linear_remap(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
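# Minimal shape sketch: per-vector features are max-pooled over the vector axis,
# broadcast back, concatenated with the originals and remapped to dim_out.
def _demo_local_subgraph_layer():
    layer = LocalSubGraphLayer(dim_in=4, dim_out=4)
    x = torch.rand(2, 5, 4)      # (batch, num_vectors, dim_in)
    invalid = torch.zeros(2, 5)  # 0 keeps a vector, > 0 masks it out
    assert layer(x, invalid).shape == (2, 5, 4)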
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp10, xmask)
@triton.jit
def triton_poi_fused_gt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_masked_fill_max_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + (x0 + 32 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp5 = tl.load(in_ptr1 + (8 + x0 + 32 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp9 = tl.load(in_ptr1 + (16 + x0 + 32 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp13 = tl.load(in_ptr1 + (24 + x0 + 32 * x1), xmask)
tmp2 = float('-inf')
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp6 = tl.where(tmp4, tmp2, tmp5)
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp10 = tl.where(tmp8, tmp2, tmp9)
tmp11 = triton_helpers.maximum(tmp7, tmp10)
tmp14 = tl.where(tmp12, tmp2, tmp13)
tmp15 = triton_helpers.maximum(tmp11, tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_masked_fill_max_repeat_4(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (x0 + 8 * x3), tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](buf0, buf1, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf3 = reinterpret_tensor(buf7, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_native_layer_norm_relu_1[grid(64)](buf0, buf1,
buf2, primals_3, primals_4, buf3, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf1
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool)
triton_poi_fused_gt_2[grid(16)](primals_5, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf5 = reinterpret_tensor(buf2, (4, 1, 4), (4, 16, 1), 0)
del buf2
triton_poi_fused_masked_fill_max_3[grid(16)](buf4, buf3, buf5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf7, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_masked_fill_max_repeat_4[grid(64)](buf5, buf6, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf5
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf7, (16, 8), (
8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf8)
del primals_7
return reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0
), primals_3, primals_4, reinterpret_tensor(primals_1, (16, 4), (4,
1), 0), buf0, buf4, reinterpret_tensor(buf7, (16, 8), (8, 1), 0
), primals_6
class LocalMLP(nn.Module):
def __init__(self, dim_in: 'int', use_norm: 'bool'=True):
"""a Local 1 layer MLP
:param dim_in: feat in size
:type dim_in: int
:param use_norm: if to apply layer norm, defaults to True
:type use_norm: bool, optional
"""
super().__init__()
self.linear = nn.Linear(dim_in, dim_in, bias=not use_norm)
self.use_norm = use_norm
if use_norm:
self.norm = nn.LayerNorm(dim_in)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""forward of the module
:param x: input tensor (..., dim_in)
:type x: torch.Tensor
:return: output tensor (..., dim_in)
:rtype: torch.Tensor
"""
x = self.linear(x)
if hasattr(self, 'norm'):
x = self.norm(x)
x = F.relu(x, inplace=True)
return x
class LocalSubGraphLayerNew(nn.Module):
def __init__(self, dim_in: 'int', dim_out: 'int') ->None:
"""Local subgraph layer
:param dim_in: input feat size
:type dim_in: int
:param dim_out: output feat size
:type dim_out: int
"""
super(LocalSubGraphLayerNew, self).__init__()
self.mlp = LocalMLP(dim_in)
self.linear_remap = nn.Linear(dim_in * 2, dim_out)
def forward(self, input_0, input_1):
primals_2 = self.mlp.linear.weight
primals_3 = self.mlp.norm.weight
primals_4 = self.mlp.norm.bias
primals_6 = self.linear_remap.weight
primals_7 = self.linear_remap.bias
primals_1 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
cdicle-motional/l5kit
|
LocalSubGraphLayer
| false
| 6,414
|
[
"Apache-2.0"
] | 1
|
4dc4ee5391479bb71f0b373f39c316f9eef5a961
|
https://github.com/cdicle-motional/l5kit/tree/4dc4ee5391479bb71f0b373f39c316f9eef5a961
|
PinballLoss
|
import torch
import torch.nn as nn
class PinballLoss(nn.Module):
"""Computes the pinball loss between y and y_hat.
y: actual values in torch tensor.
y_hat: predicted values in torch tensor.
    tau: a float between 0 and 1, the slope of the pinball loss. In the context
    of quantile regression, the value of tau determines the conditional
    quantile level.
return: pinball_loss
"""
def __init__(self, tau=0.5):
super(PinballLoss, self).__init__()
self.tau = tau
def forward(self, y, y_hat):
delta_y = torch.sub(y, y_hat)
pinball = torch.max(torch.mul(self.tau, delta_y), torch.mul(self.
tau - 1, delta_y))
pinball = pinball.mean()
return pinball
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
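# Minimal worked example: with tau=0.5 the pinball loss is half the mean absolute
# error, since max(0.5 * d, -0.5 * d) = 0.5 * |d|.
def _demo_pinball_loss():
    y = torch.tensor([1.0, 2.0, 3.0])
    y_hat = torch.tensor([2.0, 2.0, 1.0])
    loss = PinballLoss(tau=0.5)(y, y_hat)
    assert torch.allclose(loss, 0.5 * (y - y_hat).abs().mean())  # 0.5 here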
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_maximum_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = 0.5
tmp4 = tmp3 * tmp2
tmp5 = -0.5
tmp6 = tmp5 * tmp2
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_maximum_mean_mul_sub_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class PinballLossNew(nn.Module):
"""Computes the pinball loss between y and y_hat.
y: actual values in torch tensor.
y_hat: predicted values in torch tensor.
    tau: a float between 0 and 1, the slope of the pinball loss. In the context
    of quantile regression, the value of tau determines the conditional
    quantile level.
return: pinball_loss
"""
def __init__(self, tau=0.5):
super(PinballLossNew, self).__init__()
self.tau = tau
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
cchallu/esrnn
|
PinballLoss
| false
| 6,415
|
[
"MIT"
] | 1
|
543ca365c70be2775a4b5863820b246071ccde3c
|
https://github.com/cchallu/esrnn/tree/543ca365c70be2775a4b5863820b246071ccde3c
|
TripletMarginLossCosine
|
import torch
from torch import nn
import torch.nn.functional as F
class TripletMarginLossCosine(nn.Module):
def __init__(self, margin=1.0):
super(TripletMarginLossCosine, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative):
d_p = 1 - F.cosine_similarity(anchor, positive).view(-1, 1)
d_n = 1 - F.cosine_similarity(anchor, negative).view(-1, 1)
dist_hinge = torch.clamp(self.margin + d_p - d_n, min=0.0)
loss = torch.mean(dist_hinge)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
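# Minimal sketch replicating the forward with F.cosine_similarity directly
# (positive == anchor, so d_p is ~0 and only the negative distance matters):
def _demo_triplet_cosine():
    anchor, negative = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    loss = TripletMarginLossCosine(margin=1.0)(anchor, anchor, negative)
    d_p = 1 - F.cosine_similarity(anchor, anchor).view(-1, 1)
    d_n = 1 - F.cosine_similarity(anchor, negative).view(-1, 1)
    assert torch.allclose(loss, torch.clamp(1.0 + d_p - d_n, min=0.0).mean())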
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x3, xmask)
tmp17 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr2 + x3, xmask)
tmp33 = tl.load(in_ptr2 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr2 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp38 = tl.load(in_ptr2 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp41 = tl.load(in_ptr2 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp17 * tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp13)
tmp30 = tmp16 / tmp29
tmp31 = tmp15 * tmp30
tmp34 = tmp33 * tmp33
tmp36 = tmp35 * tmp35
tmp37 = tmp34 + tmp36
tmp39 = tmp38 * tmp38
tmp40 = tmp37 + tmp39
tmp42 = tmp41 * tmp41
tmp43 = tmp40 + tmp42
tmp44 = libdevice.sqrt(tmp43)
tmp45 = triton_helpers.maximum(tmp44, tmp13)
tmp46 = tmp32 / tmp45
tmp47 = tmp15 * tmp46
tl.store(out_ptr0 + x3, tmp31, xmask)
tl.store(out_ptr1 + x3, tmp47, xmask)
@triton.jit
def triton_per_fused_add_clamp_mean_rsub_sub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (64 * (r0 // 16) + r0 % 16), None)
tmp1 = tl.load(in_ptr0 + (16 + 64 * (r0 // 16) + r0 % 16), None)
tmp3 = tl.load(in_ptr0 + (32 + 64 * (r0 // 16) + r0 % 16), None)
tmp5 = tl.load(in_ptr0 + (48 + 64 * (r0 // 16) + r0 % 16), None)
tmp10 = tl.load(in_ptr1 + (64 * (r0 // 16) + r0 % 16), None)
tmp11 = tl.load(in_ptr1 + (16 + 64 * (r0 // 16) + r0 % 16), None)
tmp13 = tl.load(in_ptr1 + (32 + 64 * (r0 // 16) + r0 % 16), None)
tmp15 = tl.load(in_ptr1 + (48 + 64 * (r0 // 16) + r0 % 16), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp9 = tmp8 + tmp7
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tmp7 - tmp16
tmp18 = tmp9 - tmp17
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.sum(tmp21, 1)[:, None]
tmp24 = 64.0
tmp25 = tmp23 / tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(256)](
arg1_1, arg0_1, arg2_1, buf0, buf1, 256, XBLOCK=128, num_warps=
4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_add_clamp_mean_rsub_sub_1[grid(1)](buf3, buf0,
buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class TripletMarginLossCosineNew(nn.Module):
def __init__(self, margin=1.0):
super(TripletMarginLossCosineNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
cenkcorapci/visual-fashion-item-search
|
TripletMarginLossCosine
| false
| 6,416
|
[
"MIT"
] | 1
|
47b93f97383c1b7f9ec23bb4ff66f90504db3da8
|
https://github.com/cenkcorapci/visual-fashion-item-search/tree/47b93f97383c1b7f9ec23bb4ff66f90504db3da8
|
ImgPatches
|
import torch
import torch.nn as nn
class ImgPatches(nn.Module):
def __init__(self, input_channel=3, dim=768, patch_size=4):
super().__init__()
self.patch_embed = nn.Conv2d(input_channel, dim, kernel_size=
patch_size, stride=patch_size)
def forward(self, img):
patches = self.patch_embed(img).flatten(2).transpose(1, 2)
return patches
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
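# Minimal shape sketch: a 64x64 input with patch_size=4 yields (64 / 4) ** 2 = 256
# patch tokens, each projected to `dim` channels by the strided convolution.
def _demo_img_patches():
    patches = ImgPatches(input_channel=3, dim=768, patch_size=4)(torch.rand(2, 3, 64, 64))
    assert patches.shape == (2, 256, 768)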
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 2304
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = yindex // 768
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 768 * x2 + 196608 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 256 * y3), tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (768, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (768,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((768, 3, 4, 4), (48, 1, 12, 3), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(2304, 16)](primals_1, buf0, 2304, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, buf0, stride=(4, 4),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 768, 16, 16), (196608, 1, 12288, 768))
buf3 = empty_strided_cuda((4, 768, 16, 16), (196608, 256, 16, 1),
torch.float32)
triton_poi_fused_convolution_2[grid(3072, 256)](buf2, primals_2,
buf3, 3072, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf2
del primals_2
return reinterpret_tensor(buf3, (4, 256, 768), (196608, 1, 256), 0
), buf0, buf1
class ImgPatchesNew(nn.Module):
def __init__(self, input_channel=3, dim=768, patch_size=4):
super().__init__()
self.patch_embed = nn.Conv2d(input_channel, dim, kernel_size=
patch_size, stride=patch_size)
def forward(self, input_0):
primals_1 = self.patch_embed.weight
primals_2 = self.patch_embed.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ch0n9waiu/TransCycleGAN
|
ImgPatches
| false
| 6,417
|
[
"MIT"
] | 1
|
a3e846e21101400282a9f1393c1f8d150a3d92c9
|
https://github.com/ch0n9waiu/TransCycleGAN/tree/a3e846e21101400282a9f1393c1f8d150a3d92c9
|
MultiHeadAttn
|
import torch
import torch.cuda
from torch.nn import functional as F
from torch import nn
import torch.utils.data
import torch.optim
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1,
pre_lnorm=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.scale = 1 / d_head ** 0.5
self.pre_lnorm = pre_lnorm
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, inp, attn_mask=None):
return self._forward(inp, attn_mask)
def _forward(self, inp, attn_mask=None):
residual = inp
if self.pre_lnorm:
inp = self.layer_norm(inp)
n_head, d_head = self.n_head, self.d_head
head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2)
head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
attn_score = torch.bmm(q, k.transpose(1, 2))
attn_score.mul_(self.scale)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1)
attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
attn_score.masked_fill_(attn_mask, -float('inf'))
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.bmm(attn_prob, v)
attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(inp.size(
0), inp.size(1), n_head * d_head)
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
output = residual + attn_out
else:
output = self.layer_norm(residual + attn_out)
return output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'd_model': 4, 'd_head': 4, 'dropout': 0.5}]
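# --- Hedged usage sketch (illustrative addition, not from the original repo) ---
# Assumes the MultiHeadAttn class defined above; shapes follow get_inputs()/get_init_inputs().
def _example_multi_head_attn():
    torch.manual_seed(0)
    attn = MultiHeadAttn(n_head=4, d_model=4, d_head=4, dropout=0.5)
    attn.eval()              # turn off both dropout layers for a deterministic pass
    inp = torch.rand(4, 4, 4)  # (batch, seq_len, d_model)
    out = attn(inp)            # residual + post-LayerNorm output, same shape as inp
    return out.shape           # torch.Size([4, 4, 4])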
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.cuda
from torch.nn import functional as F
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 48 * x1 + 192 * x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0 + 4 * x2 + 48 * x1 + 192 * x3), xmask)
tmp1 = tl.load(in_ptr1 + (32 + x0 + 4 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + 4 * x2 + 48 * x1 + 192 * x3), xmask)
tmp1 = tl.load(in_ptr1 + (16 + x0 + 4 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 64 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (48, 4), (4, 1))
assert_size_stride(primals_3, (48,), (1,))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 48), (48, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 48), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](buf0, primals_3, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](buf0, primals_3, buf2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf0, primals_3, buf3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_3
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_4[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = buf5
del buf5
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 4), (16,
4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_5[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_1, buf9,
buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_1, buf9,
buf10, buf11, primals_5, primals_6, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf10
del buf11
del primals_6
return buf12, primals_1, primals_5, buf6, reinterpret_tensor(buf8, (16,
16), (16, 1), 0), buf9, primals_4, reinterpret_tensor(buf2, (16, 4,
4), (16, 1, 4), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0)
class MultiHeadAttnNew(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1,
pre_lnorm=False):
super(MultiHeadAttnNew, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.scale = 1 / d_head ** 0.5
self.pre_lnorm = pre_lnorm
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
def _forward(self, inp, attn_mask=None):
residual = inp
if self.pre_lnorm:
inp = self.layer_norm(inp)
n_head, d_head = self.n_head, self.d_head
head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2)
head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
attn_score = torch.bmm(q, k.transpose(1, 2))
attn_score.mul_(self.scale)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1)
attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
attn_score.masked_fill_(attn_mask, -float('inf'))
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.bmm(attn_prob, v)
attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(inp.size(
0), inp.size(1), n_head * d_head)
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
output = residual + attn_out
else:
output = self.layer_norm(residual + attn_out)
return output
def forward(self, input_0):
primals_2 = self.qkv_net.weight
primals_3 = self.qkv_net.bias
primals_4 = self.o_net.weight
primals_5 = self.layer_norm.weight
primals_6 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
carolmanderson/NeMo
|
MultiHeadAttn
| false
| 6,418
|
[
"Apache-2.0"
] | 1
|
be7114e2d983af751e1af4119465c626682747b7
|
https://github.com/carolmanderson/NeMo/tree/be7114e2d983af751e1af4119465c626682747b7
|
FeedForward
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
class FeedForward(nn.Module):
def __init__(self, emb_dim, ff_dim=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(emb_dim, ff_dim)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(ff_dim, emb_dim)
def forward(self, x):
x = self.dropout(F.leaky_relu(self.linear_1(x)))
x = self.linear_2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'emb_dim': 4}]
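# --- Hedged usage sketch (illustrative addition, not from the original repo) ---
# Assumes the FeedForward class defined above; emb_dim=4 matches get_init_inputs().
def _example_feed_forward():
    ff = FeedForward(emb_dim=4)  # Linear(4 -> 2048) -> LeakyReLU -> Dropout -> Linear(2048 -> 4)
    ff.eval()                    # disable dropout for a deterministic pass
    x = torch.rand(4, 4, 4, 4)
    y = ff(x)                    # same shape as x: nn.Linear acts on the last dimension
    return y.shape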
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2048, 4), (4, 1))
assert_size_stride(primals_2, (2048,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 2048), (2048, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 2048), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(131072)](buf0, primals_2, buf1,
buf2, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 2048),
(2048, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1,
2048), 0), alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 2048), (2048, 1), 0), primals_4
class FeedForwardNew(nn.Module):
def __init__(self, emb_dim, ff_dim=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(emb_dim, ff_dim)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(ff_dim, emb_dim)
def forward(self, input_0):
primals_1 = self.linear_1.weight
primals_2 = self.linear_1.bias
primals_4 = self.linear_2.weight
primals_5 = self.linear_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
chandar-lab/CriticalGradientOptimization
|
FeedForward
| false
| 6,419
|
[
"MIT"
] | 1
|
1af4b1df40489991289bb50bb69859a00b2c97c6
|
https://github.com/chandar-lab/CriticalGradientOptimization/tree/1af4b1df40489991289bb50bb69859a00b2c97c6
|
RNN
|
import torch
import torch.nn as nn
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input_tensor, hidden_tensor):
combined = torch.cat((input_tensor, hidden_tensor), 1)
hidden = self.i2h(combined)
output = self.i2o(combined)
output = self.softmax(output)
return output, hidden
def init_hidden(self):
return torch.zeros(1, self.hidden_size)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
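# --- Hedged usage sketch (illustrative addition, not from the original repo) ---
# Assumes the RNN class defined above. Each step consumes one input vector and the
# previous hidden state; init_hidden() provides a zero hidden state for batch size 1.
def _example_rnn():
    rnn = RNN(input_size=4, hidden_size=4, output_size=4)
    hidden = rnn.init_hidden()      # (1, 4)
    sequence = torch.rand(3, 1, 4)  # 3 time steps, batch size 1, 4 input features
    for step in sequence:
        output, hidden = rnn(step, hidden)
    return output.shape             # (1, 4) log-probabilities over the output classes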
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused__log_softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf3
return buf4, buf1, buf0, buf4
class RNNNew(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNNNew, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def init_hidden(self):
return torch.zeros(1, self.hidden_size)
def forward(self, input_0, input_1):
primals_3 = self.i2h.weight
primals_4 = self.i2h.bias
primals_5 = self.i2o.weight
primals_6 = self.i2o.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
chauhankartik/DeepLearning-EarlySteps
|
RNN
| false
| 6,420
|
[
"MIT"
] | 1
|
44b0189cf6e81f8032a6a80cc33ff80496ebd462
|
https://github.com/chauhankartik/DeepLearning-EarlySteps/tree/44b0189cf6e81f8032a6a80cc33ff80496ebd462
|
MultiHeadAttention
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
class MultiHeadAttention(nn.Module):
def __init__(self, num_heads, emb_dim, dim_k=None, dropout=0.1):
super().__init__()
self.emb_dim = emb_dim
self.dim_k = dim_k if dim_k else emb_dim // num_heads
self.num_heads = num_heads
self.q_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.k_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.v_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(self.dim_k * num_heads, emb_dim)
def attention(self, q, k, v, dim_k, mask=None, dropout=None, explain=False
):
k = k.transpose(-2, -1)
if explain:
None
scores = torch.matmul(q, k) / math.sqrt(dim_k)
if explain:
None
if mask is not None:
mask = mask.unsqueeze(1)
if explain:
None
scores = scores.masked_fill(mask == 0, -1000000000.0)
softscores = F.softmax(scores, dim=-1)
if dropout is not None:
softscores = dropout(softscores)
output = torch.matmul(softscores, v)
return output, scores
def forward(self, q, k, v, mask=None, explain=False):
"""
inputs:
q has shape (batch size, q_sequence length, embedding dimensions)
k,v have shape (batch size, kv_sequence length, embedding dimensions)
mask of shape (batch size, 1, kv_sequence length)
explain: boolean, prints intermediate values if True
outputs: sequence of vectors, re-represented using attention
shape (batch size, q_sequence length, embedding dimensions)
use:
The encoder layer places the same source vector sequence into q,k,v
and mask into mask.
The decoder layer uses this twice, once with decoder inputs as q,k,v
            and target mask as mask, then with decoder inputs as q, encoder outputs
            as k, v and source mask as mask.
"""
batch_size = q.size(0)
q = self.q_linear(q)
k = self.k_linear(k)
v = self.v_linear(v)
if explain:
None
k = k.view(batch_size, -1, self.num_heads, self.dim_k)
q = q.view(batch_size, -1, self.num_heads, self.dim_k)
v = v.view(batch_size, -1, self.num_heads, self.dim_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
if explain:
None
attn, scores = self.attention(q, k, v, self.dim_k, mask, self.
dropout, explain)
if explain:
None
concat = attn.transpose(1, 2).contiguous().view(batch_size, -1,
self.dim_k * self.num_heads)
if explain:
None
output = self.out(concat)
if explain:
None
return output, scores
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_heads': 4, 'emb_dim': 4}]
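# --- Hedged usage sketch (illustrative addition, not from the original repo) ---
# Assumes the MultiHeadAttention class defined above. The inputs are treated as
# (batch, sequence, embedding); the mask marks which key/value positions may be attended to.
def _example_multi_head_attention():
    mha = MultiHeadAttention(num_heads=4, emb_dim=8)
    mha.eval()                      # disable dropout on the attention weights
    q = torch.rand(2, 5, 8)         # (batch, q_len, emb_dim)
    kv = torch.rand(2, 7, 8)        # (batch, kv_len, emb_dim)
    mask = torch.ones(2, 1, 7)      # 1 = attend, 0 = masked out
    out, scores = mha(q, kv, kv, mask=mask)
    return out.shape, scores.shape  # (2, 5, 8) and (2, 4, 5, 7)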
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_per_fused__softmax_div_1(in_ptr0, out_ptr2, out_ptr3, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp9 / tmp13
tl.store(out_ptr2 + (r1 + 16 * x0), tmp2, xmask)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp14, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf3, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_5, buf4, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_per_fused__softmax_div_1[grid(256)](buf5, buf6, buf9, 256,
16, XBLOCK=128, num_warps=8, num_stages=1)
del buf5
buf10 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf10, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 16, 16), (256, 16,
1), 0), reinterpret_tensor(buf10, (16, 16, 1), (16, 1, 0), 0),
out=buf11)
buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(64, 4)](buf11, buf12, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_11
return reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0
), buf6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf9, reinterpret_tensor(buf12, (64, 4), (4, 1), 0
), primals_10, reinterpret_tensor(buf10, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0)
class MultiHeadAttentionNew(nn.Module):
def __init__(self, num_heads, emb_dim, dim_k=None, dropout=0.1):
super().__init__()
self.emb_dim = emb_dim
self.dim_k = dim_k if dim_k else emb_dim // num_heads
self.num_heads = num_heads
self.q_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.k_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.v_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(self.dim_k * num_heads, emb_dim)
def attention(self, q, k, v, dim_k, mask=None, dropout=None, explain=False
):
k = k.transpose(-2, -1)
if explain:
None
scores = torch.matmul(q, k) / math.sqrt(dim_k)
if explain:
None
if mask is not None:
mask = mask.unsqueeze(1)
if explain:
None
scores = scores.masked_fill(mask == 0, -1000000000.0)
softscores = F.softmax(scores, dim=-1)
if dropout is not None:
softscores = dropout(softscores)
output = torch.matmul(softscores, v)
return output, scores
def forward(self, input_0, input_1, input_2):
primals_2 = self.q_linear.weight
primals_3 = self.q_linear.bias
primals_4 = self.k_linear.weight
primals_5 = self.k_linear.bias
primals_7 = self.v_linear.weight
primals_8 = self.v_linear.bias
primals_10 = self.out.weight
primals_11 = self.out.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
|
chandar-lab/CriticalGradientOptimization
|
MultiHeadAttention
| false
| 6,421
|
[
"MIT"
] | 1
|
1af4b1df40489991289bb50bb69859a00b2c97c6
|
https://github.com/chandar-lab/CriticalGradientOptimization/tree/1af4b1df40489991289bb50bb69859a00b2c97c6
|
Actor
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 5)
self.l2 = nn.Linear(5, 3)
self.l3 = nn.Linear(3, action_dim)
self.max_action = max_action
def forward(self, state):
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
return self.max_action * torch.tanh(self.l3(a))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'max_action': 4}]
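# --- Hedged usage sketch (illustrative addition, not from the original repo) ---
# Assumes the Actor class defined above. tanh bounds the raw output to (-1, 1) and
# max_action rescales it, so every action component lies in (-max_action, max_action).
def _example_actor():
    actor = Actor(state_dim=4, action_dim=4, max_action=4)
    state = torch.rand(8, 4)   # a batch of 8 states
    action = actor(state)      # (8, 4), each entry in (-4, 4)
    return action.min().item(), action.max().item()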
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 5
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 3
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (5, 4), (4, 1))
assert_size_stride(primals_2, (5,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (3, 5), (5, 1))
assert_size_stride(primals_5, (3,), (1,))
assert_size_stride(primals_6, (4, 3), (3, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 5), (5, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 5), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 5), (80, 20, 5, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(320)](buf1,
primals_2, buf7, 320, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 3), (3, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 5), (5, 1), 0),
reinterpret_tensor(primals_4, (5, 3), (1, 5), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 3), (48, 12, 3, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(192)](buf3,
primals_5, buf6, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 3), (
3, 1), 0), reinterpret_tensor(primals_6, (3, 4), (1, 3), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_tanh_2[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 5), (5, 1), 0), reinterpret_tensor(
buf3, (64, 3), (3, 1), 0), buf4, primals_6, buf6, primals_4, buf7
class ActorNew(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(ActorNew, self).__init__()
self.l1 = nn.Linear(state_dim, 5)
self.l2 = nn.Linear(5, 3)
self.l3 = nn.Linear(3, action_dim)
self.max_action = max_action
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
chenbq1234/CityLearn
|
Actor
| false
| 6,422
|
[
"MIT"
] | 1
|
baa162435954ecd58e7f4769a46fa9046f4d2cf6
|
https://github.com/chenbq1234/CityLearn/tree/baa162435954ecd58e7f4769a46fa9046f4d2cf6
|
BayesConv1d
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
def calculate_kl(mu_p, sig_p, mu_q, sig_q):
"""
Calculates the Kullback-Leibler divergence between two univariate Gaussians (p and q)
Args:
mu_p: mean of the Gaussian p
sig_p: standard deviation of the Gaussian p
mu_q: mean of the Gaussian q
sig_q: standard deviation of the Gaussian q
"""
kl = 0.5 * (2 * torch.log(sig_p / sig_q) - 1 + (sig_q / sig_p).pow(2) +
((mu_p - mu_q) / sig_p).pow(2)).sum()
return kl
class BayesConv1d(nn.Module):
"""
This class implements a Bayesian 1-dimensional Convolutional layer.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, bias=True, log_sigma_prior=-5, mu_prior=-1):
"""
Initializes BayesConv1d layer.
Args:
in_channels: number of input channels
out_channels: number of output channels
kernel_size: size of the convolutional kernel
            stride: stride of the convolution
            padding: zero-padding added to both sides of the input
            dilation: spacing between the kernel points of the convolution
            bias: whether to add bias
            log_sigma_prior: the initial value of the log of the standard deviation of the distribution
mu_prior: the initial value of the mean of the distribution
"""
super(BayesConv1d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.w_mu = nn.Parameter(torch.Tensor(out_channels, in_channels,
kernel_size))
self.w_log_sigma = nn.Parameter(torch.Tensor(out_channels,
in_channels, kernel_size))
self.mu_prior_init = mu_prior
self.log_sigma_prior_init = log_sigma_prior
if bias is True:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
"""
Resets the parameters of the layer
"""
init.kaiming_uniform_(self.w_mu, a=math.sqrt(5))
init.uniform_(self.w_log_sigma, self.log_sigma_prior_init - 0.1,
self.log_sigma_prior_init)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
"""
        Performs a forward pass of the input. Uses the Reparameterization trick proposed by Kingma et al.
in "Variational Dropout and the Local Reparameterization trick" to sample directly from the activations.
Args:
input: the input to be forwarded
"""
act_mu = F.conv1d(input, self.w_mu, self.bias, self.stride, self.
padding, self.dilation)
act_sigma = torch.sqrt(torch.clamp(F.conv1d(input ** 2, torch.exp(
self.w_log_sigma) ** 2, self.bias, self.stride, self.padding,
self.dilation), min=1e-16))
epsilon = torch.randn_like(act_mu)
return act_mu + act_sigma * epsilon
def kl(self):
"""
Returns the Kullback-Leibler divergence between the prior and the posterior of Bayesian layer.
"""
return calculate_kl(torch.Tensor([self.mu_prior_init]).type_as(self
.w_mu), torch.exp(torch.Tensor([self.log_sigma_prior_init]).
type_as(self.w_mu)), self.w_mu, torch.exp(self.w_log_sigma))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'stride': 1, 'padding': 4, 'dilation': 1}]
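# --- Hedged usage sketch (illustrative addition, not from the original repo) ---
# Assumes the BayesConv1d class defined above. Because the forward pass samples the
# activations (local reparameterization), two calls on the same input differ; kl()
# gives the complexity term that is typically added to the training loss.
def _example_bayes_conv1d():
    layer = BayesConv1d(in_channels=4, out_channels=4, kernel_size=3,
                        stride=1, padding=1, dilation=1)
    x = torch.rand(2, 4, 10)  # (batch, channels, length)
    out1 = layer(x)           # (2, 4, 10), one stochastic sample of the activations
    out2 = layer(x)           # a different sample from the same posterior
    return (out1 - out2).abs().max().item(), layer.kl().item()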
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_exp_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = tmp1 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_add_clamp_convolution_mul_sqrt_2(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 9
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x2, xmask)
tmp8 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = 1e-16
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp7 = libdevice.sqrt(tmp6)
tmp9 = tmp7 * tmp8
tmp10 = tmp4 + tmp9
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(in_out_ptr1 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(4,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 9), (36, 9, 1))
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_pow_0[grid(64)](primals_4, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_pow_1[grid(16)](primals_3, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 4
), (0, 4, 1), 0), buf1, stride=(1,), padding=(4,), dilation=(1,
), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 9), (36, 9, 1))
buf5 = torch.ops.aten.randn.default([4, 9], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf6 = buf5
del buf5
buf4 = buf3
del buf3
buf7 = reinterpret_tensor(buf0, (4, 9), (9, 1), 0)
del buf0
triton_poi_fused_add_clamp_convolution_mul_sqrt_2[grid(36)](buf4,
buf7, primals_2, buf6, 36, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
return buf7, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4,
4), (16, 4, 1), 0), buf1, reinterpret_tensor(buf2, (1, 4, 4), (16,
4, 1), 0), buf4, buf6
def calculate_kl(mu_p, sig_p, mu_q, sig_q):
"""
Calculates the Kullback-Leibler divergence between two univariate Gaussians (p and q)
Args:
mu_p: mean of the Gaussian p
sig_p: standard deviation of the Gaussian p
mu_q: mean of the Gaussian q
sig_q: standard deviation of the Gaussian q
"""
kl = 0.5 * (2 * torch.log(sig_p / sig_q) - 1 + (sig_q / sig_p).pow(2) +
((mu_p - mu_q) / sig_p).pow(2)).sum()
return kl
class BayesConv1dNew(nn.Module):
"""
This class implements a Bayesian 1-dimensional Convolutional layer.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, bias=True, log_sigma_prior=-5, mu_prior=-1):
"""
Initializes BayesConv1d layer.
Args:
in_channels: number of input channels
out_channels: number of output channels
kernel_size: size of the convolutional kernel
            stride: stride of the convolution
            padding: zero-padding added to both sides of the input
            dilation: spacing between the kernel points of the convolution
            bias: whether to add bias
            log_sigma_prior: the initial value of the log of the standard deviation of the distribution
mu_prior: the initial value of the mean of the distribution
"""
super(BayesConv1dNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.w_mu = nn.Parameter(torch.Tensor(out_channels, in_channels,
kernel_size))
self.w_log_sigma = nn.Parameter(torch.Tensor(out_channels,
in_channels, kernel_size))
self.mu_prior_init = mu_prior
self.log_sigma_prior_init = log_sigma_prior
if bias is True:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
"""
Resets the parameters of the layer
"""
init.kaiming_uniform_(self.w_mu, a=math.sqrt(5))
init.uniform_(self.w_log_sigma, self.log_sigma_prior_init - 0.1,
self.log_sigma_prior_init)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def kl(self):
"""
Returns the Kullback-Leibler divergence between the prior and the posterior of Bayesian layer.
"""
return calculate_kl(torch.Tensor([self.mu_prior_init]).type_as(self
.w_mu), torch.exp(torch.Tensor([self.log_sigma_prior_init]).
type_as(self.w_mu)), self.w_mu, torch.exp(self.w_log_sigma))
def forward(self, input_0):
primals_1 = self.w_mu
primals_4 = self.w_log_sigma
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
chapmanbe/uncertainty
|
BayesConv1d
| false
| 6,423
|
[
"Apache-2.0"
] | 1
|
d4eec00e937c76043d57a13ffcc9618b1e08d967
|
https://github.com/chapmanbe/uncertainty/tree/d4eec00e937c76043d57a13ffcc9618b1e08d967
|
BayesLinear
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
def calculate_kl(mu_p, sig_p, mu_q, sig_q):
"""
Calculates the Kullback-Leibler divergence between two univariate Gaussians (p and q)
Args:
mu_p: mean of the Gaussian p
sig_p: standard deviation of the Gaussian p
mu_q: mean of the Gaussian q
sig_q: standard deviation of the Gaussian q
"""
kl = 0.5 * (2 * torch.log(sig_p / sig_q) - 1 + (sig_q / sig_p).pow(2) +
((mu_p - mu_q) / sig_p).pow(2)).sum()
return kl
class BayesLinear(nn.Module):
"""
This class implements a Bayesian Linear layer, which has a distribution instead of weights.
"""
def __init__(self, in_features, out_features, bias=True,
log_sigma_prior=-5, mu_prior=-1):
"""
Initializes a BayesLinear layer.
Args:
in_features: number of input features
out_features: number of output features
bias: whether to add bias
            log_sigma_prior: the initial value of the log of the standard deviation of the distribution
mu_prior: the initial value of the mean of the distribution
"""
super(BayesLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.w_mu = nn.Parameter(torch.Tensor(out_features, in_features))
self.w_log_sigma = nn.Parameter(torch.Tensor(out_features, in_features)
)
self.mu_prior_init = mu_prior
self.log_sigma_prior_init = log_sigma_prior
if bias is True:
self.bias = nn.Parameter(torch.Tensor(out_features))
self.reset_parameters()
def reset_parameters(self):
"""
Resets the parameters of the layer
"""
init.kaiming_uniform_(self.w_mu, a=math.sqrt(5))
init.uniform_(self.w_log_sigma, self.log_sigma_prior_init - 0.1,
self.log_sigma_prior_init)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
"""
        Performs a forward pass of the input. Uses the Reparameterization trick proposed by Kingma et al.
in "Variational Dropout and the Local Reparameterization trick" to sample directly from the activations.
Args:
input: the input to be forwarded
"""
act_mu = F.linear(input, self.w_mu, self.bias)
act_sigma = torch.sqrt(F.linear(input ** 2, torch.exp(self.
w_log_sigma) ** 2) + 1e-08)
epsilon = torch.randn_like(act_mu)
return act_mu + act_sigma * epsilon
def kl(self):
"""
Returns the Kullback-Leibler divergence between the prior and the posterior of Bayesian layer.
"""
return calculate_kl(torch.Tensor([self.mu_prior_init]).type_as(self
.w_mu), torch.exp(torch.Tensor([self.log_sigma_prior_init]).
type_as(self.w_mu)), self.w_mu, torch.exp(self.w_log_sigma))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
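# --- Hedged usage sketch (illustrative addition, not from the original repo) ---
# Assumes the BayesLinear class defined above. A typical training objective is the
# task loss plus a (possibly weighted) sum of the per-layer kl() terms; the weighting
# factor below is purely illustrative.
def _example_bayes_linear():
    layer = BayesLinear(in_features=4, out_features=4)
    x = torch.rand(16, 4)
    out = layer(x)                                  # (16, 4), a single stochastic sample
    loss = out.pow(2).mean() + 1e-3 * layer.kl()    # illustrative objective only
    loss.backward()                                 # gradients reach w_mu, w_log_sigma and bias
    return loss.item()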
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_exp_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = tmp1 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_sqrt_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = 1e-08
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = tmp2 + tmp8
tl.store(in_out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_0[grid(256)](primals_3, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_exp_pow_1[grid(16)](primals_4, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(buf2, (4, 4), (1, 4), 0), out=buf3)
del buf2
buf4 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf5 = buf4
del buf4
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_mul_sqrt_2[grid(256)](buf6, primals_2, buf3,
buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf6, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, buf5
def calculate_kl(mu_p, sig_p, mu_q, sig_q):
"""
Calculates the Kullback-Leibler divergence between two univariate Gaussians (p and q)
Args:
mu_p: mean of the Gaussian p
sig_p: standard deviation of the Gaussian p
mu_q: mean of the Gaussian q
sig_q: standard deviation of the Gaussian q
"""
kl = 0.5 * (2 * torch.log(sig_p / sig_q) - 1 + (sig_q / sig_p).pow(2) +
((mu_p - mu_q) / sig_p).pow(2)).sum()
return kl
class BayesLinearNew(nn.Module):
"""
This class implements a Bayesian Linear layer, which has a distribution instead of weights.
"""
def __init__(self, in_features, out_features, bias=True,
log_sigma_prior=-5, mu_prior=-1):
"""
Initializes a BayesLinear layer.
Args:
in_features: number of input features
out_features: number of output features
bias: whether to add bias
            log_sigma_prior: the initial value of the log of the standard deviation of the distribution
mu_prior: the initial value of the mean of the distribution
"""
super(BayesLinearNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.w_mu = nn.Parameter(torch.Tensor(out_features, in_features))
self.w_log_sigma = nn.Parameter(torch.Tensor(out_features, in_features)
)
self.mu_prior_init = mu_prior
self.log_sigma_prior_init = log_sigma_prior
if bias is True:
self.bias = nn.Parameter(torch.Tensor(out_features))
self.reset_parameters()
def reset_parameters(self):
"""
Resets the parameters of the layer
"""
init.kaiming_uniform_(self.w_mu, a=math.sqrt(5))
init.uniform_(self.w_log_sigma, self.log_sigma_prior_init - 0.1,
self.log_sigma_prior_init)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def kl(self):
"""
Returns the Kullback-Leibler divergence between the prior and the posterior of Bayesian layer.
"""
return calculate_kl(torch.Tensor([self.mu_prior_init]).type_as(self
.w_mu), torch.exp(torch.Tensor([self.log_sigma_prior_init]).
type_as(self.w_mu)), self.w_mu, torch.exp(self.w_log_sigma))
def forward(self, input_0):
primals_1 = self.w_mu
primals_4 = self.w_log_sigma
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
chapmanbe/uncertainty
|
BayesLinear
| false
| 6,424
|
[
"Apache-2.0"
] | 1
|
d4eec00e937c76043d57a13ffcc9618b1e08d967
|
https://github.com/chapmanbe/uncertainty/tree/d4eec00e937c76043d57a13ffcc9618b1e08d967
|
PositionwiseFeedForward
|
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""
Layer Normalization class
"""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(features))
self.bias = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.weight * (x - mean) / (std + self.eps) + self.bias
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
d_ff (int): the hidden layer size of the second-layer
of the FNN.
dropout (float): dropout probability(0-1.0).
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.intermediate = nn.Linear(d_model, d_ff)
self.output = nn.Linear(d_ff, d_model)
self.layer_norm = LayerNorm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.relu = nn.ReLU()
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
"""
Layer definition.
Args:
input: [ batch_size, input_len, model_dim ]
Returns:
output: [ batch_size, input_len, model_dim ]
"""
inter = self.dropout_1(self.relu(self.intermediate(self.layer_norm(x)))
)
output = self.dropout_2(self.output(inter))
return output + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4}]
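# Hypothetical usage sketch (not part of the original repository): the block is
# shape preserving, mapping (batch, seq_len, d_model) back to the same shape and
# adding the input as a residual; the dimensions below are illustrative only.
def _positionwise_ffn_demo():
    ffn = PositionwiseFeedForward(d_model=8, d_ff=32, dropout=0.0)
    x = torch.rand(2, 5, 8)
    assert ffn(x).shape == x.shape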
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf2,
primals_5, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_add_2[grid(256)](buf4, primals_7, primals_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return buf4, primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), primals_6, buf5, primals_4
class LayerNorm(nn.Module):
"""
Layer Normalization class
"""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(features))
self.bias = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.weight * (x - mean) / (std + self.eps) + self.bias
class PositionwiseFeedForwardNew(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
d_ff (int): the hidden layer size of the second-layer
of the FNN.
dropout (float): dropout probability(0-1.0).
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForwardNew, self).__init__()
self.intermediate = nn.Linear(d_model, d_ff)
self.output = nn.Linear(d_ff, d_model)
self.layer_norm = LayerNorm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.relu = nn.ReLU()
self.dropout_2 = nn.Dropout(dropout)
def forward(self, input_0):
primals_4 = self.intermediate.weight
primals_2 = self.intermediate.bias
primals_6 = self.output.weight
primals_3 = self.output.bias
primals_5 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
chengjunyan1/Graph-Sparse-Transformer
|
PositionwiseFeedForward
| false
| 6,425
|
[
"Apache-2.0"
] | 1
|
2c3b77f81789ca80e0c30c32f0c702b2d3bac048
|
https://github.com/chengjunyan1/Graph-Sparse-Transformer/tree/2c3b77f81789ca80e0c30c32f0c702b2d3bac048
|
Critic
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 7)
self.l2 = nn.Linear(7, 6)
self.l3 = nn.Linear(6, 1)
self.l4 = nn.Linear(state_dim + action_dim, 7)
self.l5 = nn.Linear(7, 6)
self.l6 = nn.Linear(6, 1)
def forward(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(sa))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
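# Hypothetical usage sketch (not part of the original repository): the twin
# critics return two independent Q-value estimates for the same (state, action)
# pair, while Q1 exposes only the first head (as in TD3-style algorithms).
def _critic_demo():
    critic = Critic(state_dim=4, action_dim=4)
    state, action = torch.rand(4, 4), torch.rand(4, 4)
    q1, q2 = critic(state, action)
    assert q1.shape == (4, 1) and q2.shape == (4, 1)
    assert critic.Q1(state, action).shape == (4, 1)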
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 7
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 24
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 6
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (7, 8), (8, 1))
assert_size_stride(primals_4, (7,), (1,))
assert_size_stride(primals_5, (6, 7), (7, 1))
assert_size_stride(primals_6, (6,), (1,))
assert_size_stride(primals_7, (1, 6), (6, 1))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (7, 8), (8, 1))
assert_size_stride(primals_10, (7,), (1,))
assert_size_stride(primals_11, (6, 7), (7, 1))
assert_size_stride(primals_12, (6,), (1,))
assert_size_stride(primals_13, (1, 6), (6, 1))
assert_size_stride(primals_14, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 7), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(28)](buf2, primals_4, 28, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (7, 6), (1, 7
), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_2[grid(24)](buf4, primals_6, 24, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(6, 1), (1, 6), 0), alpha=1, beta=1, out=buf6)
del primals_8
buf7 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 7), (1, 8
), 0), out=buf7)
del primals_9
buf8 = buf7
del buf7
triton_poi_fused_relu_1[grid(28)](buf8, primals_10, 28, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_10
buf9 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (7, 6), (1,
7), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_relu_2[grid(24)](buf10, primals_12, 24, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_12
buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(
primals_13, (6, 1), (1, 6), 0), alpha=1, beta=1, out=buf12)
del primals_14
return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13,
primals_11, primals_7, primals_5)
class CriticNew(nn.Module):
def __init__(self, state_dim, action_dim):
super(CriticNew, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 7)
self.l2 = nn.Linear(7, 6)
self.l3 = nn.Linear(6, 1)
self.l4 = nn.Linear(state_dim + action_dim, 7)
self.l5 = nn.Linear(7, 6)
self.l6 = nn.Linear(6, 1)
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
def forward(self, input_0, input_1):
primals_3 = self.l1.weight
primals_4 = self.l1.bias
primals_5 = self.l2.weight
primals_6 = self.l2.bias
primals_7 = self.l3.weight
primals_8 = self.l3.bias
primals_9 = self.l4.weight
primals_10 = self.l4.bias
primals_11 = self.l5.weight
primals_12 = self.l5.bias
primals_13 = self.l6.weight
primals_14 = self.l6.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1]
|
chenbq1234/CityLearn
|
Critic
| false
| 6,426
|
[
"MIT"
] | 1
|
baa162435954ecd58e7f4769a46fa9046f4d2cf6
|
https://github.com/chenbq1234/CityLearn/tree/baa162435954ecd58e7f4769a46fa9046f4d2cf6
|
FM
|
import torch
import torch.nn as nn
from sklearn.metrics import *
class FM(nn.Module):
"""Factorization Machine models pairwise (order-2) feature interactions
without linear term and bias.
Input shape
- 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 2D tensor with shape: ``(batch_size, 1)``.
References
- [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
"""
def __init__(self):
super(FM, self).__init__()
def forward(self, inputs):
fm_input = inputs
square_of_sum = torch.pow(torch.sum(fm_input, dim=1, keepdim=True), 2)
sum_of_square = torch.sum(fm_input * fm_input, dim=1, keepdim=True)
cross_term = square_of_sum - sum_of_square
cross_term = 0.5 * torch.sum(cross_term, dim=2, keepdim=False)
return cross_term
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
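# Hypothetical reference check (not part of the original repository): verifies
# that the square-of-sum minus sum-of-square trick above equals the explicit sum
# over all field pairs i < j of their pairwise inner products. Shapes are
# illustrative: (batch_size, field_size, embedding_size) = (2, 5, 8).
def _fm_reference_check():
    def explicit_pairwise(fm_input):
        bs, n, _ = fm_input.shape
        out = torch.zeros(bs, 1)
        for i in range(n):
            for j in range(i + 1, n):
                out += (fm_input[:, i, :] * fm_input[:, j, :]).sum(dim=1, keepdim=True)
        return out
    x = torch.rand(2, 5, 8)
    assert torch.allclose(FM()(x), explicit_pairwise(x), atol=1e-5)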
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
tmp33 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp34 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp36 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp38 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
tmp50 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
tmp51 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
tmp53 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
tmp55 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp0 * tmp0
tmp9 = tmp1 * tmp1
tmp10 = tmp8 + tmp9
tmp11 = tmp3 * tmp3
tmp12 = tmp10 + tmp11
tmp13 = tmp5 * tmp5
tmp14 = tmp12 + tmp13
tmp15 = tmp7 - tmp14
tmp18 = tmp16 + tmp17
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = tmp22 * tmp22
tmp24 = tmp16 * tmp16
tmp25 = tmp17 * tmp17
tmp26 = tmp24 + tmp25
tmp27 = tmp19 * tmp19
tmp28 = tmp26 + tmp27
tmp29 = tmp21 * tmp21
tmp30 = tmp28 + tmp29
tmp31 = tmp23 - tmp30
tmp32 = tmp15 + tmp31
tmp35 = tmp33 + tmp34
tmp37 = tmp35 + tmp36
tmp39 = tmp37 + tmp38
tmp40 = tmp39 * tmp39
tmp41 = tmp33 * tmp33
tmp42 = tmp34 * tmp34
tmp43 = tmp41 + tmp42
tmp44 = tmp36 * tmp36
tmp45 = tmp43 + tmp44
tmp46 = tmp38 * tmp38
tmp47 = tmp45 + tmp46
tmp48 = tmp40 - tmp47
tmp49 = tmp32 + tmp48
tmp52 = tmp50 + tmp51
tmp54 = tmp52 + tmp53
tmp56 = tmp54 + tmp55
tmp57 = tmp56 * tmp56
tmp58 = tmp50 * tmp50
tmp59 = tmp51 * tmp51
tmp60 = tmp58 + tmp59
tmp61 = tmp53 * tmp53
tmp62 = tmp60 + tmp61
tmp63 = tmp55 * tmp55
tmp64 = tmp62 + tmp63
tmp65 = tmp57 - tmp64
tmp66 = tmp49 + tmp65
tmp67 = 0.5
tmp68 = tmp66 * tmp67
tl.store(in_out_ptr0 + x2, tmp68, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_pow_sub_sum_0[grid(16)](buf1, arg0_1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return buf1,
class FMNew(nn.Module):
"""Factorization Machine models pairwise (order-2) feature interactions
without linear term and bias.
Input shape
- 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 2D tensor with shape: ``(batch_size, 1)``.
References
- [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
"""
def __init__(self):
super(FMNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
chenkkkk/DeepCTR-PyTorch
|
FM
| false
| 6,427
|
[
"Apache-2.0"
] | 1
|
a10a3ace4ad79171e7fb182407b3e4d22bf753e7
|
https://github.com/chenkkkk/DeepCTR-PyTorch/tree/a10a3ace4ad79171e7fb182407b3e4d22bf753e7
|
USConv2d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class USConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, us=[False, False]):
super(USConv2d, self).__init__(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.width_mult = None
self.us = us
def forward(self, inputs):
in_channels = inputs.shape[1] // self.groups if self.us[0
] else self.in_channels // self.groups
out_channels = int(self.out_channels * self.width_mult) if self.us[1
] else self.out_channels
weight = self.weight[:out_channels, :in_channels, :, :]
bias = self.bias[:out_channels] if self.bias is not None else self.bias
y = F.conv2d(inputs, weight, bias, self.stride, self.padding, self.
dilation, self.groups)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
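# Hypothetical usage sketch (not part of the original repository): with
# us=[True, True] the layer slices its weight to the (possibly narrower) input
# channel count of the incoming tensor and to int(out_channels * width_mult)
# output channels, i.e. the slimmable behaviour implemented in forward() above.
def _usconv2d_demo():
    conv = USConv2d(8, 16, 3, padding=1, us=[True, True])
    conv.width_mult = 0.5
    y = conv(torch.rand(2, 4, 8, 8))  # only 4 of the 8 declared input channels are used
    assert y.shape == (2, 8, 8, 8)    # int(16 * 0.5) = 8 output channels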
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class USConv2dNew(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, us=[False, False]):
super(USConv2dNew, self).__init__(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.width_mult = None
self.us = us
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
chenbong/torchsummaryDynamic
|
USConv2d
| false
| 6,428
|
[
"MIT"
] | 1
|
48ad7e46c4c762dda335b496313ed63b76507b59
|
https://github.com/chenbong/torchsummaryDynamic/tree/48ad7e46c4c762dda335b496313ed63b76507b59
|
DenseModel
|
import torch
import torch.nn as nn
class DenseModel(nn.Module):
def __init__(self, input_dim, num_classes=2):
super(DenseModel, self).__init__()
self.fc1 = nn.Linear(input_dim, 400)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(400, 400)
self.relu2 = nn.ReLU(inplace=True)
if num_classes == 2:
self.fc3 = nn.Linear(400, 1)
else:
self.fc3 = nn.Linear(400, num_classes)
def forward(self, x):
x = self.relu1(self.fc1(x))
x = self.relu2(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 400
x1 = xindex // 400
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 400 * x1 + 1600 * (x1 % 4 // 4) + 6400 *
((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_threshold_backward_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x6 = xindex % 1600
x7 = xindex // 1600
tmp0 = tl.load(in_ptr0 + x5, xmask)
tmp1 = 0.0
tmp2 = tmp0 <= tmp1
tl.store(out_ptr0 + (x6 + 1664 * x7), tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (400, 400), (400, 1))
assert_size_stride(primals_5, (400,), (1,))
assert_size_stride(primals_6, (1, 400), (400, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(25600)](buf1, primals_2, 25600, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
triton_poi_fused_view_1[grid(25600)](buf1, buf2, 25600, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (400, 400), (
1, 400), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf3
triton_poi_fused_relu_0[grid(25600)](buf4, primals_5, 25600, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
triton_poi_fused_view_1[grid(25600)](buf4, buf5, 25600, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6,
(400, 1), (1, 400), 0), alpha=1, beta=1, out=buf7)
del primals_7
buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_threshold_backward_2[grid(25600)](buf4, buf8,
25600, XBLOCK=128, num_warps=4, num_stages=1)
del buf4
buf9 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_threshold_backward_2[grid(25600)](buf1, buf9,
25600, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
return reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, buf5, primals_6, buf8, primals_4, buf9
class DenseModelNew(nn.Module):
def __init__(self, input_dim, num_classes=2):
super(DenseModelNew, self).__init__()
self.fc1 = nn.Linear(input_dim, 400)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(400, 400)
self.relu2 = nn.ReLU(inplace=True)
if num_classes == 2:
self.fc3 = nn.Linear(400, 1)
else:
self.fc3 = nn.Linear(400, num_classes)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
chawins/adv-exp
|
DenseModel
| false
| 6,429
|
[
"MIT"
] | 1
|
5423e135c5599e4ec2bf90372916d8d05c89f285
|
https://github.com/chawins/adv-exp/tree/5423e135c5599e4ec2bf90372916d8d05c89f285
|
PredictionLayer
|
import torch
import torch.nn as nn
from sklearn.metrics import *
class PredictionLayer(nn.Module):
"""
Arguments
- **task**: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
         - **use_bias**: bool. Whether to add a bias term or not.
"""
def __init__(self, task='binary', use_bias=True, **kwargs):
if task not in ['binary', 'multiclass', 'regression']:
raise ValueError('task must be binary,multiclass or regression')
super(PredictionLayer, self).__init__()
self.use_bias = use_bias
self.task = task
if self.use_bias:
self.bias = nn.Parameter(torch.zeros((1,)))
def forward(self, X):
output = X
if self.use_bias:
output += self.bias
if self.task == 'binary':
output = torch.sigmoid(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
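# Hypothetical usage sketch (not part of the original repository): with
# task='binary' the learnable scalar bias is added (in place, via `output +=`)
# and a sigmoid is applied, so every output lies strictly in (0, 1).
def _prediction_layer_demo():
    head = PredictionLayer(task='binary')
    probs = head(torch.zeros(3, 1))
    assert ((probs > 0) & (probs < 1)).all()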
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_sigmoid_0[grid(256)](primals_1, primals_2,
buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0, buf1, buf1
class PredictionLayerNew(nn.Module):
"""
Arguments
- **task**: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
         - **use_bias**: bool. Whether to add a bias term or not.
"""
def __init__(self, task='binary', use_bias=True, **kwargs):
if task not in ['binary', 'multiclass', 'regression']:
raise ValueError('task must be binary,multiclass or regression')
super(PredictionLayerNew, self).__init__()
self.use_bias = use_bias
self.task = task
if self.use_bias:
self.bias = nn.Parameter(torch.zeros((1,)))
def forward(self, input_0):
primals_2 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
chenkkkk/DeepCTR-PyTorch
|
PredictionLayer
| false
| 6,430
|
[
"Apache-2.0"
] | 1
|
a10a3ace4ad79171e7fb182407b3e4d22bf753e7
|
https://github.com/chenkkkk/DeepCTR-PyTorch/tree/a10a3ace4ad79171e7fb182407b3e4d22bf753e7
|
NPairLoss
|
import torch
class NPairLoss(torch.nn.Module):
def __init__(self, l2=0.05):
"""
Basic N-Pair Loss as proposed in 'Improved Deep Metric Learning with Multi-class N-pair Loss Objective'
Args:
            l2: float, weighting parameter for the weight penalty applied because the embeddings are not normalized.
Returns:
Nothing!
"""
super(NPairLoss, self).__init__()
self.l2 = l2
def npair_distance(self, anchor, positive, negatives):
"""
Compute basic N-Pair loss.
Args:
            anchor, positive, negatives: torch.Tensor(), resp. embeddings for the anchor, positive and negative samples.
Returns:
n-pair loss (torch.Tensor())
"""
return torch.log(1 + torch.sum(torch.exp(anchor.reshape(1, -1).mm((
negatives - positive).transpose(0, 1)))))
def weightsum(self, anchor, positive):
"""
Compute weight penalty.
NOTE: Only need to penalize anchor and positive since the negatives are created based on these.
Args:
anchor, positive: torch.Tensor(), resp. embeddings for anchor and positive samples.
Returns:
torch.Tensor(), Weight penalty
"""
return torch.sum(anchor ** 2 + positive ** 2)
def forward(self, batch):
"""
Args:
batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
Returns:
n-pair loss (torch.Tensor(), batch-averaged)
"""
loss = torch.stack([self.npair_distance(npair[0], npair[1], npair[2
:]) for npair in batch])
loss = loss + self.l2 * torch.mean(torch.stack([self.weightsum(
npair[0], npair[1]) for npair in batch]))
return torch.mean(loss)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
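# Hypothetical usage sketch (not part of the original repository): each row of
# `batch` is expected to hold [anchor, positive, negative_1, ..., negative_k]
# embeddings, i.e. a (batch_size, 2 + k, embed_dim) tensor; the shapes below are
# illustrative only.
def _npair_loss_demo():
    loss_fn = NPairLoss(l2=0.05)
    batch = torch.rand(4, 4, 4)  # 4 n-pairs: anchor, positive and 2 negatives each
    loss = loss_fn(batch)
    assert loss.dim() == 0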
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (8 + x2), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_per_fused_exp_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl_math.exp(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None)
@triton.jit
def triton_poi_fused_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (24 + x2), xmask)
tmp1 = tl.load(in_ptr0 + (20 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_sub_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (40 + x2), xmask)
tmp1 = tl.load(in_ptr0 + (36 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_sub_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (56 + x2), xmask)
tmp1 = tl.load(in_ptr0 + (52 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_per_fused_add_pow_stack_sum_5(in_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr0 + (4 + r0), None)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None)
@triton.jit
def triton_per_fused_add_pow_stack_sum_6(in_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (16 + r0), None)
tmp2 = tl.load(in_ptr0 + (20 + r0), None)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None)
@triton.jit
def triton_per_fused_add_pow_stack_sum_7(in_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (32 + r0), None)
tmp2 = tl.load(in_ptr0 + (36 + r0), None)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None)
@triton.jit
def triton_per_fused_add_pow_stack_sum_8(in_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (48 + r0), None)
tmp2 = tl.load(in_ptr0 + (52 + r0), None)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None)
@triton.jit
def triton_per_fused_add_mean_mul_stack_9(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp5 = tl.load(in_ptr0 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp16 = tl.load(in_ptr1 + 0)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp26 = tl.load(in_ptr2 + 0)
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp35 = tl.load(in_ptr3 + 0)
tmp36 = tl.broadcast_to(tmp35, [XBLOCK, RBLOCK])
tmp44 = tl.load(in_ptr4 + r0, None)
tmp0 = r0
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tl_math.log(tmp8)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1, 1], 2, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp18 = tmp17 + tmp7
tmp19 = tl_math.log(tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tmp0 >= tmp13
tmp23 = tl.full([1, 1], 3, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tmp22 & tmp24
tmp28 = tmp27 + tmp7
tmp29 = tl_math.log(tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp25, tmp29, tmp30)
tmp32 = tmp0 >= tmp23
tl.full([1, 1], 4, tl.int64)
tmp37 = tmp36 + tmp7
tmp38 = tl_math.log(tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp25, tmp31, tmp40)
tmp42 = tl.where(tmp15, tmp21, tmp41)
tmp43 = tl.where(tmp4, tmp11, tmp42)
tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK])
tmp47 = tl.sum(tmp45, 1)[:, None]
tmp48 = 4.0
tmp49 = tmp47 / tmp48
tmp50 = 0.05
tmp51 = tmp49 * tmp50
tmp52 = tmp43 + tmp51
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp55 = tl.sum(tmp53, 1)[:, None]
tmp56 = tmp55 / tmp48
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp56, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((2, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(8)](arg0_1, buf0, 8, XBLOCK=8,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((1, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg0_1, (1, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 2), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_exp_sum_1[grid(1)](buf1, buf2, 1, 2, XBLOCK=1,
num_warps=2, num_stages=1)
buf3 = buf0
del buf0
triton_poi_fused_sub_2[grid(8)](arg0_1, buf3, 8, XBLOCK=8,
num_warps=1, num_stages=1)
buf4 = buf1
del buf1
extern_kernels.mm(reinterpret_tensor(arg0_1, (1, 4), (4, 1), 16),
reinterpret_tensor(buf3, (4, 2), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_exp_sum_1[grid(1)](buf4, buf5, 1, 2, XBLOCK=1,
num_warps=2, num_stages=1)
buf6 = buf3
del buf3
triton_poi_fused_sub_3[grid(8)](arg0_1, buf6, 8, XBLOCK=8,
num_warps=1, num_stages=1)
buf7 = buf4
del buf4
extern_kernels.mm(reinterpret_tensor(arg0_1, (1, 4), (4, 1), 32),
reinterpret_tensor(buf6, (4, 2), (1, 4), 0), out=buf7)
buf8 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_exp_sum_1[grid(1)](buf7, buf8, 1, 2, XBLOCK=1,
num_warps=2, num_stages=1)
buf9 = buf6
del buf6
triton_poi_fused_sub_4[grid(8)](arg0_1, buf9, 8, XBLOCK=8,
num_warps=1, num_stages=1)
buf10 = buf7
del buf7
extern_kernels.mm(reinterpret_tensor(arg0_1, (1, 4), (4, 1), 48),
reinterpret_tensor(buf9, (4, 2), (1, 4), 0), out=buf10)
del buf9
buf11 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_exp_sum_1[grid(1)](buf10, buf11, 1, 2, XBLOCK=1,
num_warps=2, num_stages=1)
del buf10
buf21 = empty_strided_cuda((4,), (1,), torch.float32)
buf17 = reinterpret_tensor(buf21, (1,), (1,), 0)
triton_per_fused_add_pow_stack_sum_5[grid(1)](arg0_1, buf17, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
buf18 = reinterpret_tensor(buf21, (1,), (1,), 1)
triton_per_fused_add_pow_stack_sum_6[grid(1)](arg0_1, buf18, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
buf19 = reinterpret_tensor(buf21, (1,), (1,), 2)
triton_per_fused_add_pow_stack_sum_7[grid(1)](arg0_1, buf19, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
buf20 = reinterpret_tensor(buf21, (1,), (1,), 3)
triton_per_fused_add_pow_stack_sum_8[grid(1)](arg0_1, buf20, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
buf22 = empty_strided_cuda((), (), torch.float32)
buf23 = buf22
del buf22
buf24 = buf23
del buf23
triton_per_fused_add_mean_mul_stack_9[grid(1)](buf24, buf2, buf5,
buf8, buf11, buf21, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf11
del buf17
del buf18
del buf19
del buf2
del buf20
del buf21
del buf5
del buf8
return buf24,
class NPairLossNew(torch.nn.Module):
def __init__(self, l2=0.05):
"""
Basic N-Pair Loss as proposed in 'Improved Deep Metric Learning with Multi-class N-pair Loss Objective'
Args:
            l2: float, weighting parameter for the weight penalty applied because the embeddings are not normalized.
Returns:
Nothing!
"""
super(NPairLossNew, self).__init__()
self.l2 = l2
def npair_distance(self, anchor, positive, negatives):
"""
Compute basic N-Pair loss.
Args:
            anchor, positive, negatives: torch.Tensor(), resp. embeddings for the anchor, positive and negative samples.
Returns:
n-pair loss (torch.Tensor())
"""
return torch.log(1 + torch.sum(torch.exp(anchor.reshape(1, -1).mm((
negatives - positive).transpose(0, 1)))))
def weightsum(self, anchor, positive):
"""
Compute weight penalty.
NOTE: Only need to penalize anchor and positive since the negatives are created based on these.
Args:
anchor, positive: torch.Tensor(), resp. embeddings for anchor and positive samples.
Returns:
torch.Tensor(), Weight penalty
"""
return torch.sum(anchor ** 2 + positive ** 2)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
bm2-lab/scPrivacy
|
NPairLoss
| false
| 6,431
|
[
"MIT"
] | 1
|
444c8f3a5e7b890c299cd823359e5414f73d6205
|
https://github.com/bm2-lab/scPrivacy/tree/444c8f3a5e7b890c299cd823359e5414f73d6205
|
InnerProductLayer
|
import torch
import torch.nn as nn
from sklearn.metrics import *
class InnerProductLayer(nn.Module):
"""InnerProduct Layer used in PNN that compute the element-wise
product or inner product between feature vectors.
Input shape
- a list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output shape
- 3D tensor with shape: ``(batch_size, N*(N-1)/2 ,1)`` if use reduce_sum. or 3D tensor with shape:
``(batch_size, N*(N-1)/2, embedding_size )`` if not use reduce_sum.
Arguments
        - **reduce_sum**: bool. Whether to return the inner product or the element-wise product
References
- [Qu Y, Cai H, Ren K, et al. Product-based neural networks for user response prediction[C]//
Data Mining (ICDM), 2016 IEEE 16th International Conference on. IEEE, 2016: 1149-1154.]
(https://arxiv.org/pdf/1611.00144.pdf)"""
def __init__(self, reduce_sum=True, device='cpu'):
super(InnerProductLayer, self).__init__()
self.reduce_sum = reduce_sum
self
def forward(self, inputs):
embed_list = inputs
row = []
col = []
num_inputs = len(embed_list)
for i in range(num_inputs - 1):
for j in range(i + 1, num_inputs):
row.append(i)
col.append(j)
p = torch.cat([embed_list[idx] for idx in row], dim=1)
q = torch.cat([embed_list[idx] for idx in col], dim=1)
inner_product = p * q
if self.reduce_sum:
inner_product = torch.sum(inner_product, dim=2, keepdim=True)
return inner_product
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
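# Hypothetical usage sketch (not part of the original repository): given N field
# embeddings of shape (batch, 1, embed), the layer returns one value per
# unordered field pair when reduce_sum=True, i.e. N*(N-1)/2 values per sample.
def _inner_product_layer_demo():
    embeds = [torch.rand(2, 1, 8) for _ in range(4)]
    out = InnerProductLayer(reduce_sum=True)(embeds)
    assert out.shape == (2, 6, 1)  # 4 * (4 - 1) / 2 = 6 pairs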
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 24
x0 = xindex % 4
x2 = xindex // 96
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 16 * x2), tmp14 & xmask,
other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (64 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 &
xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (64 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 &
xmask, other=0.0)
tmp26 = tmp0 >= tmp22
tl.full([1], 24, tl.int64)
tmp29 = tl.load(in_ptr0 + (128 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 &
xmask, other=0.0)
tmp30 = tl.where(tmp24, tmp25, tmp29)
tmp31 = tl.where(tmp19, tmp20, tmp30)
tmp32 = tl.where(tmp14, tmp15, tmp31)
tmp33 = tl.where(tmp9, tmp10, tmp32)
tmp34 = tl.where(tmp4, tmp5, tmp33)
tl.store(out_ptr0 + x3, tmp34, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 24
x0 = xindex % 4
x2 = xindex // 96
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (64 + x0 + 4 * x1 + 16 * x2), tmp4 & xmask,
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (128 + x0 + 4 * (-4 + x1) + 16 * x2), tmp9 &
xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (192 + x0 + 4 * (-8 + x1) + 16 * x2), tmp14 &
xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (128 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 &
xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (192 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 &
xmask, other=0.0)
tmp26 = tmp0 >= tmp22
tl.full([1], 24, tl.int64)
tmp29 = tl.load(in_ptr0 + (192 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 &
xmask, other=0.0)
tmp30 = tl.where(tmp24, tmp25, tmp29)
tmp31 = tl.where(tmp19, tmp20, tmp30)
tmp32 = tl.where(tmp14, tmp15, tmp31)
tmp33 = tl.where(tmp9, tmp10, tmp32)
tmp34 = tl.where(tmp4, tmp5, tmp33)
tl.store(out_ptr0 + x3, tmp34, xmask)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x0, tmp14, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32)
triton_poi_fused_cat_1[grid(384)](arg0_1, buf1, 384, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 24, 1), (24, 1, 1), torch.float32)
triton_poi_fused_mul_sum_2[grid(96)](buf0, buf1, buf2, 96, XBLOCK=
128, num_warps=4, num_stages=1)
del buf0
del buf1
return buf2,
class InnerProductLayerNew(nn.Module):
"""InnerProduct Layer used in PNN that compute the element-wise
product or inner product between feature vectors.
Input shape
- a list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
Output shape
- 3D tensor with shape: ``(batch_size, N*(N-1)/2 ,1)`` if use reduce_sum. or 3D tensor with shape:
``(batch_size, N*(N-1)/2, embedding_size )`` if not use reduce_sum.
Arguments
        - **reduce_sum**: bool. Whether to return the inner product or the element-wise product
References
- [Qu Y, Cai H, Ren K, et al. Product-based neural networks for user response prediction[C]//
Data Mining (ICDM), 2016 IEEE 16th International Conference on. IEEE, 2016: 1149-1154.]
(https://arxiv.org/pdf/1611.00144.pdf)"""
def __init__(self, reduce_sum=True, device='cpu'):
super(InnerProductLayerNew, self).__init__()
self.reduce_sum = reduce_sum
self
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
chenkkkk/DeepCTR-PyTorch
|
InnerProductLayer
| false
| 6,432
|
[
"Apache-2.0"
] | 1
|
a10a3ace4ad79171e7fb182407b3e4d22bf753e7
|
https://github.com/chenkkkk/DeepCTR-PyTorch/tree/a10a3ace4ad79171e7fb182407b3e4d22bf753e7
|
DilateContourLoss
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class DilateContourLoss(nn.Module):
def __init__(self):
super(DilateContourLoss, self).__init__()
self.kernel = np.ones((3, 3), np.uint8)
def forward(self, y_pred, y_true):
assert y_pred.size() == y_true.size()
Dilate_y_pred = F.max_pool2d(y_pred, kernel_size=3, stride=1, padding=1
)
MissImg = torch.clamp(y_true - Dilate_y_pred, 0, 1)
Dilate_y_true = F.max_pool2d(y_true, kernel_size=3, stride=1, padding=1
)
RedunImg = torch.clamp(y_pred - Dilate_y_true, 0, 1)
Loss = (MissImg.sum() + RedunImg.sum()) / y_true.sum()
return Loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
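# Hypothetical sanity check (not part of the original repository): the 3x3 max
# pooling acts as a one-pixel binary dilation, so when prediction and target
# coincide both the "missed" and "redundant" maps are empty and the loss is zero.
def _dilate_contour_loss_demo():
    mask = torch.zeros(1, 1, 8, 8)
    mask[0, 0, 2:6, 2:6] = 1.0
    loss = DilateContourLoss()(mask, mask)
    assert torch.isclose(loss, torch.tensor(0.0))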
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_div_max_pool2d_with_indices_sub_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex // 4 % 4
r0 = rindex % 4
r3 = rindex
tmp69 = tl.load(in_ptr1 + r3, None)
tmp81 = tl.load(in_ptr0 + r3, None)
tmp0 = -1 + r1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + r0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + tl.broadcast_to(-5 + r3, [RBLOCK]), tmp10,
other=float('-inf'))
tmp12 = r0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + tl.broadcast_to(-4 + r3, [RBLOCK]), tmp16,
other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + r0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + tl.broadcast_to(-3 + r3, [RBLOCK]), tmp23,
other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = r1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + tl.broadcast_to(-1 + r3, [RBLOCK]), tmp30,
other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + tl.broadcast_to(r3, [RBLOCK]), tmp33, other=
float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + tl.broadcast_to(1 + r3, [RBLOCK]), tmp36,
other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + r1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + tl.broadcast_to(3 + r3, [RBLOCK]), tmp43,
other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + tl.broadcast_to(4 + r3, [RBLOCK]), tmp46,
other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + tl.broadcast_to(5 + r3, [RBLOCK]), tmp49,
other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tl.load(in_ptr1 + tl.broadcast_to(-5 + r3, [RBLOCK]), tmp10,
other=float('-inf'))
tmp53 = tl.load(in_ptr1 + tl.broadcast_to(-4 + r3, [RBLOCK]), tmp16,
other=float('-inf'))
tmp54 = triton_helpers.maximum(tmp53, tmp52)
tmp55 = tl.load(in_ptr1 + tl.broadcast_to(-3 + r3, [RBLOCK]), tmp23,
other=float('-inf'))
tmp56 = triton_helpers.maximum(tmp55, tmp54)
tmp57 = tl.load(in_ptr1 + tl.broadcast_to(-1 + r3, [RBLOCK]), tmp30,
other=float('-inf'))
tmp58 = triton_helpers.maximum(tmp57, tmp56)
tmp59 = tl.load(in_ptr1 + tl.broadcast_to(r3, [RBLOCK]), tmp33, other=
float('-inf'))
tmp60 = triton_helpers.maximum(tmp59, tmp58)
tmp61 = tl.load(in_ptr1 + tl.broadcast_to(1 + r3, [RBLOCK]), tmp36,
other=float('-inf'))
tmp62 = triton_helpers.maximum(tmp61, tmp60)
tmp63 = tl.load(in_ptr1 + tl.broadcast_to(3 + r3, [RBLOCK]), tmp43,
other=float('-inf'))
tmp64 = triton_helpers.maximum(tmp63, tmp62)
tmp65 = tl.load(in_ptr1 + tl.broadcast_to(4 + r3, [RBLOCK]), tmp46,
other=float('-inf'))
tmp66 = triton_helpers.maximum(tmp65, tmp64)
tmp67 = tl.load(in_ptr1 + tl.broadcast_to(5 + r3, [RBLOCK]), tmp49,
other=float('-inf'))
tmp68 = triton_helpers.maximum(tmp67, tmp66)
tmp70 = tmp69 - tmp51
tmp71 = 0.0
tmp72 = triton_helpers.maximum(tmp70, tmp71)
tmp73 = 1.0
tmp74 = triton_helpers.minimum(tmp72, tmp73)
tmp75 = tl.broadcast_to(tmp74, [RBLOCK])
tmp77 = triton_helpers.promote_to_tensor(tl.sum(tmp75, 0))
tmp78 = tl.broadcast_to(tmp69, [RBLOCK])
tmp80 = triton_helpers.promote_to_tensor(tl.sum(tmp78, 0))
tmp82 = tmp81 - tmp68
tmp83 = triton_helpers.maximum(tmp82, tmp71)
tmp84 = triton_helpers.minimum(tmp83, tmp73)
tmp85 = tl.broadcast_to(tmp84, [RBLOCK])
tmp87 = triton_helpers.promote_to_tensor(tl.sum(tmp85, 0))
tmp88 = tmp77 + tmp87
tmp89 = tmp88 / tmp80
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp89, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf5 = buf2
del buf2
get_raw_stream(0)
triton_per_fused_add_clamp_div_max_pool2d_with_indices_sub_sum_0[grid
(1)](buf5, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf5,
class DilateContourLossNew(nn.Module):
def __init__(self):
super(DilateContourLossNew, self).__init__()
self.kernel = np.ones((3, 3), np.uint8)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
chexqi/Tube_Contour_Detection
|
DilateContourLoss
| false
| 6,433
|
[
"MIT"
] | 1
|
d629c992022f22fb3338b6436fcaadab438f8bfb
|
https://github.com/chexqi/Tube_Contour_Detection/tree/d629c992022f22fb3338b6436fcaadab438f8bfb
|
DenseModelV2
|
import torch
import torch.nn as nn
class DenseModelV2(nn.Module):
def __init__(self, input_dim, num_classes=2):
super(DenseModelV2, self).__init__()
self.fc1 = nn.Linear(input_dim, 2000)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(2000, 2000)
self.relu2 = nn.ReLU(inplace=True)
self.fc3 = nn.Linear(2000, 2000)
self.relu3 = nn.ReLU(inplace=True)
self.fc4 = nn.Linear(2000, 400)
self.relu4 = nn.ReLU(inplace=True)
if num_classes == 2:
self.fc5 = nn.Linear(400, 1)
else:
self.fc5 = nn.Linear(400, num_classes)
def forward(self, x):
x = self.relu1(self.fc1(x))
x = self.relu2(self.fc2(x))
x = self.relu3(self.fc3(x))
x = self.relu4(self.fc4(x))
x = self.fc5(x)
return x
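# Note: with num_classes == 2 the head emits a single logit, so this model is
# presumably meant to be paired with a sigmoid / BCE-with-logits style criterion
# rather than a two-way softmax.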
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = xindex // 2000
tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask)
tl.store(out_ptr0 + (x0 + 2048 * x1), tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = xindex // 2000
tmp0 = tl.load(in_ptr0 + (x0 + 2016 * x1 + 8064 * (x1 % 4 // 4) + 32256 *
((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + (x0 + 2016 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 400
x1 = xindex // 400
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 400 * x1 + 1600 * (x1 % 4 // 4) + 6400 *
((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_threshold_backward_4(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x6 = xindex % 1600
x7 = xindex // 1600
tmp0 = tl.load(in_ptr0 + x5, xmask)
tmp1 = 0.0
tmp2 = tmp0 <= tmp1
tl.store(out_ptr0 + (x6 + 1664 * x7), tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (2000, 4), (4, 1))
assert_size_stride(primals_2, (2000,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2000, 2000), (2000, 1))
assert_size_stride(primals_5, (2000,), (1,))
assert_size_stride(primals_6, (2000, 2000), (2000, 1))
assert_size_stride(primals_7, (2000,), (1,))
assert_size_stride(primals_8, (400, 2000), (2000, 1))
assert_size_stride(primals_9, (400,), (1,))
assert_size_stride(primals_10, (1, 400), (400, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 2000), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf0
buf17 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf1,
primals_2, buf17, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf1, buf2, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 2000), (2016, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (2000, 2000),
(1, 2000), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf3
buf16 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf4,
primals_5, buf16, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf4, buf5, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (64, 2000), (2016, 1), 0)
del buf4
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (2000, 2000),
(1, 2000), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf6
buf15 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf7,
primals_7, buf15, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf7, buf8, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_8, (2000, 400),
(1, 2000), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 400), (6400, 1600, 400,
1), 0)
del buf9
triton_poi_fused_relu_2[grid(25600)](buf10, primals_9, 25600,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf11 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
triton_poi_fused_view_3[grid(25600)](buf10, buf11, 25600, XBLOCK=
256, num_warps=4, num_stages=1)
buf13 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (400, 1), (1, 400), 0), alpha=1, beta=1, out=buf13)
del primals_11
buf14 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_threshold_backward_4[grid(25600)](buf10, buf14,
25600, XBLOCK=128, num_warps=4, num_stages=1)
del buf10
return (reinterpret_tensor(buf13, (4, 4, 4, 1), (16, 4, 1, 1), 0),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf5, buf8,
buf11, primals_10, buf14, primals_8, buf15, primals_6, buf16,
primals_4, buf17)
class DenseModelV2New(nn.Module):
def __init__(self, input_dim, num_classes=2):
super(DenseModelV2New, self).__init__()
self.fc1 = nn.Linear(input_dim, 2000)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(2000, 2000)
self.relu2 = nn.ReLU(inplace=True)
self.fc3 = nn.Linear(2000, 2000)
self.relu3 = nn.ReLU(inplace=True)
self.fc4 = nn.Linear(2000, 400)
self.relu4 = nn.ReLU(inplace=True)
if num_classes == 2:
self.fc5 = nn.Linear(400, 1)
else:
self.fc5 = nn.Linear(400, num_classes)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
chawins/adv-exp
|
DenseModelV2
| false
| 6,434
|
[
"MIT"
] | 1
|
5423e135c5599e4ec2bf90372916d8d05c89f285
|
https://github.com/chawins/adv-exp/tree/5423e135c5599e4ec2bf90372916d8d05c89f285
|
FC
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FC(nn.Module):
"""FC baseline implementation"""
def __init__(self):
super(FC, self).__init__()
self.fc1 = nn.Linear(45 * 45, 1024)
self.fc2 = nn.Linear(1024, 256)
self.fc3 = nn.Linear(256, 64)
self.fc4 = nn.Linear(64, 10)
def forward(self, x):
x = x.view(-1, 45 * 45)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = F.relu(self.fc2(x))
x = F.dropout(x, training=self.training)
x = self.fc3(x)
x = F.dropout(x, training=self.training)
x = self.fc4(x)
return F.log_softmax(x, dim=1)
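# Note: the view(-1, 45 * 45) assumes inputs that flatten to 2025 features per
# sample (e.g. 45x45 single-channel images), and the log-softmax output pairs
# naturally with nn.NLLLoss for the 10-way classification.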
def get_inputs():
return [torch.rand([4, 2025])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_2(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 2025), (2025, 1))
assert_size_stride(primals_2, (1024, 2025), (2025, 1))
assert_size_stride(primals_3, (1024,), (1,))
assert_size_stride(primals_4, (256, 1024), (1024, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (64, 256), (256, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (10, 64), (64, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (2025,
1024), (1, 2025), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(4096)](buf1, primals_3, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1024, 256),
(1, 1024), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(1024)](buf3, primals_5, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(256, 64), (1, 256), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf4, reinterpret_tensor(primals_8,
(64, 10), (1, 64), 0), alpha=1, beta=1, out=buf5)
del primals_9
buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_2[grid(4)](buf5, buf8, 4, 10, XBLOCK=
1, num_warps=2, num_stages=1)
del buf5
return (buf8, primals_1, buf1, buf3, buf4, buf8, primals_8, primals_6,
primals_4)
class FCNew(nn.Module):
"""FC baseline implementation"""
def __init__(self):
super(FCNew, self).__init__()
self.fc1 = nn.Linear(45 * 45, 1024)
self.fc2 = nn.Linear(1024, 256)
self.fc3 = nn.Linear(256, 64)
self.fc4 = nn.Linear(64, 10)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
chenxi-wang/cs420-codes
|
FC
| false
| 6,435
|
[
"MIT"
] | 1
|
756b71ea4f4d8c4694c8c3f32ed9d1c6e89fad15
|
https://github.com/chenxi-wang/cs420-codes/tree/756b71ea4f4d8c4694c8c3f32ed9d1c6e89fad15
|
FocalLossV2
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalSigmoidLossFunc(torch.autograd.Function):
"""
compute backward directly for better numeric stability
"""
@staticmethod
def forward(ctx, logits, label, alpha, gamma, reduction):
logits = logits.float()
coeff = torch.empty_like(logits).fill_(1 - alpha)
coeff[label == 1] = alpha
probs = torch.sigmoid(logits)
log_probs = torch.where(logits >= 0, F.softplus(logits, -1, 50),
logits - F.softplus(logits, 1, 50))
log_1_probs = torch.where(logits >= 0, -logits + F.softplus(logits,
-1, 50), -F.softplus(logits, 1, 50))
probs_gamma = probs ** gamma
probs_1_gamma = (1.0 - probs) ** gamma
ctx.coeff = coeff
ctx.probs = probs
ctx.log_probs = log_probs
ctx.log_1_probs = log_1_probs
ctx.probs_gamma = probs_gamma
ctx.probs_1_gamma = probs_1_gamma
ctx.label = label
ctx.gamma = gamma
ctx.reduction = reduction
term1 = probs_1_gamma * log_probs
term2 = probs_gamma * log_1_probs
loss = torch.where(label == 1, term1, term2).mul_(coeff).neg_()
if reduction == 'mean':
loss = loss.mean()
if reduction == 'sum':
loss = loss.sum()
return loss
@staticmethod
def backward(ctx, grad_output):
"""
compute gradient of focal loss
"""
coeff = ctx.coeff
probs = ctx.probs
log_probs = ctx.log_probs
log_1_probs = ctx.log_1_probs
probs_gamma = ctx.probs_gamma
probs_1_gamma = ctx.probs_1_gamma
label = ctx.label
gamma = ctx.gamma
reduction = ctx.reduction
term1 = (1.0 - probs - gamma * probs * log_probs).mul_(probs_1_gamma
).neg_()
term2 = (probs - gamma * (1.0 - probs) * log_1_probs).mul_(probs_gamma)
grads = torch.where(label == 1, term1, term2).mul_(coeff).mul_(
grad_output)
if reduction == 'mean':
grads = grads.div_(label.numel())
if reduction == 'sum':
grads = grads
return grads, None, None, None, None
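# Derivation sketch (illustrative, not from the source): with p = sigmoid(x) and
# dp/dx = p * (1 - p),
#   d/dx [ (1-p)**gamma * log(p)   ] =  (1-p)**gamma * (1 - p - gamma * p * log(p))
#   d/dx [ p**gamma     * log(1-p) ] = -p**gamma     * (p - gamma * (1-p) * log(1-p))
# so the gradient of loss = -coeff * focal_term reduces to coeff * term1 for
# positive labels and coeff * term2 for negative labels, which is what backward()
# above multiplies by grad_output.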
class FocalLossV2(nn.Module):
"""
    This uses a better formula to compute the gradient, which has better numeric stability
"""
def __init__(self, alpha=0.25, gamma=2, reduction='mean'):
super(FocalLossV2, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
def forward(self, logits, label):
return FocalSigmoidLossFunc.apply(logits, label, self.alpha, self.
gamma, self.reduction)
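# Clarifying note (illustrative): the F.softplus branching inside
# FocalSigmoidLossFunc.forward is the numerically stable evaluation of the
# log-probabilities,
#   log(sigmoid(x))     = -softplus(-x) = x - softplus(x)
#   log(1 - sigmoid(x)) = -softplus(x)  = -x - softplus(-x)
# each branch avoiding exp() overflow for large |x| before the focal weights
# (1-p)**gamma and p**gamma are applied.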
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_eq_fill_ge_index_put_lift_fresh_mean_mul_neg_pow_rsub_sigmoid_softplus_sub_where_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 == tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp1 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = 0.0
tmp8 = tmp3 >= tmp7
tmp9 = -1.0
tmp10 = tmp3 * tmp9
tmp11 = 50.0
tmp12 = tmp10 > tmp11
tmp13 = tl_math.exp(tmp10)
tmp14 = libdevice.log1p(tmp13)
tmp15 = tmp14 * tmp9
tmp16 = tl.where(tmp12, tmp3, tmp15)
tmp17 = tmp3 * tmp1
tmp18 = tmp17 > tmp11
tmp19 = tl_math.exp(tmp17)
tmp20 = libdevice.log1p(tmp19)
tmp21 = tmp20 * tmp1
tmp22 = tl.where(tmp18, tmp3, tmp21)
tmp23 = tmp3 - tmp22
tmp24 = tl.where(tmp8, tmp16, tmp23)
tmp25 = tmp6 * tmp24
tmp26 = tmp4 * tmp4
tmp27 = -tmp3
tmp28 = tmp27 + tmp16
tmp29 = -tmp22
tmp30 = tl.where(tmp8, tmp28, tmp29)
tmp31 = tmp26 * tmp30
tmp32 = tl.where(tmp2, tmp25, tmp31)
tmp33 = 0.25
tmp34 = 0.75
tmp35 = tl.where(tmp2, tmp33, tmp34)
tmp36 = tmp32 * tmp35
tmp37 = -tmp36
tmp38 = tl.broadcast_to(tmp37, [RBLOCK])
tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0))
tmp41 = 256.0
tmp42 = tmp40 / tmp41
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp42, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
get_raw_stream(0)
triton_per_fused_add_eq_fill_ge_index_put_lift_fresh_mean_mul_neg_pow_rsub_sigmoid_softplus_sub_where_0[
grid(1)](buf3, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class FocalSigmoidLossFunc(torch.autograd.Function):
"""
compute backward directly for better numeric stability
"""
@staticmethod
def forward(ctx, logits, label, alpha, gamma, reduction):
logits = logits.float()
coeff = torch.empty_like(logits).fill_(1 - alpha)
coeff[label == 1] = alpha
probs = torch.sigmoid(logits)
log_probs = torch.where(logits >= 0, F.softplus(logits, -1, 50),
logits - F.softplus(logits, 1, 50))
log_1_probs = torch.where(logits >= 0, -logits + F.softplus(logits,
-1, 50), -F.softplus(logits, 1, 50))
probs_gamma = probs ** gamma
probs_1_gamma = (1.0 - probs) ** gamma
ctx.coeff = coeff
ctx.probs = probs
ctx.log_probs = log_probs
ctx.log_1_probs = log_1_probs
ctx.probs_gamma = probs_gamma
ctx.probs_1_gamma = probs_1_gamma
ctx.label = label
ctx.gamma = gamma
ctx.reduction = reduction
term1 = probs_1_gamma * log_probs
term2 = probs_gamma * log_1_probs
loss = torch.where(label == 1, term1, term2).mul_(coeff).neg_()
if reduction == 'mean':
loss = loss.mean()
if reduction == 'sum':
loss = loss.sum()
return loss
@staticmethod
def backward(ctx, grad_output):
"""
compute gradient of focal loss
"""
coeff = ctx.coeff
probs = ctx.probs
log_probs = ctx.log_probs
log_1_probs = ctx.log_1_probs
probs_gamma = ctx.probs_gamma
probs_1_gamma = ctx.probs_1_gamma
label = ctx.label
gamma = ctx.gamma
reduction = ctx.reduction
term1 = (1.0 - probs - gamma * probs * log_probs).mul_(probs_1_gamma
).neg_()
term2 = (probs - gamma * (1.0 - probs) * log_1_probs).mul_(probs_gamma)
grads = torch.where(label == 1, term1, term2).mul_(coeff).mul_(
grad_output)
if reduction == 'mean':
grads = grads.div_(label.numel())
if reduction == 'sum':
grads = grads
return grads, None, None, None, None
class FocalLossV2New(nn.Module):
"""
    This uses a better formula to compute the gradient, which has better numeric stability
"""
def __init__(self, alpha=0.25, gamma=2, reduction='mean'):
super(FocalLossV2New, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
chizhu/pytorch-loss
|
FocalLossV2
| false
| 6,436
|
[
"MIT"
] | 1
|
c8fbd78771f11a910b0b51ae3697c09761dd9696
|
https://github.com/chizhu/pytorch-loss/tree/c8fbd78771f11a910b0b51ae3697c09761dd9696
|
SwishV2
|
import torch
import torch.nn as nn
class SwishFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, feat):
sig = torch.sigmoid(feat)
out = feat * torch.sigmoid(feat)
grad = sig * (1 + feat * (1 - sig))
ctx.grad = grad
return out
@staticmethod
def backward(ctx, grad_output):
grad = ctx.grad
grad *= grad_output
return grad
class SwishV2(nn.Module):
def __init__(self):
super(SwishV2, self).__init__()
def forward(self, feat):
return SwishFunction.apply(feat)
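# Minimal sanity-check sketch (not part of the original module): the gradient
# cached in SwishFunction.forward follows from
#   d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
# and can be compared against the closed form directly.
def _swish_v2_grad_check():
    x = torch.randn(16, requires_grad=True)
    SwishV2()(x).sum().backward()
    sig = torch.sigmoid(x.detach())
    expected = sig * (1 + x.detach() * (1 - sig))
    assert torch.allclose(x.grad, expected)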
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SwishFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, feat):
sig = torch.sigmoid(feat)
out = feat * torch.sigmoid(feat)
grad = sig * (1 + feat * (1 - sig))
ctx.grad = grad
return out
@staticmethod
def backward(ctx, grad_output):
grad = ctx.grad
grad *= grad_output
return grad
class SwishV2New(nn.Module):
def __init__(self):
super(SwishV2New, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
chizhu/pytorch-loss
|
SwishV2
| false
| 6,437
|
[
"MIT"
] | 1
|
c8fbd78771f11a910b0b51ae3697c09761dd9696
|
https://github.com/chizhu/pytorch-loss/tree/c8fbd78771f11a910b0b51ae3697c09761dd9696
|
PositionEmbedding
|
from _paritybench_helpers import _mock_config
import torch
from torch import nn
class PositionEmbedding(nn.Module):
"""
    adapted from the transformers package by huggingface.
"""
def __init__(self, config):
super(PositionEmbedding, self).__init__()
self.config = config
self.pos_embs = nn.Embedding(config['trans_max_pos'], config[
'trans_hidden'])
self.LayerNorm = nn.LayerNorm(config['trans_hidden'])
self.dropout = nn.Dropout(config['trans_drop_prob'])
def forward(self, input_embs):
"""
`input_embs` should be shaped as [`numBatch`, `seqLength`, `hiddenSize`]
"""
seq_length = input_embs.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=
input_embs.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_embs[:, :, 0])
position_embeddings = self.pos_embs(position_ids)
embeddings = input_embs + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
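# Usage sketch (illustrative only; `_mock_config` is the test helper imported
# above, and the sizes are made up for the example):
#
#   cfg = _mock_config(trans_max_pos=512, trans_hidden=256, trans_drop_prob=0.1)
#   pos_emb = PositionEmbedding(cfg)
#   embs = torch.rand(8, 128, 256)   # [numBatch, seqLength, hiddenSize]
#   out = pos_emb(embs)              # same shape, with learned positions added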
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(trans_max_pos=4, trans_hidden=4,
trans_drop_prob=0.5)}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_embedding_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.full([XBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask,
'index out of bounds: 0 <= tmp5 < 4')
tmp7 = tl.load(in_ptr2 + (x0 + 4 * tmp5), xmask)
tmp8 = tmp0 + tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused_arange_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_embedding_1[grid(256)](primals_1, buf0,
primals_2, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(64)](buf1, buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_3[grid(256)](buf1, buf2, buf3,
primals_3, primals_4, buf4, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf2
del buf3
del primals_4
return buf4, primals_3, reinterpret_tensor(buf0, (1, 4), (4, 1), 0), buf1
class PositionEmbeddingNew(nn.Module):
"""
    adapted from the transformers package by huggingface.
"""
def __init__(self, config):
super(PositionEmbeddingNew, self).__init__()
self.config = config
self.pos_embs = nn.Embedding(config['trans_max_pos'], config[
'trans_hidden'])
self.LayerNorm = nn.LayerNorm(config['trans_hidden'])
self.dropout = nn.Dropout(config['trans_drop_prob'])
def forward(self, input_0):
primals_2 = self.pos_embs.weight
primals_3 = self.LayerNorm.weight
primals_4 = self.LayerNorm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
choumartin1234/Music-Eye
|
PositionEmbedding
| false
| 6,438
|
[
"MIT"
] | 1
|
059b43fd21f7e7bf6c84cb35a03fd936e64b59a5
|
https://github.com/choumartin1234/Music-Eye/tree/059b43fd21f7e7bf6c84cb35a03fd936e64b59a5
|
FocalLossV1
|
import torch
import torch.nn as nn
class FocalLossV1(nn.Module):
def __init__(self, alpha=0.25, gamma=2, reduction='mean'):
super(FocalLossV1, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
self.crit = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, logits, label):
"""
args:
logits: tensor of shape (N, ...)
            label: tensor of shape (N, ...)
"""
logits = logits.float()
with torch.no_grad():
alpha = torch.empty_like(logits).fill_(1 - self.alpha)
alpha[label == 1] = self.alpha
probs = torch.sigmoid(logits)
pt = torch.where(label == 1, probs, 1 - probs)
ce_loss = self.crit(logits, label.double())
loss = alpha * torch.pow(1 - pt, self.gamma) * ce_loss
if self.reduction == 'mean':
loss = loss.mean()
if self.reduction == 'sum':
loss = loss.sum()
return loss
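# Note (illustrative): written out, the expression above is the standard binary
# focal loss
#   FL = alpha_t * (1 - p_t) ** gamma * BCEWithLogits(logits, label)
# where p_t is the probability the model assigns to the true class, so easy
# examples (p_t close to 1) are down-weighted by the (1 - p_t) ** gamma factor.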
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_binary_cross_entropy_with_logits_eq_fill_index_put_lift_fresh_mean_mul_pow_rsub_sigmoid_where_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 == tmp1
tmp3 = 0.25
tmp4 = 0.75
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tl.sigmoid(tmp6)
tmp8 = tmp1 - tmp7
tmp9 = tl.where(tmp2, tmp7, tmp8)
tmp10 = tmp1 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tmp5 * tmp11
tmp13 = tmp12.to(tl.float64)
tmp14 = tmp0.to(tl.float64)
tmp15 = tl.full([1], 1.0, tl.float64)
tmp16 = tmp15 - tmp14
tmp17 = tmp6.to(tl.float64)
tmp18 = tmp16 * tmp17
tmp19 = 0.0
tmp20 = triton_helpers.minimum(tmp19, tmp6)
tmp21 = tl_math.abs(tmp6)
tmp22 = -tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = libdevice.log1p(tmp23)
tmp25 = tmp20 - tmp24
tmp26 = tmp25.to(tl.float64)
tmp27 = tmp18 - tmp26
tmp28 = tmp13 * tmp27
tmp29 = tl.broadcast_to(tmp28, [RBLOCK])
tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp29, 0))
tmp32 = tl.full([1], 256.0, tl.float64)
tmp33 = tmp31 / tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp33, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float64)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused__to_copy_binary_cross_entropy_with_logits_eq_fill_index_put_lift_fresh_mean_mul_pow_rsub_sigmoid_where_0[
grid(1)](buf2, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class FocalLossV1New(nn.Module):
def __init__(self, alpha=0.25, gamma=2, reduction='mean'):
super(FocalLossV1New, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
self.crit = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
chizhu/pytorch-loss
|
FocalLossV1
| false
| 6,439
|
[
"MIT"
] | 1
|
c8fbd78771f11a910b0b51ae3697c09761dd9696
|
https://github.com/chizhu/pytorch-loss/tree/c8fbd78771f11a910b0b51ae3697c09761dd9696
|
InteractingLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class InteractingLayer(nn.Module):
"""A Layer used in AutoInt that model the correlations between different feature fields by multi-head self-attention mechanism.
Input shape
- A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 3D tensor with shape:``(batch_size,field_size,att_embedding_size * head_num)``.
Arguments
- **in_features** : Positive integer, dimensionality of input features.
        - **att_embedding_size**: int. The embedding size in the multi-head self-attention network.
        - **head_num**: int. The number of heads in the multi-head self-attention network.
        - **use_res**: bool. Whether or not to use standard residual connections before output.
- **seed**: A Python integer to use as random seed.
References
- [Song W, Shi C, Xiao Z, et al. AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks[J]. arXiv preprint arXiv:1810.11921, 2018.](https://arxiv.org/abs/1810.11921)
"""
def __init__(self, in_features, att_embedding_size=8, head_num=2,
use_res=True, seed=1024, device='cpu'):
super(InteractingLayer, self).__init__()
if head_num <= 0:
            raise ValueError('head_num must be an int > 0')
self.att_embedding_size = att_embedding_size
self.head_num = head_num
self.use_res = use_res
self.seed = seed
embedding_size = in_features
self.W_Query = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
self.W_key = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
self.W_Value = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
if self.use_res:
self.W_Res = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
for tensor in self.parameters():
nn.init.normal_(tensor, mean=0.0, std=0.05)
self
def forward(self, inputs):
if len(inputs.shape) != 3:
raise ValueError(
'Unexpected inputs dimensions %d, expect to be 3 dimensions' %
len(inputs.shape))
querys = torch.tensordot(inputs, self.W_Query, dims=([-1], [0]))
keys = torch.tensordot(inputs, self.W_key, dims=([-1], [0]))
values = torch.tensordot(inputs, self.W_Value, dims=([-1], [0]))
querys = torch.stack(torch.split(querys, self.att_embedding_size,
dim=2))
keys = torch.stack(torch.split(keys, self.att_embedding_size, dim=2))
values = torch.stack(torch.split(values, self.att_embedding_size,
dim=2))
inner_product = torch.einsum('bnik,bnjk->bnij', querys, keys)
self.normalized_att_scores = F.softmax(inner_product, dim=1)
result = torch.matmul(self.normalized_att_scores, values)
result = torch.cat(torch.split(result, 1), dim=-1)
result = torch.squeeze(result, dim=0)
if self.use_res:
result += torch.tensordot(inputs, self.W_Res, dims=([-1], [0]))
result = F.relu(result)
return result
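# Shape walk-through (illustrative only; the sizes below are made up):
#
#   layer = InteractingLayer(in_features=16, att_embedding_size=8, head_num=2)
#   x = torch.rand(32, 10, 16)   # (batch_size, field_size, embedding_size)
#   y = layer(x)                 # (32, 10, 16): field_size is preserved and the
#                                # last dim is att_embedding_size * head_num = 16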
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 32
x0 = xindex % 8
x1 = xindex // 8 % 4
x3 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (8 + x0 + 16 * x1 + 64 * (-4 + x2)), tmp6 &
xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp11 = tl.load(in_out_ptr0 + x2, xmask)
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (8 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 16, tl.int64)
tmp9 = tl.load(in_ptr0 + (128 + 8 * x1 + (-8 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 0, tl.int32)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = 0.0
tmp16 = tmp14 <= tmp15
tl.store(in_out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4, 16), (16, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4, 16), (16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_4, out=buf2)
del primals_4
buf3 = empty_strided_cuda((8, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(256)](buf0, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (8, 4, 8), (32, 8, 1), 0)
del buf0
triton_poi_fused_stack_0[grid(256)](buf1, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((8, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (8, 8, 4), (32, 1,
8), 0), out=buf5)
buf6 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(128)](buf5, buf6, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (2, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(128)](buf6, buf7, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf1, (8, 4, 8), (32, 8, 1), 0)
del buf1
triton_poi_fused_stack_0[grid(256)](buf2, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (8, 4, 8), (32, 8, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (8, 4, 4), (16, 4, 1),
0), buf8, out=buf9)
buf10 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_5, out=buf10)
del primals_5
buf11 = reinterpret_tensor(buf10, (4, 4, 16), (64, 16, 1), 0)
del buf10
buf12 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(256)](buf11, buf9,
buf12, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf9
return buf11, buf7, buf7, buf12, reinterpret_tensor(primals_1, (4, 16),
(1, 4), 0), reinterpret_tensor(buf8, (8, 8, 4), (32, 1, 8), 0
), reinterpret_tensor(buf3, (8, 8, 4), (32, 1, 8), 0), buf4
class InteractingLayerNew(nn.Module):
"""A Layer used in AutoInt that model the correlations between different feature fields by multi-head self-attention mechanism.
Input shape
- A 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 3D tensor with shape:``(batch_size,field_size,att_embedding_size * head_num)``.
Arguments
- **in_features** : Positive integer, dimensionality of input features.
        - **att_embedding_size**: int. The embedding size in the multi-head self-attention network.
        - **head_num**: int. The number of heads in the multi-head self-attention network.
        - **use_res**: bool. Whether or not to use standard residual connections before output.
- **seed**: A Python integer to use as random seed.
References
- [Song W, Shi C, Xiao Z, et al. AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks[J]. arXiv preprint arXiv:1810.11921, 2018.](https://arxiv.org/abs/1810.11921)
"""
def __init__(self, in_features, att_embedding_size=8, head_num=2,
use_res=True, seed=1024, device='cpu'):
super(InteractingLayerNew, self).__init__()
if head_num <= 0:
            raise ValueError('head_num must be an int > 0')
self.att_embedding_size = att_embedding_size
self.head_num = head_num
self.use_res = use_res
self.seed = seed
embedding_size = in_features
self.W_Query = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
self.W_key = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
self.W_Value = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
if self.use_res:
self.W_Res = nn.Parameter(torch.Tensor(embedding_size, self.
att_embedding_size * self.head_num))
for tensor in self.parameters():
nn.init.normal_(tensor, mean=0.0, std=0.05)
self
def forward(self, input_0):
primals_2 = self.W_Query
primals_3 = self.W_key
primals_4 = self.W_Value
primals_5 = self.W_Res
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
chenkkkk/DeepCTR-PyTorch
|
InteractingLayer
| false
| 6,440
|
[
"Apache-2.0"
] | 1
|
a10a3ace4ad79171e7fb182407b3e4d22bf753e7
|
https://github.com/chenkkkk/DeepCTR-PyTorch/tree/a10a3ace4ad79171e7fb182407b3e4d22bf753e7
|
ScaleNetwork
|
import torch
import torch.nn as nn
class ScaleNetwork(nn.Module):
"""Network for parameterizing a scaling function"""
def __init__(self, input_dim):
super(ScaleNetwork, self).__init__()
self.fc1 = nn.Linear(input_dim, 2000)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(2000, 2000)
self.relu2 = nn.ReLU(inplace=True)
self.fc3 = nn.Linear(2000, 2000)
self.relu3 = nn.ReLU(inplace=True)
self.fc4 = nn.Linear(2000, 400)
self.relu4 = nn.ReLU(inplace=True)
self.fc5 = nn.Linear(400, 1)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight)
def forward(self, x):
x = self.relu1(self.fc1(x))
x = self.relu2(self.fc2(x))
x = self.relu3(self.fc3(x))
x = self.relu4(self.fc4(x))
x = self.fc5(x).exp()
return x
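# Note: the final .exp() maps the unconstrained fc5 output to a strictly positive
# value, so the network can parameterize a scale (e.g. a standard deviation)
# without an explicit positivity constraint.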
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = xindex // 2000
tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask)
tl.store(out_ptr0 + (x0 + 2048 * x1), tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = xindex // 2000
tmp0 = tl.load(in_ptr0 + (x0 + 2016 * x1 + 8064 * (x1 % 4 // 4) + 32256 *
((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + (x0 + 2016 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 400
x1 = xindex // 400
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 400 * x1 + 1600 * (x1 % 4 // 4) + 6400 *
((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_exp_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl_math.exp(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_threshold_backward_5(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x6 = xindex % 1600
x7 = xindex // 1600
tmp0 = tl.load(in_ptr0 + x5, xmask)
tmp1 = 0.0
tmp2 = tmp0 <= tmp1
tl.store(out_ptr0 + (x6 + 1664 * x7), tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (2000, 4), (4, 1))
assert_size_stride(primals_2, (2000,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2000, 2000), (2000, 1))
assert_size_stride(primals_5, (2000,), (1,))
assert_size_stride(primals_6, (2000, 2000), (2000, 1))
assert_size_stride(primals_7, (2000,), (1,))
assert_size_stride(primals_8, (400, 2000), (2000, 1))
assert_size_stride(primals_9, (400,), (1,))
assert_size_stride(primals_10, (1, 400), (400, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 2000), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf0
buf17 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf1,
primals_2, buf17, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf1, buf2, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 2000), (2016, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (2000, 2000),
(1, 2000), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf3
buf16 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf4,
primals_5, buf16, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf4, buf5, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (64, 2000), (2016, 1), 0)
del buf4
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (2000, 2000),
(1, 2000), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf6
buf15 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf7,
primals_7, buf15, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf7, buf8, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_8, (2000, 400),
(1, 2000), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 400), (6400, 1600, 400,
1), 0)
del buf9
triton_poi_fused_relu_2[grid(25600)](buf10, primals_9, 25600,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf11 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
triton_poi_fused_view_3[grid(25600)](buf10, buf11, 25600, XBLOCK=
256, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(buf11, reinterpret_tensor(primals_10, (400, 1), (
1, 400), 0), out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf12
triton_poi_fused_exp_4[grid(64)](buf13, primals_11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_11
buf14 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_threshold_backward_5[grid(25600)](buf10, buf14,
25600, XBLOCK=256, num_warps=4, num_stages=1)
del buf10
return (buf13, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2,
buf5, buf8, buf11, buf13, primals_10, buf14, primals_8, buf15,
primals_6, buf16, primals_4, buf17)
class ScaleNetworkNew(nn.Module):
"""Network for parameterizing a scaling function"""
def __init__(self, input_dim):
super(ScaleNetworkNew, self).__init__()
self.fc1 = nn.Linear(input_dim, 2000)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(2000, 2000)
self.relu2 = nn.ReLU(inplace=True)
self.fc3 = nn.Linear(2000, 2000)
self.relu3 = nn.ReLU(inplace=True)
self.fc4 = nn.Linear(2000, 400)
self.relu4 = nn.ReLU(inplace=True)
self.fc5 = nn.Linear(400, 1)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
chawins/adv-exp
|
ScaleNetwork
| false
| 6,441
|
[
"MIT"
] | 1
|
5423e135c5599e4ec2bf90372916d8d05c89f285
|
https://github.com/chawins/adv-exp/tree/5423e135c5599e4ec2bf90372916d8d05c89f285
|
CauchyLoss
|
import torch
from typing import *
import torch.nn as nn
class CauchyLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
err = torch.sum(torch.pow(x - y, 2), dim=-1)
return torch.mean(torch.log(1 + err), dim=-1)
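# Note (illustrative): this sums the squared error over the last dimension,
# applies the Cauchy/Lorentzian transform log(1 + err), and averages over the
# remaining trailing dimension, so large residuals are penalized only
# logarithmically rather than quadratically.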
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
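This is the Cauchy (Lorentzian) robust loss: the squared error is summed over the last dimension, passed through log(1 + .), and averaged over the next-to-last dimension, so the (4, 4, 4, 4) inputs give a (4, 4) result. A quick cross-check against the fused CauchyLossNew wrapper defined after the kernel below, as a minimal sketch assuming a CUDA device:
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
eager = CauchyLoss()(x, y)        # shape (4, 4)
fused = CauchyLossNew()(x, y)     # same reduction, one fused kernel
print(torch.allclose(eager, fused, atol=1e-6))   # should print True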
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from typing import *
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_log_mean_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 16 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr1 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr1 + (2 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr1 + (3 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr1 + (4 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr1 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp31 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr1 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp36 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp37 = tl.load(in_ptr1 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp44 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp45 = tl.load(in_ptr1 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp48 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp49 = tl.load(in_ptr1 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp53 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp54 = tl.load(in_ptr1 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp58 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp59 = tl.load(in_ptr1 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp66 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp67 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp70 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp71 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp75 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp76 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp80 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp81 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 1.0
tmp20 = tmp18 + tmp19
tmp21 = tl_math.log(tmp20)
tmp24 = tmp22 - tmp23
tmp25 = tmp24 * tmp24
tmp28 = tmp26 - tmp27
tmp29 = tmp28 * tmp28
tmp30 = tmp25 + tmp29
tmp33 = tmp31 - tmp32
tmp34 = tmp33 * tmp33
tmp35 = tmp30 + tmp34
tmp38 = tmp36 - tmp37
tmp39 = tmp38 * tmp38
tmp40 = tmp35 + tmp39
tmp41 = tmp40 + tmp19
tmp42 = tl_math.log(tmp41)
tmp43 = tmp21 + tmp42
tmp46 = tmp44 - tmp45
tmp47 = tmp46 * tmp46
tmp50 = tmp48 - tmp49
tmp51 = tmp50 * tmp50
tmp52 = tmp47 + tmp51
tmp55 = tmp53 - tmp54
tmp56 = tmp55 * tmp55
tmp57 = tmp52 + tmp56
tmp60 = tmp58 - tmp59
tmp61 = tmp60 * tmp60
tmp62 = tmp57 + tmp61
tmp63 = tmp62 + tmp19
tmp64 = tl_math.log(tmp63)
tmp65 = tmp43 + tmp64
tmp68 = tmp66 - tmp67
tmp69 = tmp68 * tmp68
tmp72 = tmp70 - tmp71
tmp73 = tmp72 * tmp72
tmp74 = tmp69 + tmp73
tmp77 = tmp75 - tmp76
tmp78 = tmp77 * tmp77
tmp79 = tmp74 + tmp78
tmp82 = tmp80 - tmp81
tmp83 = tmp82 * tmp82
tmp84 = tmp79 + tmp83
tmp85 = tmp84 + tmp19
tmp86 = tl_math.log(tmp85)
tmp87 = tmp65 + tmp86
tmp88 = 4.0
tmp89 = tmp87 / tmp88
tl.store(out_ptr0 + x0, tmp89, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_log_mean_pow_sub_sum_0[grid(16)](arg0_1,
arg1_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class CauchyLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ciwanceylan/gated-gradient-flow
|
CauchyLoss
| false
| 6,442
|
[
"Apache-2.0"
] | 1
|
c4f6c0c987f428697336e4514099aa7ef2351388
|
https://github.com/ciwanceylan/gated-gradient-flow/tree/c4f6c0c987f428697336e4514099aa7ef2351388
|
LabelSmoothSoftmaxCEV1
|
import torch
import torch.nn as nn
class LabelSmoothSoftmaxCEV1(nn.Module):
"""
    This is the autograd version; you can also try LabelSmoothSoftmaxCEV2, which uses derived gradients
"""
def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):
super(LabelSmoothSoftmaxCEV1, self).__init__()
self.lb_smooth = lb_smooth
self.reduction = reduction
self.lb_ignore = ignore_index
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, logits, label):
"""
args: logits: tensor of shape (N, C, H, W)
        args: label: tensor of shape (N, H, W)
"""
logits = logits.float()
with torch.no_grad():
num_classes = logits.size(1)
label = label.clone().detach()
ignore = label == self.lb_ignore
n_valid = (ignore == 0).sum()
label[ignore] = 0
lb_pos, lb_neg = 1.0 - self.lb_smooth, self.lb_smooth / num_classes
label = torch.empty_like(logits).fill_(lb_neg).scatter_(1,
label.unsqueeze(1), lb_pos).detach()
logs = self.log_softmax(logits)
loss = -torch.sum(logs * label, dim=1)
loss[ignore] = 0
if self.reduction == 'mean':
loss = loss.sum() / n_valid
if self.reduction == 'sum':
loss = loss.sum()
return loss
def get_inputs():
return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4], dtype=
torch.int64)]
def get_init_inputs():
return [[], {}]
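With the default lb_smooth=0.1 and the 4 classes used by get_inputs(), the smoothed target puts 1 - 0.1 = 0.9 on the true class and 0.1 / 4 = 0.025 on every other class; these are exactly the literals 0.9 and 0.025 that appear hard-coded in the fused kernel below. A small sketch of the target construction done in the no_grad block above, shown in isolation:
import torch

num_classes, lb_smooth = 4, 0.1
label = torch.tensor([2])                       # one sample, true class 2
lb_pos, lb_neg = 1.0 - lb_smooth, lb_smooth / num_classes
target = torch.full((1, num_classes), lb_neg).scatter_(1, label.unsqueeze(1), lb_pos)
print(target)   # tensor([[0.0250, 0.0250, 0.9000, 0.0250]])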
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax__to_copy_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tmp0.to(tl.float32)
tmp3 = tmp2.to(tl.float32)
tmp5 = tmp4.to(tl.float32)
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = tmp7.to(tl.float32)
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp10.to(tl.float32)
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp13 = tmp1 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_eq_index_put_lift_fresh_mul_neg_scatter_sum_1(
in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = tl.full([1, 1], -100, tl.int64)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1, 1], 0, tl.int64)
tmp4 = tl.where(tmp2, tmp3, tmp0)
tmp6 = tl_math.exp(tmp5)
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 + tmp11
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp12 + tmp14
tmp16 = tl_math.log(tmp15)
tmp17 = tmp5 - tmp16
tmp18 = tmp4 == tmp3
tmp19 = 0.9
tmp20 = 0.025
tmp21 = tl.where(tmp18, tmp19, tmp20)
tmp22 = tmp17 * tmp21
tmp23 = tmp7 - tmp16
tmp24 = tl.full([1, 1], 1, tl.int64)
tmp25 = tmp4 == tmp24
tmp26 = tl.where(tmp25, tmp19, tmp20)
tmp27 = tmp23 * tmp26
tmp28 = tmp22 + tmp27
tmp29 = tmp10 - tmp16
tmp30 = tl.full([1, 1], 2, tl.int64)
tmp31 = tmp4 == tmp30
tmp32 = tl.where(tmp31, tmp19, tmp20)
tmp33 = tmp29 * tmp32
tmp34 = tmp28 + tmp33
tmp35 = tmp13 - tmp16
tmp36 = tl.full([1, 1], 3, tl.int64)
tmp37 = tmp4 == tmp36
tmp38 = tl.where(tmp37, tmp19, tmp20)
tmp39 = tmp35 * tmp38
tmp40 = tmp34 + tmp39
tmp41 = -tmp40
tmp42 = 0.0
tmp43 = tl.where(tmp2, tmp42, tmp41)
tmp44 = tmp2.to(tl.int64)
tmp45 = tmp44 == tmp3
tmp46 = tmp45.to(tl.int64)
tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
tmp49 = tl.sum(tmp47, 1)[:, None]
tmp50 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp52 = tl.sum(tmp50, 1)[:, None]
tmp53 = tmp49.to(tl.float32)
tmp54 = tmp52 / tmp53
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp54, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax__to_copy_0[grid(16)](arg0_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf6 = buf4
del buf4
triton_per_fused__log_softmax_div_eq_index_put_lift_fresh_mul_neg_scatter_sum_1[
grid(1)](buf6, arg1_1, buf0, 1, 4, XBLOCK=1, num_warps=2,
num_stages=1)
del arg1_1
del buf0
return buf6,
class LabelSmoothSoftmaxCEV1New(nn.Module):
"""
    This is the autograd version; you can also try LabelSmoothSoftmaxCEV2, which uses derived gradients
"""
def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):
super(LabelSmoothSoftmaxCEV1New, self).__init__()
self.lb_smooth = lb_smooth
self.reduction = reduction
self.lb_ignore = ignore_index
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
chizhu/pytorch-loss
|
LabelSmoothSoftmaxCEV1
| false
| 6,443
|
[
"MIT"
] | 1
|
c8fbd78771f11a910b0b51ae3697c09761dd9696
|
https://github.com/chizhu/pytorch-loss/tree/c8fbd78771f11a910b0b51ae3697c09761dd9696
|
EncoderLayer
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
class Norm(nn.Module):
def __init__(self, emb_dim, eps=1e-06):
super().__init__()
self.size = emb_dim
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
"""
inputs:
x: input of shape: (batch size, sequence length, embedding dimensions)
outputs: Scaled, normalized x
"""
norm = (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=
True) + self.eps)
norm = self.alpha * norm + self.bias
return norm
class MultiHeadAttention(nn.Module):
def __init__(self, num_heads, emb_dim, dim_k=None, dropout=0.1):
super().__init__()
self.emb_dim = emb_dim
self.dim_k = dim_k if dim_k else emb_dim // num_heads
self.num_heads = num_heads
self.q_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.k_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.v_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(self.dim_k * num_heads, emb_dim)
def attention(self, q, k, v, dim_k, mask=None, dropout=None, explain=False
):
k = k.transpose(-2, -1)
if explain:
None
scores = torch.matmul(q, k) / math.sqrt(dim_k)
if explain:
None
if mask is not None:
mask = mask.unsqueeze(1)
if explain:
None
scores = scores.masked_fill(mask == 0, -1000000000.0)
softscores = F.softmax(scores, dim=-1)
if dropout is not None:
softscores = dropout(softscores)
output = torch.matmul(softscores, v)
return output, scores
def forward(self, q, k, v, mask=None, explain=False):
"""
inputs:
q has shape (batch size, q_sequence length, embedding dimensions)
k,v have shape (batch size, kv_sequence length, embedding dimensions)
mask of shape (batch size, 1, kv_sequence length)
explain: boolean, prints intermediate values if True
outputs: sequence of vectors, re-represented using attention
shape (batch size, q_sequence length, embedding dimensions)
use:
The encoder layer places the same source vector sequence into q,k,v
and mask into mask.
The decoder layer uses this twice, once with decoder inputs as q,k,v
            and target mask as mask, then with decoder inputs as q, encoder outputs
as k, v and source mask as mask
"""
batch_size = q.size(0)
q = self.q_linear(q)
k = self.k_linear(k)
v = self.v_linear(v)
if explain:
None
k = k.view(batch_size, -1, self.num_heads, self.dim_k)
q = q.view(batch_size, -1, self.num_heads, self.dim_k)
v = v.view(batch_size, -1, self.num_heads, self.dim_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
if explain:
None
attn, scores = self.attention(q, k, v, self.dim_k, mask, self.
dropout, explain)
if explain:
None
concat = attn.transpose(1, 2).contiguous().view(batch_size, -1,
self.dim_k * self.num_heads)
if explain:
None
output = self.out(concat)
if explain:
None
return output, scores
class FeedForward(nn.Module):
def __init__(self, emb_dim, ff_dim=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(emb_dim, ff_dim)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(ff_dim, emb_dim)
def forward(self, x):
x = self.dropout(F.leaky_relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class EncoderLayer(nn.Module):
def __init__(self, emb_dim, heads, dropout=0.1):
super().__init__()
self.norm_1 = Norm(emb_dim)
self.dropout_1 = nn.Dropout(dropout)
self.attn = MultiHeadAttention(heads, emb_dim, dropout=dropout)
self.norm_2 = Norm(emb_dim)
self.ff = FeedForward(emb_dim, dropout=dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, vector_sequence, mask):
"""
input:
vector_sequence of shape (batch size, sequence length, embedding dimensions)
mask (mask over input sequence) of shape (batch size, 1, sequence length)
        output: sequence of vectors after embedding, positional encoding, attention and
normalization
shape (batch size, sequence length, embedding dimensions)
"""
x2 = self.norm_1(vector_sequence)
x2_attn, _x2_scores = self.attn(x2, x2, x2, mask)
vector_sequence = vector_sequence + self.dropout_1(x2_attn)
x2 = self.norm_2(vector_sequence)
vector_sequence = vector_sequence + self.dropout_2(self.ff(x2))
return vector_sequence
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'emb_dim': 4, 'heads': 4}]
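A minimal usage sketch of the eager EncoderLayer, following the shapes given in the docstrings above (a mask of all ones means nothing is padded):
import torch

layer = EncoderLayer(emb_dim=4, heads=4)
src = torch.rand(4, 4, 4)      # (batch, seq len, emb dim)
mask = torch.ones(4, 1, 4)     # (batch, 1, seq len); zeros would mark padding
out = layer(src, mask)
print(out.shape)               # torch.Size([4, 4, 4])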
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = 3.0
tmp24 = tmp22 / tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = 1e-06
tmp27 = tmp25 + tmp26
tmp28 = tmp11 / tmp27
tmp29 = tmp0 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_eq_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last').to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -1000000000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x3, tmp20, xmask)
tl.store(out_ptr1 + x3, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_4(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 16
x5 = xindex
x6 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + x5, xmask)
tmp6 = tl.load(in_ptr1 + x6, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -1000000000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x5, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_mean_std_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(in_out_ptr0 + x0, tmp29, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = libdevice.sqrt(tmp6)
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (2048, 4), (4, 1))
assert_size_stride(primals_16, (2048,), (1,))
assert_size_stride(primals_17, (4, 2048), (2048, 1))
assert_size_stride(primals_18, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(64)](primals_2,
primals_1, primals_3, buf0, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf1
triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_7, buf5, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_eq_2[grid(64)](primals_10, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_10
buf8 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf2
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_3[grid(64)](buf7, buf6,
buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_div_masked_fill_4[grid(256)](buf10, buf7,
buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf9
triton_poi_fused_clone_1[grid(16, 4)](buf3, primals_9, buf11, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_9
buf12 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12)
buf13 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_5[grid(16, 4)](buf12, buf13, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0)
del buf12
extern_kernels.addmm(primals_12, reinterpret_tensor(buf13, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf14)
del primals_12
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf17 = buf16
del buf16
triton_poi_fused_add_mean_std_6[grid(16)](buf17, primals_1, buf14,
buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_13,
primals_1, buf14, buf15, buf17, primals_14, buf18, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf15
del buf17
del primals_14
buf19 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 2048), (1, 4), 0), out=buf19)
buf20 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool)
buf21 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.float32
)
triton_poi_fused_leaky_relu_8[grid(32768)](buf19, primals_16, buf20,
buf21, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del buf19
del primals_16
buf22 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf21, (16, 2048), (2048, 1),
0), reinterpret_tensor(primals_17, (2048, 4), (1, 2048), 0),
out=buf22)
buf23 = reinterpret_tensor(buf22, (4, 4, 4), (16, 4, 1), 0)
del buf22
triton_poi_fused_add_9[grid(64)](buf23, primals_1, buf14,
primals_18, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_18
return buf23, primals_1, primals_13, reinterpret_tensor(buf0, (16, 4),
(4, 1), 0), buf7, buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0
), buf14, reinterpret_tensor(buf18, (16, 4), (4, 1), 0
), buf20, reinterpret_tensor(buf21, (16, 2048), (2048, 1), 0
), primals_17, primals_15, primals_11, reinterpret_tensor(buf11, (
16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0
), primals_8, primals_6, primals_4
class Norm(nn.Module):
def __init__(self, emb_dim, eps=1e-06):
super().__init__()
self.size = emb_dim
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
"""
inputs:
x: input of shape: (batch size, sequence length, embedding dimensions)
outputs: Scaled, normalized x
"""
norm = (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=
True) + self.eps)
norm = self.alpha * norm + self.bias
return norm
class MultiHeadAttention(nn.Module):
def __init__(self, num_heads, emb_dim, dim_k=None, dropout=0.1):
super().__init__()
self.emb_dim = emb_dim
self.dim_k = dim_k if dim_k else emb_dim // num_heads
self.num_heads = num_heads
self.q_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.k_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.v_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(self.dim_k * num_heads, emb_dim)
def attention(self, q, k, v, dim_k, mask=None, dropout=None, explain=False
):
k = k.transpose(-2, -1)
if explain:
None
scores = torch.matmul(q, k) / math.sqrt(dim_k)
if explain:
None
if mask is not None:
mask = mask.unsqueeze(1)
if explain:
None
scores = scores.masked_fill(mask == 0, -1000000000.0)
softscores = F.softmax(scores, dim=-1)
if dropout is not None:
softscores = dropout(softscores)
output = torch.matmul(softscores, v)
return output, scores
def forward(self, q, k, v, mask=None, explain=False):
"""
inputs:
q has shape (batch size, q_sequence length, embedding dimensions)
k,v have shape (batch size, kv_sequence length, embedding dimensions)
mask of shape (batch size, 1, kv_sequence length)
explain: boolean, prints intermediate values if True
outputs: sequence of vectors, re-represented using attention
shape (batch size, q_sequence length, embedding dimensions)
use:
The encoder layer places the same source vector sequence into q,k,v
and mask into mask.
The decoder layer uses this twice, once with decoder inputs as q,k,v
            and target mask as mask, then with decoder inputs as q, encoder outputs
as k, v and source mask as mask
"""
batch_size = q.size(0)
q = self.q_linear(q)
k = self.k_linear(k)
v = self.v_linear(v)
if explain:
None
k = k.view(batch_size, -1, self.num_heads, self.dim_k)
q = q.view(batch_size, -1, self.num_heads, self.dim_k)
v = v.view(batch_size, -1, self.num_heads, self.dim_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
if explain:
None
attn, scores = self.attention(q, k, v, self.dim_k, mask, self.
dropout, explain)
if explain:
None
concat = attn.transpose(1, 2).contiguous().view(batch_size, -1,
self.dim_k * self.num_heads)
if explain:
None
output = self.out(concat)
if explain:
None
return output, scores
class FeedForward(nn.Module):
def __init__(self, emb_dim, ff_dim=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(emb_dim, ff_dim)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(ff_dim, emb_dim)
def forward(self, x):
x = self.dropout(F.leaky_relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class EncoderLayerNew(nn.Module):
def __init__(self, emb_dim, heads, dropout=0.1):
super().__init__()
self.norm_1 = Norm(emb_dim)
self.dropout_1 = nn.Dropout(dropout)
self.attn = MultiHeadAttention(heads, emb_dim, dropout=dropout)
self.norm_2 = Norm(emb_dim)
self.ff = FeedForward(emb_dim, dropout=dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, input_0, input_1):
primals_2 = self.norm_1.alpha
primals_3 = self.norm_1.bias
primals_4 = self.attn.q_linear.weight
primals_5 = self.attn.q_linear.bias
primals_6 = self.attn.k_linear.weight
primals_7 = self.attn.k_linear.bias
primals_8 = self.attn.v_linear.weight
primals_9 = self.attn.v_linear.bias
primals_11 = self.attn.out.weight
primals_12 = self.attn.out.bias
primals_13 = self.norm_2.alpha
primals_14 = self.norm_2.bias
primals_15 = self.ff.linear_1.weight
primals_16 = self.ff.linear_1.bias
primals_17 = self.ff.linear_2.weight
primals_18 = self.ff.linear_2.bias
primals_1 = input_0
primals_10 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18])
return output[0]
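A sketch of cross-checking the Triton-backed layer against the eager EncoderLayer from the original code, assuming a CUDA device. The generated call() graph contains no dropout kernels and asserts a (4, 4, 4) mask, so the eager layer is put in eval mode and the mask shape follows the assert rather than the docstring:
import torch

eager = EncoderLayer(emb_dim=4, heads=4).cuda().eval()
fused = EncoderLayerNew(emb_dim=4, heads=4).cuda().eval()
fused.load_state_dict(eager.state_dict())     # same weights on both paths
src = torch.rand(4, 4, 4, device='cuda')
mask = torch.ones(4, 4, 4, device='cuda')     # all ones: nothing masked out
print(torch.allclose(eager(src, mask), fused(src, mask), atol=1e-5))  # should print True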
|
chandar-lab/CriticalGradientOptimization
|
EncoderLayer
| false
| 6,444
|
[
"MIT"
] | 1
|
1af4b1df40489991289bb50bb69859a00b2c97c6
|
https://github.com/chandar-lab/CriticalGradientOptimization/tree/1af4b1df40489991289bb50bb69859a00b2c97c6
|
co_peak_loss
|
import torch
from torch import nn
class co_peak_loss(nn.Module):
def __init__(self):
super(co_peak_loss, self).__init__()
def forward(self, co_peak_value):
a = -1 * co_peak_value
b = torch.max(torch.zeros_like(co_peak_value), a)
t = b + torch.log(torch.exp(-b) + torch.exp(a - b))
loss = torch.mean(t)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
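The b = max(0, a) rearrangement above is the standard numerically stable softplus: b + log(exp(-b) + exp(a - b)) equals log(1 + exp(a)) without overflow for large a, so the whole loss is mean(softplus(-co_peak_value)). A quick check against torch.nn.functional, as a sketch:
import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4) * 50           # large magnitudes to stress stability
stable = co_peak_loss()(x)
direct = F.softplus(-x).mean()
print(torch.allclose(stable, direct))       # should print True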
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_exp_log_maximum_mean_mul_neg_sub_zeros_like_0(
in_out_ptr0, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = -tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp2 - tmp4
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp10 = tl_math.log(tmp9)
tmp11 = tmp4 + tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_exp_log_maximum_mean_mul_neg_sub_zeros_like_0[grid
(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class co_peak_lossNew(nn.Module):
def __init__(self):
super(co_peak_lossNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
cj4L/DeepCO3-python
|
co_peak_loss
| false
| 6,445
|
[
"MIT"
] | 1
|
fa28ed7b43a3a236d0cc7bf31ce9fd68c01b5888
|
https://github.com/cj4L/DeepCO3-python/tree/fa28ed7b43a3a236d0cc7bf31ce9fd68c01b5888
|
Attention
|
import torch
class Attention(torch.nn.Module):
""" Applies attention mechanism on the `context` using the `query`.
**Thank you** to IBM for their initial implementation of :class:`Attention`. Here is
their `License
<https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__.
Args:
dimensions (int): Dimensionality of the query and context.
attention_type (str, optional): How to compute the attention score:
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
Example:
>>> attention = Attention(256)
>>> query = torch.randn(5, 1, 256)
>>> context = torch.randn(5, 5, 256)
>>> output, weights = attention(query, context)
>>> output.size()
torch.Size([5, 1, 256])
>>> weights.size()
torch.Size([5, 1, 5])
"""
def __init__(self, dimensions, attention_type='general'):
super(Attention, self).__init__()
if attention_type not in ['dot', 'general']:
raise ValueError('Invalid attention type selected.')
self.attention_type = attention_type
if self.attention_type == 'general':
self.linear_in = torch.nn.Linear(dimensions, dimensions, bias=False
)
self.linear_out = torch.nn.Linear(dimensions * 2, dimensions, bias=
False)
self.softmax = torch.nn.Softmax(dim=-1)
self.tanh = torch.nn.Tanh()
def forward(self, query, context):
"""
Args:
query (:class:`torch.FloatTensor` [batch size, output length, dimensions]): Sequence of
queries to query the context.
context (:class:`torch.FloatTensor` [batch size, query length, dimensions]): Data
                over which to apply the attention mechanism.
Returns:
:class:`tuple` with `output` and `weights`:
* **output** (:class:`torch.LongTensor` [batch size, output length, dimensions]):
Tensor containing the attended features.
* **weights** (:class:`torch.FloatTensor` [batch size, output length, query length]):
Tensor containing attention weights.
"""
batch_size, output_len, dimensions = query.size()
query_len = context.size(1)
if self.attention_type == 'general':
query = query.reshape(batch_size * output_len, dimensions)
query = self.linear_in(query)
query = query.reshape(batch_size, output_len, dimensions)
attention_scores = torch.bmm(query, context.transpose(1, 2).
contiguous())
attention_scores = attention_scores.view(batch_size * output_len,
query_len)
attention_weights = self.softmax(attention_scores)
attention_weights = attention_weights.view(batch_size, output_len,
query_len)
mix = torch.bmm(attention_weights, context)
combined = torch.cat((mix, query), dim=2)
combined = combined.view(batch_size * output_len, 2 * dimensions)
output = self.linear_out(combined).view(batch_size, output_len,
dimensions)
output = self.tanh(output)
return output, attention_weights
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dimensions': 4}]
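A sketch of checking the Triton-backed AttentionNew wrapper below against this eager module, assuming a CUDA device; the weights are copied across so both paths use the same linear_in / linear_out parameters:
import torch

eager = Attention(4).cuda()
fused = AttentionNew(4).cuda()
fused.load_state_dict(eager.state_dict())
query = torch.rand(4, 4, 4, device='cuda')
context = torch.rand(4, 4, 4, device='cuda')
out_e, w_e = eager(query, context)
out_f, w_f = fused(query, context)
print(torch.allclose(out_e, out_f, atol=1e-6), torch.allclose(w_e, w_f, atol=1e-6))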
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4
y3 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
tl.store(out_ptr1 + (y2 + 4 * x1 + 16 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_transpose_0[grid(16, 4)](primals_2, buf1,
buf9, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), buf1, out=buf2)
buf3 = reinterpret_tensor(buf1, (16, 4), (4, 1), 0)
del buf1
triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (16, 4), (4, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1),
0), primals_2, out=buf5)
buf6 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_3[grid(128)](buf5, buf0, buf6, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
buf7 = reinterpret_tensor(buf5, (16, 4), (4, 1), 0)
del buf5
extern_kernels.mm(reinterpret_tensor(buf6, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0)
del buf7
triton_poi_fused_tanh_4[grid(64)](buf8, 64, XBLOCK=64, num_warps=1,
num_stages=1)
return buf8, reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0
), buf4, reinterpret_tensor(buf6, (16, 8), (8, 1), 0
), buf8, primals_4, buf9
class AttentionNew(torch.nn.Module):
""" Applies attention mechanism on the `context` using the `query`.
**Thank you** to IBM for their initial implementation of :class:`Attention`. Here is
their `License
<https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__.
Args:
dimensions (int): Dimensionality of the query and context.
attention_type (str, optional): How to compute the attention score:
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
Example:
>>> attention = Attention(256)
>>> query = torch.randn(5, 1, 256)
>>> context = torch.randn(5, 5, 256)
>>> output, weights = attention(query, context)
>>> output.size()
torch.Size([5, 1, 256])
>>> weights.size()
torch.Size([5, 1, 5])
"""
def __init__(self, dimensions, attention_type='general'):
super(AttentionNew, self).__init__()
if attention_type not in ['dot', 'general']:
raise ValueError('Invalid attention type selected.')
self.attention_type = attention_type
if self.attention_type == 'general':
self.linear_in = torch.nn.Linear(dimensions, dimensions, bias=False
)
self.linear_out = torch.nn.Linear(dimensions * 2, dimensions, bias=
False)
self.softmax = torch.nn.Softmax(dim=-1)
self.tanh = torch.nn.Tanh()
def forward(self, input_0, input_1):
primals_3 = self.linear_in.weight
primals_4 = self.linear_out.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
|
choderalab/pisco
|
Attention
| false
| 6,446
|
[
"MIT"
] | 1
|
dccb36edf49960929cfb823f885d38cb84d444d1
|
https://github.com/choderalab/pisco/tree/dccb36edf49960929cfb823f885d38cb84d444d1
|
DenseModelV3
|
import torch
import torch.nn as nn
class DenseModelV3(nn.Module):
def __init__(self, input_dim, num_classes=2):
super(DenseModelV3, self).__init__()
self.fc1 = nn.Linear(input_dim, 2000)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(2000, 2000)
self.relu2 = nn.ReLU(inplace=True)
self.fc3 = nn.Linear(2000, 2000)
self.relu3 = nn.ReLU(inplace=True)
self.fc4 = nn.Linear(2000, 2000)
self.relu4 = nn.ReLU(inplace=True)
self.fc5 = nn.Linear(2000, 2000)
self.relu5 = nn.ReLU(inplace=True)
self.fc6 = nn.Linear(2000, 2000)
self.relu6 = nn.ReLU(inplace=True)
self.fc7 = nn.Linear(2000, 2000)
self.relu7 = nn.ReLU(inplace=True)
self.fc8 = nn.Linear(2000, 2000)
self.relu8 = nn.ReLU(inplace=True)
self.fc9 = nn.Linear(2000, 400)
self.relu9 = nn.ReLU(inplace=True)
if num_classes == 2:
self.fc10 = nn.Linear(400, 1)
else:
self.fc10 = nn.Linear(400, num_classes)
def forward(self, x):
x = self.relu1(self.fc1(x))
x = self.relu2(self.fc2(x))
x = self.relu3(self.fc3(x))
x = self.relu4(self.fc4(x))
x = self.relu5(self.fc5(x))
x = self.relu6(self.fc6(x))
x = self.relu7(self.fc7(x))
x = self.relu8(self.fc8(x))
x = self.relu9(self.fc9(x))
x = self.fc10(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
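A minimal usage sketch of the eager DenseModelV3: with the default num_classes=2 the head is Linear(400, 1), so the network emits a single logit per position:
import torch

model = DenseModelV3(input_dim=4)
x = torch.rand(4, 4, 4, 4)
logits = model(x)
print(logits.shape)     # torch.Size([4, 4, 4, 1])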
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = xindex // 2000
tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask)
tl.store(out_ptr0 + (x0 + 2048 * x1), tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = xindex // 2000
tmp0 = tl.load(in_ptr0 + (x0 + 2016 * x1 + 8064 * (x1 % 4 // 4) + 32256 *
((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + (x0 + 2016 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 400
x1 = xindex // 400
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 400 * x1 + 1600 * (x1 % 4 // 4) + 6400 *
((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_threshold_backward_4(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x6 = xindex % 1600
x7 = xindex // 1600
tmp0 = tl.load(in_ptr0 + x5, xmask)
tmp1 = 0.0
tmp2 = tmp0 <= tmp1
tl.store(out_ptr0 + (x6 + 1664 * x7), tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (2000, 4), (4, 1))
assert_size_stride(primals_2, (2000,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2000, 2000), (2000, 1))
assert_size_stride(primals_5, (2000,), (1,))
assert_size_stride(primals_6, (2000, 2000), (2000, 1))
assert_size_stride(primals_7, (2000,), (1,))
assert_size_stride(primals_8, (2000, 2000), (2000, 1))
assert_size_stride(primals_9, (2000,), (1,))
assert_size_stride(primals_10, (2000, 2000), (2000, 1))
assert_size_stride(primals_11, (2000,), (1,))
assert_size_stride(primals_12, (2000, 2000), (2000, 1))
assert_size_stride(primals_13, (2000,), (1,))
assert_size_stride(primals_14, (2000, 2000), (2000, 1))
assert_size_stride(primals_15, (2000,), (1,))
assert_size_stride(primals_16, (2000, 2000), (2000, 1))
assert_size_stride(primals_17, (2000,), (1,))
assert_size_stride(primals_18, (400, 2000), (2000, 1))
assert_size_stride(primals_19, (400,), (1,))
assert_size_stride(primals_20, (1, 400), (400, 1))
assert_size_stride(primals_21, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 2000), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf0
buf37 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf1,
primals_2, buf37, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf1, buf2, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 2000), (2016, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (2000, 2000),
(1, 2000), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf3
buf36 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf4,
primals_5, buf36, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf4, buf5, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (64, 2000), (2016, 1), 0)
del buf4
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (2000, 2000),
(1, 2000), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf6
buf35 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf7,
primals_7, buf35, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf7, buf8, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf7, (64, 2000), (2016, 1), 0)
del buf7
extern_kernels.mm(buf8, reinterpret_tensor(primals_8, (2000, 2000),
(1, 2000), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 2000), (32256, 8064,
2016, 1), 0)
del buf9
buf34 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf10,
primals_9, buf34, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf11 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf10, buf11, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf10, (64, 2000), (2016, 1), 0)
del buf10
extern_kernels.mm(buf11, reinterpret_tensor(primals_10, (2000, 2000
), (1, 2000), 0), out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 4, 4, 2000), (32256, 8064,
2016, 1), 0)
del buf12
buf33 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf13,
primals_11, buf33, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf14 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf13, buf14, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf15 = reinterpret_tensor(buf13, (64, 2000), (2016, 1), 0)
del buf13
extern_kernels.mm(buf14, reinterpret_tensor(primals_12, (2000, 2000
), (1, 2000), 0), out=buf15)
buf16 = reinterpret_tensor(buf15, (4, 4, 4, 2000), (32256, 8064,
2016, 1), 0)
del buf15
buf32 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf16,
primals_13, buf32, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
buf17 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf16, buf17, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf18 = reinterpret_tensor(buf16, (64, 2000), (2016, 1), 0)
del buf16
extern_kernels.mm(buf17, reinterpret_tensor(primals_14, (2000, 2000
), (1, 2000), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4, 2000), (32256, 8064,
2016, 1), 0)
del buf18
buf31 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf19,
primals_15, buf31, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_15
buf20 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf19, buf20, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf21 = reinterpret_tensor(buf19, (64, 2000), (2016, 1), 0)
del buf19
extern_kernels.mm(buf20, reinterpret_tensor(primals_16, (2000, 2000
), (1, 2000), 0), out=buf21)
buf22 = reinterpret_tensor(buf21, (4, 4, 4, 2000), (32256, 8064,
2016, 1), 0)
del buf21
buf30 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf22,
primals_17, buf30, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_17
buf23 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf22, buf23, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
del buf22
buf24 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(buf23, reinterpret_tensor(primals_18, (2000, 400),
(1, 2000), 0), out=buf24)
buf25 = reinterpret_tensor(buf24, (4, 4, 4, 400), (6400, 1600, 400,
1), 0)
del buf24
triton_poi_fused_relu_2[grid(25600)](buf25, primals_19, 25600,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf26 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
triton_poi_fused_view_3[grid(25600)](buf25, buf26, 25600, XBLOCK=
256, num_warps=4, num_stages=1)
buf28 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_21, buf26, reinterpret_tensor(
primals_20, (400, 1), (1, 400), 0), alpha=1, beta=1, out=buf28)
del primals_21
buf29 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_threshold_backward_4[grid(25600)](buf25, buf29,
25600, XBLOCK=128, num_warps=4, num_stages=1)
del buf25
return (reinterpret_tensor(buf28, (4, 4, 4, 1), (16, 4, 1, 1), 0),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf5, buf8,
buf11, buf14, buf17, buf20, buf23, buf26, primals_20, buf29,
primals_18, buf30, primals_16, buf31, primals_14, buf32, primals_12,
buf33, primals_10, buf34, primals_8, buf35, primals_6, buf36,
primals_4, buf37)
class DenseModelV3New(nn.Module):
def __init__(self, input_dim, num_classes=2):
super(DenseModelV3New, self).__init__()
self.fc1 = nn.Linear(input_dim, 2000)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(2000, 2000)
self.relu2 = nn.ReLU(inplace=True)
self.fc3 = nn.Linear(2000, 2000)
self.relu3 = nn.ReLU(inplace=True)
self.fc4 = nn.Linear(2000, 2000)
self.relu4 = nn.ReLU(inplace=True)
self.fc5 = nn.Linear(2000, 2000)
self.relu5 = nn.ReLU(inplace=True)
self.fc6 = nn.Linear(2000, 2000)
self.relu6 = nn.ReLU(inplace=True)
self.fc7 = nn.Linear(2000, 2000)
self.relu7 = nn.ReLU(inplace=True)
self.fc8 = nn.Linear(2000, 2000)
self.relu8 = nn.ReLU(inplace=True)
self.fc9 = nn.Linear(2000, 400)
self.relu9 = nn.ReLU(inplace=True)
if num_classes == 2:
self.fc10 = nn.Linear(400, 1)
else:
self.fc10 = nn.Linear(400, num_classes)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_12 = self.fc6.weight
primals_13 = self.fc6.bias
primals_14 = self.fc7.weight
primals_15 = self.fc7.bias
primals_16 = self.fc8.weight
primals_17 = self.fc8.bias
primals_18 = self.fc9.weight
primals_19 = self.fc9.bias
primals_20 = self.fc10.weight
primals_21 = self.fc10.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0]
|
chawins/adv-exp
|
DenseModelV3
| false
| 6,447
|
[
"MIT"
] | 1
|
5423e135c5599e4ec2bf90372916d8d05c89f285
|
https://github.com/chawins/adv-exp/tree/5423e135c5599e4ec2bf90372916d8d05c89f285
|
Classifier
|
import torch
import torch.nn.functional as F
from torch import nn
class Classifier(nn.Module):
def __init__(self, dims):
"""
Single hidden layer classifier
with softmax output.
"""
super(Classifier, self).__init__()
[x_dim, h_dim, y_dim] = dims
self.dense = nn.Linear(x_dim, h_dim)
self.logits = nn.Linear(h_dim, y_dim)
def forward(self, x):
x = F.relu(self.dense(x))
x = F.softmax(self.logits(x), dim=-1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dims': [4, 4, 4]}]
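# Hedged usage sketch (added for illustration, not part of the original repo):
# exercises the Classifier defined above with the shapes from get_inputs()/
# get_init_inputs(); the underscore-prefixed names are illustrative only.
if __name__ == '__main__':
    _clf = Classifier(dims=[4, 4, 4])
    _probs = _clf(torch.rand([4, 4, 4, 4]))
    # softmax is taken over the last dimension, so each length-4 slice sums to ~1
    assert torch.allclose(_probs.sum(dim=-1), torch.ones(4, 4, 4), atol=1e-5)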
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5
class ClassifierNew(nn.Module):
def __init__(self, dims):
"""
Single hidden layer classifier
with softmax output.
"""
super(ClassifierNew, self).__init__()
[x_dim, h_dim, y_dim] = dims
self.dense = nn.Linear(x_dim, h_dim)
self.logits = nn.Linear(h_dim, y_dim)
def forward(self, input_0):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_4 = self.logits.weight
primals_5 = self.logits.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
chunglabmit/phathom
|
Classifier
| false
| 6,448
|
[
"MIT"
] | 1
|
304db7a95e898e9b03d6b2640172752d21a7e3ed
|
https://github.com/chunglabmit/phathom/tree/304db7a95e898e9b03d6b2640172752d21a7e3ed
|
Length
|
import torch
from torch import nn
class Length(nn.Module):
def __init__(self, dim=1, keepdim=True, p='fro'):
super(Length, self).__init__()
self.dim = dim
self.keepdim = keepdim
self.p = p
def forward(self, inputs):
return inputs.norm(dim=self.dim, keepdim=self.keepdim, p=self.p)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
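# Hedged check (added for illustration, not part of the original repo): with
# p='fro' and a dim argument, Length reduces to the square root of the sum of
# squares along that dim, which is what the Triton kernel below computes.
if __name__ == '__main__':
    _x = torch.rand([4, 4, 4, 4])
    _out = Length(dim=1, keepdim=True, p='fro')(_x)
    _ref = _x.pow(2).sum(dim=1, keepdim=True).sqrt()
    assert torch.allclose(_out, _ref, atol=1e-6)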
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_linalg_vector_norm_0[grid(64)](arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class LengthNew(nn.Module):
def __init__(self, dim=1, keepdim=True, p='fro'):
super(LengthNew, self).__init__()
self.dim = dim
self.keepdim = keepdim
self.p = p
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
clementpoiret/3D-AGSCaps
|
Length
| false
| 6,449
|
[
"MIT"
] | 1
|
475eb1915bc1425cebbd0bec36e9096c9c2cb53c
|
https://github.com/clementpoiret/3D-AGSCaps/tree/475eb1915bc1425cebbd0bec36e9096c9c2cb53c
|
ElemAffineNetwork
|
import torch
import torch.nn as nn
class ElemAffineNetwork(nn.Module):
"""Network for parameterizing affine transformation"""
def __init__(self, input_dim):
super(ElemAffineNetwork, self).__init__()
self.input_dim = input_dim
self.fc1 = nn.Linear(input_dim, 2000)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(2000, 2000)
self.relu2 = nn.ReLU(inplace=True)
self.fc3 = nn.Linear(2000, 2000)
self.relu3 = nn.ReLU(inplace=True)
self.fc4 = nn.Linear(2000, 2000)
self.relu4 = nn.ReLU(inplace=True)
self.fc5 = nn.Linear(2000, 2 * input_dim)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.constant_(m.weight, 0)
def forward(self, x):
x = self.relu1(self.fc1(x))
x = self.relu2(self.fc2(x))
x = self.relu3(self.fc3(x))
x = self.relu4(self.fc4(x))
x = self.fc5(x)
scale = torch.exp(x[:, :self.input_dim // 2])
shift = torch.tanh(x[:, self.input_dim // 2:])
return scale, shift
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
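# Hedged usage sketch (added for illustration, not part of the original repo):
# fc5 emits 2 * input_dim features; the module slices them into a strictly
# positive scale (via exp) and a bounded shift (via tanh).
if __name__ == '__main__':
    _net = ElemAffineNetwork(input_dim=4)
    _scale, _shift = _net(torch.rand([4, 4, 4, 4]))
    assert (_scale > 0).all()            # exp(...) is strictly positive
    assert (_shift.abs() <= 1.0).all()   # tanh(...) lies in [-1, 1]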
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = xindex // 2000
tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask)
tl.store(out_ptr0 + (x0 + 2048 * x1), tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = xindex // 2000
tmp0 = tl.load(in_ptr0 + (x0 + 2016 * x1 + 8064 * (x1 % 4 // 4) + 32256 *
((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + (x0 + 2016 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_exp_tanh_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 64
x3 = xindex % 64
x0 = xindex % 8
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 128 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (64 + x3 + 128 * x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp4 + tmp1
tmp6 = libdevice.tanh(tmp5)
tl.store(out_ptr0 + x4, tmp3, xmask)
tl.store(out_ptr1 + x4, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (2000, 4), (4, 1))
assert_size_stride(primals_2, (2000,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2000, 2000), (2000, 1))
assert_size_stride(primals_5, (2000,), (1,))
assert_size_stride(primals_6, (2000, 2000), (2000, 1))
assert_size_stride(primals_7, (2000,), (1,))
assert_size_stride(primals_8, (2000, 2000), (2000, 1))
assert_size_stride(primals_9, (2000,), (1,))
assert_size_stride(primals_10, (8, 2000), (2000, 1))
assert_size_stride(primals_11, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 2000), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf0
buf18 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf1,
primals_2, buf18, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf1, buf2, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 2000), (2016, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (2000, 2000),
(1, 2000), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf3
buf17 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf4,
primals_5, buf17, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf4, buf5, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (64, 2000), (2016, 1), 0)
del buf4
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (2000, 2000),
(1, 2000), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf6
buf16 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf7,
primals_7, buf16, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf7, buf8, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf7, (64, 2000), (2016, 1), 0)
del buf7
extern_kernels.mm(buf8, reinterpret_tensor(primals_8, (2000, 2000),
(1, 2000), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 2000), (32256, 8064,
2016, 1), 0)
del buf9
buf15 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(128000)](buf10,
primals_9, buf15, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf11 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
triton_poi_fused_view_1[grid(128000)](buf10, buf11, 128000, XBLOCK=
1024, num_warps=4, num_stages=1)
del buf10
buf12 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(buf11, reinterpret_tensor(primals_10, (2000, 8),
(1, 2000), 0), out=buf12)
buf13 = empty_strided_cuda((4, 2, 4, 8), (64, 32, 8, 1), torch.float32)
buf14 = empty_strided_cuda((4, 2, 4, 8), (64, 32, 8, 1), torch.float32)
triton_poi_fused_exp_tanh_2[grid(256)](buf12, primals_11, buf13,
buf14, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf12
del primals_11
return (buf13, buf14, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
buf2, buf5, buf8, buf11, buf13, buf14, primals_10, buf15, primals_8,
buf16, primals_6, buf17, primals_4, buf18)
class ElemAffineNetworkNew(nn.Module):
"""Network for parameterizing affine transformation"""
def __init__(self, input_dim):
super(ElemAffineNetworkNew, self).__init__()
self.input_dim = input_dim
self.fc1 = nn.Linear(input_dim, 2000)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(2000, 2000)
self.relu2 = nn.ReLU(inplace=True)
self.fc3 = nn.Linear(2000, 2000)
self.relu3 = nn.ReLU(inplace=True)
self.fc4 = nn.Linear(2000, 2000)
self.relu4 = nn.ReLU(inplace=True)
self.fc5 = nn.Linear(2000, 2 * input_dim)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.constant_(m.weight, 0)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
|
chawins/adv-exp
|
ElemAffineNetwork
| false
| 6,450
|
[
"MIT"
] | 1
|
5423e135c5599e4ec2bf90372916d8d05c89f285
|
https://github.com/chawins/adv-exp/tree/5423e135c5599e4ec2bf90372916d8d05c89f285
|
DecoderLayer
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
class Norm(nn.Module):
def __init__(self, emb_dim, eps=1e-06):
super().__init__()
self.size = emb_dim
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
"""
inputs:
x: input of shape: (batch size, sequence length, embedding dimensions)
outputs: Scaled, normalized x
"""
norm = (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=
True) + self.eps)
norm = self.alpha * norm + self.bias
return norm
class MultiHeadAttention(nn.Module):
def __init__(self, num_heads, emb_dim, dim_k=None, dropout=0.1):
super().__init__()
self.emb_dim = emb_dim
self.dim_k = dim_k if dim_k else emb_dim // num_heads
self.num_heads = num_heads
self.q_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.k_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.v_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(self.dim_k * num_heads, emb_dim)
def attention(self, q, k, v, dim_k, mask=None, dropout=None, explain=False
):
k = k.transpose(-2, -1)
if explain:
None
scores = torch.matmul(q, k) / math.sqrt(dim_k)
if explain:
None
if mask is not None:
mask = mask.unsqueeze(1)
if explain:
None
scores = scores.masked_fill(mask == 0, -1000000000.0)
softscores = F.softmax(scores, dim=-1)
if dropout is not None:
softscores = dropout(softscores)
output = torch.matmul(softscores, v)
return output, scores
def forward(self, q, k, v, mask=None, explain=False):
"""
inputs:
q has shape (batch size, q_sequence length, embedding dimensions)
k,v have shape (batch size, kv_sequence length, embedding dimensions)
mask of shape (batch size, 1, kv_sequence length)
explain: boolean, prints intermediate values if True
outputs: sequence of vectors, re-represented using attention
shape (batch size, q_sequence length, embedding dimensions)
use:
The encoder layer places the same source vector sequence into q,k,v
and mask into mask.
        The decoder layer uses this twice: once with decoder inputs as q,k,v
        and the target mask as mask, then with decoder inputs as q, encoder
        outputs as k,v and the source mask as mask.
"""
batch_size = q.size(0)
q = self.q_linear(q)
k = self.k_linear(k)
v = self.v_linear(v)
if explain:
None
k = k.view(batch_size, -1, self.num_heads, self.dim_k)
q = q.view(batch_size, -1, self.num_heads, self.dim_k)
v = v.view(batch_size, -1, self.num_heads, self.dim_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
if explain:
None
attn, scores = self.attention(q, k, v, self.dim_k, mask, self.
dropout, explain)
if explain:
None
concat = attn.transpose(1, 2).contiguous().view(batch_size, -1,
self.dim_k * self.num_heads)
if explain:
None
output = self.out(concat)
if explain:
None
return output, scores
class FeedForward(nn.Module):
def __init__(self, emb_dim, ff_dim=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(emb_dim, ff_dim)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(ff_dim, emb_dim)
def forward(self, x):
x = self.dropout(F.leaky_relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class DecoderLayer(nn.Module):
def __init__(self, emb_dim, heads, dropout=0.1):
super().__init__()
self.norm_1 = Norm(emb_dim)
self.norm_2 = Norm(emb_dim)
self.norm_3 = Norm(emb_dim)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
self.attn_1 = MultiHeadAttention(heads, emb_dim, dropout=dropout)
self.attn_2 = MultiHeadAttention(heads, emb_dim, dropout=dropout)
self.ff = FeedForward(emb_dim, dropout=dropout)
def forward(self, de_out, de_mask, en_out, en_mask):
"""
inputs:
            de_out - decoder outputs so far (batch size, output sequence length,
embedding dimensions)
de_mask (batch size, output sequence length, output sequence length)
en_out - encoder output (batch size, input sequence length, embedding
dimensions)
en_mask (batch size, 1, input sequence length)
        outputs:
de_out (next decoder output) (batch size, output sequence length,
embedding dimensions)
"""
de_nrm = self.norm_1(de_out)
self_attn, _self_scores = self.attn_1(de_nrm, de_nrm, de_nrm, de_mask)
de_out = de_out + self.dropout_1(self_attn)
de_nrm = self.norm_2(de_out)
en_attn, _en_scores = self.attn_2(de_nrm, en_out, en_out, en_mask)
de_out = de_out + self.dropout_2(en_attn)
de_nrm = self.norm_3(de_out)
de_out = de_out + self.dropout_3(self.ff(de_nrm))
return de_out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'emb_dim': 4, 'heads': 4}]
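# Hedged usage sketch (added for illustration, not part of the original repo):
# runs the decoder layer on the shapes produced by get_inputs(); the layer is
# residual, so the output keeps the (batch, sequence, embedding) shape of de_out.
if __name__ == '__main__':
    _layer = DecoderLayer(emb_dim=4, heads=4)
    _de_out, _de_mask, _en_out, _en_mask = get_inputs()
    _out = _layer(_de_out, _de_mask, _en_out, _en_mask)
    assert _out.shape == _de_out.shape == (4, 4, 4)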
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = 3.0
tmp24 = tmp22 / tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = 1e-06
tmp27 = tmp25 + tmp26
tmp28 = tmp11 / tmp27
tmp29 = tmp0 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_eq_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last').to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -1000000000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x3, tmp20, xmask)
tl.store(out_ptr1 + x3, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_4(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 16
x5 = xindex
x6 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + x5, xmask)
tmp6 = tl.load(in_ptr1 + x6, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -1000000000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x5, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_mean_std_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(in_out_ptr0 + x0, tmp29, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = libdevice.sqrt(tmp6)
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_20, (4, 4), (4, 1))
assert_size_stride(primals_21, (4,), (1,))
assert_size_stride(primals_22, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_23, (4, 4), (4, 1))
assert_size_stride(primals_24, (4,), (1,))
assert_size_stride(primals_25, (4,), (1,))
assert_size_stride(primals_26, (4,), (1,))
assert_size_stride(primals_27, (2048, 4), (4, 1))
assert_size_stride(primals_28, (2048,), (1,))
assert_size_stride(primals_29, (4, 2048), (2048, 1))
assert_size_stride(primals_30, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(64)](primals_2,
primals_1, primals_3, buf0, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf1
triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_7, buf5, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_eq_2[grid(64)](primals_10, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_10
buf8 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf2
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_3[grid(64)](buf7, buf6,
buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_div_masked_fill_4[grid(256)](buf10, buf7,
buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf9
triton_poi_fused_clone_1[grid(16, 4)](buf3, primals_9, buf11, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_9
buf12 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12)
buf13 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_5[grid(16, 4)](buf12, buf13, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0)
del buf12
extern_kernels.addmm(primals_12, reinterpret_tensor(buf13, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf14)
del primals_12
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf17 = buf16
del buf16
triton_poi_fused_add_mean_std_6[grid(16)](buf17, primals_1, buf14,
buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_13,
primals_1, buf14, buf15, buf17, primals_14, buf18, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf15
del buf17
del primals_14
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19)
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_19, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf20)
del primals_17
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_19, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_20, (4, 4), (1, 4), 0), out=buf21)
del primals_20
buf22 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf19, primals_16, buf22, 16,
4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_16
buf23 = reinterpret_tensor(buf19, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf19
triton_poi_fused_clone_1[grid(16, 4)](buf20, primals_18, buf23, 16,
4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_18
buf24 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf23, (16, 1, 4), (4, 0, 1), 0), out=buf24)
buf25 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_eq_2[grid(64)](primals_22, buf25, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_22
buf26 = reinterpret_tensor(buf20, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf20
buf27 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_3[grid(64)](buf25, buf24,
buf26, buf27, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf28 = reinterpret_tensor(buf24, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf24
triton_poi_fused__softmax_div_masked_fill_4[grid(256)](buf28, buf25,
buf26, buf27, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf29 = reinterpret_tensor(buf27, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf27
triton_poi_fused_clone_1[grid(16, 4)](buf21, primals_21, buf29, 16,
4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_21
buf30 = reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 1), 0)
del buf21
extern_kernels.bmm(reinterpret_tensor(buf28, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf29, (16, 4, 1), (4, 1, 0), 0), out=buf30)
buf31 = reinterpret_tensor(buf26, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf26
triton_poi_fused_clone_5[grid(16, 4)](buf30, buf31, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf32 = reinterpret_tensor(buf30, (16, 4), (4, 1), 0)
del buf30
extern_kernels.mm(reinterpret_tensor(buf31, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_23, (4, 4), (1, 4), 0), out=buf32)
buf33 = reinterpret_tensor(buf32, (4, 4, 4), (16, 4, 1), 0)
del buf32
triton_poi_fused_add_8[grid(64)](buf33, primals_1, buf14,
primals_24, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_24
buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(64)](primals_25,
buf33, primals_26, buf34, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_26
buf35 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf34, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_27, (4, 2048), (1, 4), 0), out=buf35)
buf36 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool)
buf37 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.float32
)
triton_poi_fused_leaky_relu_9[grid(32768)](buf35, primals_28, buf36,
buf37, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del buf35
del primals_28
buf38 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf37, (16, 2048), (2048, 1),
0), reinterpret_tensor(primals_29, (2048, 4), (1, 2048), 0),
out=buf38)
buf39 = reinterpret_tensor(buf38, (4, 4, 4), (16, 4, 1), 0)
del buf38
triton_poi_fused_add_10[grid(64)](buf39, buf33, primals_30, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_30
return buf39, primals_1, primals_13, primals_25, reinterpret_tensor(buf0,
(16, 4), (4, 1), 0), buf7, buf10, reinterpret_tensor(buf13, (16, 4),
(4, 1), 0), buf14, reinterpret_tensor(buf18, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_19, (16, 4), (4, 1), 0
), buf25, buf28, reinterpret_tensor(buf31, (16, 4), (4, 1), 0
), buf33, reinterpret_tensor(buf34, (16, 4), (4, 1), 0
), buf36, reinterpret_tensor(buf37, (16, 2048), (2048, 1), 0
), primals_29, primals_27, primals_23, reinterpret_tensor(buf29, (
16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf23, (16, 4, 1), (4, 1, 4), 0
), primals_15, primals_11, reinterpret_tensor(buf11, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0
), primals_8, primals_6, primals_4
class Norm(nn.Module):
def __init__(self, emb_dim, eps=1e-06):
super().__init__()
self.size = emb_dim
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
"""
inputs:
x: input of shape: (batch size, sequence length, embedding dimensions)
outputs: Scaled, normalized x
"""
norm = (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=
True) + self.eps)
norm = self.alpha * norm + self.bias
return norm
class MultiHeadAttention(nn.Module):
def __init__(self, num_heads, emb_dim, dim_k=None, dropout=0.1):
super().__init__()
self.emb_dim = emb_dim
self.dim_k = dim_k if dim_k else emb_dim // num_heads
self.num_heads = num_heads
self.q_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.k_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.v_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(self.dim_k * num_heads, emb_dim)
def attention(self, q, k, v, dim_k, mask=None, dropout=None, explain=False
):
k = k.transpose(-2, -1)
if explain:
None
scores = torch.matmul(q, k) / math.sqrt(dim_k)
if explain:
None
if mask is not None:
mask = mask.unsqueeze(1)
if explain:
None
scores = scores.masked_fill(mask == 0, -1000000000.0)
softscores = F.softmax(scores, dim=-1)
if dropout is not None:
softscores = dropout(softscores)
output = torch.matmul(softscores, v)
return output, scores
def forward(self, q, k, v, mask=None, explain=False):
"""
inputs:
q has shape (batch size, q_sequence length, embedding dimensions)
k,v have shape (batch size, kv_sequence length, embedding dimensions)
mask of shape (batch size, 1, kv_sequence length)
explain: boolean, prints intermediate values if True
outputs: sequence of vectors, re-represented using attention
shape (batch size, q_sequence length, embedding dimensions)
use:
The encoder layer places the same source vector sequence into q,k,v
and mask into mask.
        The decoder layer uses this twice: once with decoder inputs as q,k,v
        and the target mask as mask, then with decoder inputs as q, encoder
        outputs as k,v and the source mask as mask.
"""
batch_size = q.size(0)
q = self.q_linear(q)
k = self.k_linear(k)
v = self.v_linear(v)
if explain:
None
k = k.view(batch_size, -1, self.num_heads, self.dim_k)
q = q.view(batch_size, -1, self.num_heads, self.dim_k)
v = v.view(batch_size, -1, self.num_heads, self.dim_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
if explain:
None
attn, scores = self.attention(q, k, v, self.dim_k, mask, self.
dropout, explain)
if explain:
None
concat = attn.transpose(1, 2).contiguous().view(batch_size, -1,
self.dim_k * self.num_heads)
if explain:
None
output = self.out(concat)
if explain:
None
return output, scores
class FeedForward(nn.Module):
def __init__(self, emb_dim, ff_dim=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(emb_dim, ff_dim)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(ff_dim, emb_dim)
def forward(self, x):
x = self.dropout(F.leaky_relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class DecoderLayerNew(nn.Module):
def __init__(self, emb_dim, heads, dropout=0.1):
super().__init__()
self.norm_1 = Norm(emb_dim)
self.norm_2 = Norm(emb_dim)
self.norm_3 = Norm(emb_dim)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
self.attn_1 = MultiHeadAttention(heads, emb_dim, dropout=dropout)
self.attn_2 = MultiHeadAttention(heads, emb_dim, dropout=dropout)
self.ff = FeedForward(emb_dim, dropout=dropout)
def forward(self, input_0, input_1, input_2, input_3):
primals_2 = self.norm_1.alpha
primals_3 = self.norm_1.bias
primals_5 = self.norm_2.alpha
primals_7 = self.norm_2.bias
primals_9 = self.norm_3.alpha
primals_12 = self.norm_3.bias
primals_4 = self.attn_1.q_linear.weight
primals_13 = self.attn_1.q_linear.bias
primals_6 = self.attn_1.k_linear.weight
primals_14 = self.attn_1.k_linear.bias
primals_8 = self.attn_1.v_linear.weight
primals_16 = self.attn_1.v_linear.bias
primals_11 = self.attn_1.out.weight
primals_18 = self.attn_1.out.bias
primals_15 = self.attn_2.q_linear.weight
primals_21 = self.attn_2.q_linear.bias
primals_17 = self.attn_2.k_linear.weight
primals_24 = self.attn_2.k_linear.bias
primals_20 = self.attn_2.v_linear.weight
primals_25 = self.attn_2.v_linear.bias
primals_23 = self.attn_2.out.weight
primals_26 = self.attn_2.out.bias
primals_27 = self.ff.linear_1.weight
primals_28 = self.ff.linear_1.bias
primals_29 = self.ff.linear_2.weight
primals_30 = self.ff.linear_2.bias
primals_1 = input_0
primals_10 = input_1
primals_19 = input_2
primals_22 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30])
return output[0]
|
chandar-lab/CriticalGradientOptimization
|
DecoderLayer
| false
| 6,451
|
[
"MIT"
] | 1
|
1af4b1df40489991289bb50bb69859a00b2c97c6
|
https://github.com/chandar-lab/CriticalGradientOptimization/tree/1af4b1df40489991289bb50bb69859a00b2c97c6
|
logreg
|
import torch
import torch.nn as nn
import torch.utils.data
from torch.nn.utils import weight_norm
class logreg(nn.Module):
def __init__(self, input_size, classes):
super(logreg, self).__init__()
linear = nn.Linear(input_size, classes)
self.logistic_reg = weight_norm(linear, name='weight')
def forward(self, x):
return self.logistic_reg(x)
def predict(self, x):
return torch.sigmoid(self.forward(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'classes': 4}]
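# Hedged usage sketch (added for illustration, not part of the original repo):
# weight_norm re-parameterises the weight as weight_g * weight_v / ||weight_v||
# row-wise (the two small Triton kernels below), and predict() wraps forward()
# in a sigmoid, so its outputs lie strictly inside (0, 1) for finite logits.
if __name__ == '__main__':
    _model = logreg(input_size=4, classes=4)
    _probs = _model.predict(torch.rand([4, 4, 4, 4]))
    assert _probs.shape == (4, 4, 4, 4)
    assert ((_probs > 0) & (_probs < 1)).all()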
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
from torch.nn.utils import weight_norm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1), (1, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0[grid(4)](primals_2, buf0,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(16)](primals_2,
primals_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64,
4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf1, primals_1, primals_2, buf0, reinterpret_tensor(primals_4,
(64, 4), (4, 1), 0)
class logregNew(nn.Module):
def __init__(self, input_size, classes):
super(logregNew, self).__init__()
linear = nn.Linear(input_size, classes)
self.logistic_reg = weight_norm(linear, name='weight')
def predict(self, x):
return torch.sigmoid(self.forward(x))
def forward(self, input_0):
primals_3 = self.logistic_reg.bias
primals_1 = self.logistic_reg.weight_g
primals_2 = self.logistic_reg.weight_v
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
cjbumgardner/HE_for_Medical_Data
|
logreg
| false
| 6,452
|
[
"MIT"
] | 1
|
248dcd8b48924fe1f6edbeee4e16282d4a31069a
|
https://github.com/cjbumgardner/HE_for_Medical_Data/tree/248dcd8b48924fe1f6edbeee4e16282d4a31069a
|
affinity_loss
|
import torch
from torch import nn
class affinity_loss(nn.Module):
def __init__(self):
super(affinity_loss, self).__init__()
def forward(self, pixel_affinity, sal_affinity, sal_diff):
loss = torch.mean(pixel_affinity * (1 - sal_affinity)
) + 4 * torch.mean(sal_diff * sal_affinity)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
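# Hedged check (added for illustration, not part of the original repo): the
# loss is mean(pixel_affinity * (1 - sal_affinity)) + 4 * mean(sal_diff *
# sal_affinity), which the fused Triton kernel below evaluates in one pass.
if __name__ == '__main__':
    _pa, _sa, _sd = get_inputs()
    _loss = affinity_loss()(_pa, _sa, _sd)
    _ref = (_pa * (1 - _sa)).mean() + 4 * (_sd * _sa).mean()
    assert torch.allclose(_loss, _ref)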
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_rsub_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp8 = tl.load(in_ptr2 + r0, None)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp0 * tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp9 = tmp8 * tmp1
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 256.0
tmp14 = tmp7 / tmp13
tmp15 = tmp12 / tmp13
tmp16 = 4.0
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_rsub_0[grid(1)](buf2, arg1_1, arg0_1,
arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class affinity_lossNew(nn.Module):
def __init__(self):
super(affinity_lossNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
cj4L/DeepCO3-python
|
affinity_loss
| false
| 6,453
|
[
"MIT"
] | 1
|
fa28ed7b43a3a236d0cc7bf31ce9fd68c01b5888
|
https://github.com/cj4L/DeepCO3-python/tree/fa28ed7b43a3a236d0cc7bf31ce9fd68c01b5888
|
MulScalarNegative
|
import torch
import torch.nn as nn
from torch.quantization import QuantStub
from torch.quantization import DeQuantStub
class MulScalarNegative(nn.Module):
def __init__(self):
super().__init__()
self.float_op = nn.quantized.FloatFunctional()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
mul = self.float_op.mul_scalar(x, -0.3)
return self.dequant(mul)
def fuse_model(self):
pass
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.quantization import QuantStub
from torch.quantization import DeQuantStub
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -0.3
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MulScalarNegativeNew(nn.Module):
def __init__(self):
super().__init__()
self.float_op = nn.quantized.FloatFunctional()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def fuse_model(self):
pass
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
cli99/tvm
|
MulScalarNegative
| false
| 6,454
|
[
"Apache-2.0"
] | 1
|
6c6e873a1325a32418108daad6e38f3df8c37660
|
https://github.com/cli99/tvm/tree/6c6e873a1325a32418108daad6e38f3df8c37660
|
GramMatrix
|
import torch
import torch.utils.data
import torch
import torch.nn as nn
class GramMatrix(nn.Module):
def forward(self, input):
b, c, h, w = input.size()
F = input.view(b, c, h * w)
G = torch.bmm(F, F.transpose(1, 2))
G.div_(h * w)
return G
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
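# Editor's note: the lines below are an added usage sketch and are not part of
# the original repository code. For a (b, c, h, w) feature map the module
# returns a (b, c, c) Gram matrix normalised by h * w, which is symmetric.
if __name__ == '__main__':
    feats, = get_inputs()
    gram = GramMatrix()(feats)
    assert gram.shape == (4, 4, 4)
    assert torch.allclose(gram, gram.transpose(1, 2))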
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.0625
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16,
1), 0), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0),
out=buf0)
del arg0_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_div_0[grid(64)](buf1, 64, XBLOCK=64, num_warps=1,
num_stages=1)
return buf1,
class GramMatrixNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ckxy/1d_expan
|
GramMatrix
| false
| 6,455
|
[
"MIT"
] | 1
|
29cc294e0314d738e8e041f34c995fd22f9f980b
|
https://github.com/ckxy/1d_expan/tree/29cc294e0314d738e8e041f34c995fd22f9f980b
|
GramMSELoss
|
import torch
import torch.utils.data
import torch
import torch.nn as nn
class GramMatrix(nn.Module):
def forward(self, input):
b, c, h, w = input.size()
F = input.view(b, c, h * w)
G = torch.bmm(F, F.transpose(1, 2))
G.div_(h * w)
return G
class GramMSELoss(nn.Module):
def forward(self, input, target):
out = nn.MSELoss()(GramMatrix()(input), target)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + r2, None)
tmp1 = 0.0625
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16,
1), 0), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0),
out=buf0)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_mse_loss_0[grid(1)](buf2, buf0, arg1_1, 1, 256,
num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
class GramMatrix(nn.Module):
def forward(self, input):
b, c, h, w = input.size()
F = input.view(b, c, h * w)
G = torch.bmm(F, F.transpose(1, 2))
G.div_(h * w)
return G
class GramMSELossNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ckxy/1d_expan
|
GramMSELoss
| false
| 6,456
|
[
"MIT"
] | 1
|
29cc294e0314d738e8e041f34c995fd22f9f980b
|
https://github.com/ckxy/1d_expan/tree/29cc294e0314d738e8e041f34c995fd22f9f980b
|
PlanarNormalizingFlow
|
import torch
import torch.nn.functional as F
from torch import nn
class PlanarNormalizingFlow(nn.Module):
"""
Planar normalizing flow [Rezende & Mohamed 2015].
Provides a tighter bound on the ELBO by giving more expressive
power to the approximate distribution, such as by introducing
covariance between terms.
"""
def __init__(self, in_features):
super(PlanarNormalizingFlow, self).__init__()
self.u = nn.Parameter(torch.randn(in_features))
self.w = nn.Parameter(torch.randn(in_features))
self.b = nn.Parameter(torch.ones(1))
def forward(self, z):
uw = torch.dot(self.u, self.w)
muw = -1 + F.softplus(uw)
uhat = self.u + (muw - uw) * torch.transpose(self.w, 0, -1
) / torch.sum(self.w ** 2)
zwb = torch.mv(z, self.w) + self.b
f_z = z + uhat.view(1, -1) * F.tanh(zwb).view(-1, 1)
psi = (1 - F.tanh(zwb) ** 2).view(-1, 1) * self.w.view(1, -1)
psi_u = torch.mv(psi, uhat)
logdet_jacobian = torch.log(torch.abs(1 + psi_u) + 1e-08)
return f_z, logdet_jacobian
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
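# Editor's note: the lines below are an added usage sketch and are not part of
# the original repository code. They run the flow on CPU with the random
# inputs defined above and check the transformed sample and log-det shapes.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    flow = PlanarNormalizingFlow(*init_args, **init_kwargs)
    z, = get_inputs()
    f_z, logdet = flow(z)
    assert f_z.shape == z.shape
    assert logdet.shape == (z.shape[0],)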
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_dot_log_mul_mv_pow_softplus_sub_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + 1)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp19 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + 2)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp24 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + 3)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp29 = tl.load(in_ptr3 + 0)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp37 = tl.load(in_ptr0 + 0)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp52 = tl.load(in_ptr0 + 1)
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp60 = tl.load(in_ptr0 + 2)
tmp61 = tl.broadcast_to(tmp60, [XBLOCK, RBLOCK])
tmp68 = tl.load(in_ptr0 + 3)
tmp69 = tl.broadcast_to(tmp68, [XBLOCK, RBLOCK])
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tmp1 * tmp1
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp13 = tmp10 * tmp12
tmp17 = tmp14 * tmp16
tmp18 = tmp13 + tmp17
tmp22 = tmp19 * tmp21
tmp23 = tmp18 + tmp22
tmp27 = tmp24 * tmp26
tmp28 = tmp23 + tmp27
tmp31 = tmp28 + tmp30
tmp32 = libdevice.tanh(tmp31)
tmp33 = tmp32 * tmp32
tmp34 = 1.0
tmp35 = tmp34 - tmp33
tmp36 = tmp35 * tmp12
tmp39 = 20.0
tmp40 = tmp5 > tmp39
tmp41 = tl_math.exp(tmp5)
tmp42 = libdevice.log1p(tmp41)
tmp43 = tl.where(tmp40, tmp5, tmp42)
tmp44 = -1.0
tmp45 = tmp43 + tmp44
tmp46 = tmp45 - tmp5
tmp47 = tmp46 * tmp12
tmp48 = tmp47 / tmp9
tmp49 = tmp38 + tmp48
tmp50 = tmp36 * tmp49
tmp51 = tmp35 * tmp16
tmp54 = tmp46 * tmp16
tmp55 = tmp54 / tmp9
tmp56 = tmp53 + tmp55
tmp57 = tmp51 * tmp56
tmp58 = tmp50 + tmp57
tmp59 = tmp35 * tmp21
tmp62 = tmp46 * tmp21
tmp63 = tmp62 / tmp9
tmp64 = tmp61 + tmp63
tmp65 = tmp59 * tmp64
tmp66 = tmp58 + tmp65
tmp67 = tmp35 * tmp26
tmp70 = tmp46 * tmp26
tmp71 = tmp70 / tmp9
tmp72 = tmp69 + tmp71
tmp73 = tmp67 * tmp72
tmp74 = tmp66 + tmp73
tmp75 = tmp74 + tmp34
tmp76 = tl_math.abs(tmp75)
tmp77 = 1e-08
tmp78 = tmp76 + tmp77
tmp79 = tl_math.log(tmp78)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp31, None)
tl.store(in_out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp79, None)
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None)
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp12 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + 0)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp18 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp4 = 20.0
tmp5 = tmp3 > tmp4
tmp6 = tl_math.exp(tmp3)
tmp7 = libdevice.log1p(tmp6)
tmp8 = tl.where(tmp5, tmp3, tmp7)
tmp9 = -1.0
tmp10 = tmp8 + tmp9
tmp11 = tmp10 - tmp3
tmp13 = tmp11 * tmp12
tmp16 = tmp13 / tmp15
tmp17 = tmp1 + tmp16
tmp19 = libdevice.tanh(tmp18)
tmp20 = tmp17 * tmp19
tmp21 = tmp0 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
buf4 = empty_strided_cuda((4,), (1,), torch.float32)
buf5 = buf4
del buf4
get_raw_stream(0)
triton_per_fused_abs_add_div_dot_log_mul_mv_pow_softplus_sub_sum_0[grid
(1)](buf5, primals_1, primals_2, primals_3, primals_4, buf0,
buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_1[grid(16)](primals_3, primals_1, buf0,
primals_2, buf1, buf2, buf3, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf0
del buf1
del buf2
return buf3, buf5, primals_1, primals_2, primals_3, primals_4
class PlanarNormalizingFlowNew(nn.Module):
"""
Planar normalizing flow [Rezende & Mohamed 2015].
Provides a tighter bound on the ELBO by giving more expressive
power to the approximate distribution, such as by introducing
covariance between terms.
"""
def __init__(self, in_features):
super(PlanarNormalizingFlowNew, self).__init__()
self.u = nn.Parameter(torch.randn(in_features))
self.w = nn.Parameter(torch.randn(in_features))
self.b = nn.Parameter(torch.ones(1))
def forward(self, input_0):
primals_1 = self.u
primals_2 = self.w
primals_4 = self.b
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
|
chunglabmit/phathom
|
PlanarNormalizingFlow
| false
| 6,457
|
[
"MIT"
] | 1
|
304db7a95e898e9b03d6b2640172752d21a7e3ed
|
https://github.com/chunglabmit/phathom/tree/304db7a95e898e9b03d6b2640172752d21a7e3ed
|
poly
|
import torch
import numpy as np
import torch.nn as nn
import torch.utils.data
class poly(nn.Module):
"""Polynomial activation function.
degreelist: list of powers of the polynomial.
"""
def __init__(self, degreelist):
super(poly, self).__init__()
self.degreelist = degreelist
p = len(degreelist)
arr = np.ones(p, dtype=np.float32)
coeff = torch.nn.Parameter(torch.tensor(arr), requires_grad=True)
self.register_parameter('coefficients', coeff)
def forward(self, x):
out = [torch.pow(x, n) for n in self.degreelist]
shape = x.shape
out = torch.cat([j.reshape(*shape, 1) for j in out], dim=-1)
out = out * self.coefficients
out = out.sum(-1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'degreelist': [4, 4]}]
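# Editor's note: the lines below are an added usage sketch and are not part of
# the original repository code. With degreelist=[4, 4] and coefficients
# initialised to ones, the activation reduces to 2 * x**4, checked on CPU.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    act = poly(*init_args, **init_kwargs)
    x, = get_inputs()
    assert torch.allclose(act(x), 2 * x.pow(4))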
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp5 * tmp5
tmp7 = tmp6 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp13 = tl.load(in_ptr0 + x1, tmp10 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp13 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp7 = tmp4 * tmp6
tmp8 = tmp3 + tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, buf0, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_1[grid(256)](buf0, primals_2, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, buf0
class polyNew(nn.Module):
"""Polynomial activation function.
degreelist: list of powers of the polynomial.
"""
def __init__(self, degreelist):
super(polyNew, self).__init__()
self.degreelist = degreelist
p = len(degreelist)
arr = np.ones(p, dtype=np.float32)
coeff = torch.nn.Parameter(torch.tensor(arr), requires_grad=True)
self.register_parameter('coefficients', coeff)
def forward(self, input_0):
primals_2 = self.coefficients
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
cjbumgardner/HE_for_Medical_Data
|
poly
| false
| 6,458
|
[
"MIT"
] | 1
|
248dcd8b48924fe1f6edbeee4e16282d4a31069a
|
https://github.com/cjbumgardner/HE_for_Medical_Data/tree/248dcd8b48924fe1f6edbeee4e16282d4a31069a
|
GCN
|
from torch.nn import Module
import math
import torch
import numpy as np
import torch.nn as nn
from torch.nn.modules.module import Module
class GraphConvolution(Module):
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features)
)
if bias:
self.bias = nn.Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def glorot_init(self, input_dim, output_dim):
init_range = np.sqrt(6.0 / (input_dim + output_dim))
initial = torch.rand(input_dim, output_dim
) * 2 * init_range - init_range
return nn.Parameter(initial / 2)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCN(nn.Module):
def __init__(self, nfeat, nhid, dropout, alpha):
super(GCN, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.dropout = dropout
self.leakyrelu = nn.LeakyReLU(alpha)
def forward(self, x, adj):
x = self.leakyrelu(self.gc1(x, adj))
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'dropout': 0.5, 'alpha': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import math
import numpy as np
import torch.nn as nn
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 4.0
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_leaky_relu_0[grid(16)](buf1, primals_4, buf2,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del primals_4
return buf3, buf2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)
class GraphConvolution(Module):
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features)
)
if bias:
self.bias = nn.Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def glorot_init(self, input_dim, output_dim):
init_range = np.sqrt(6.0 / (input_dim + output_dim))
initial = torch.rand(input_dim, output_dim
) * 2 * init_range - init_range
return nn.Parameter(initial / 2)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCNNew(nn.Module):
def __init__(self, nfeat, nhid, dropout, alpha):
super(GCNNew, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.dropout = dropout
self.leakyrelu = nn.LeakyReLU(alpha)
def forward(self, input_0, input_1):
primals_1 = self.gc1.weight
primals_4 = self.gc1.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
cjx96/CDRIB
|
GCN
| false
| 6,459
|
[
"MIT"
] | 1
|
e0d2d2b70ec195a76b479b94fb7758d286350c39
|
https://github.com/cjx96/CDRIB/tree/e0d2d2b70ec195a76b479b94fb7758d286350c39
|
SafeLength
|
import torch
from torch import nn
class SafeLength(nn.Module):
def __init__(self, dim=2, keepdim=False, eps=1e-07):
super(SafeLength, self).__init__()
self.dim = dim
self.keepdim = keepdim
self.eps = eps
def forward(self, x):
squared_norm = torch.sum(torch.square(x), axis=self.dim, keepdim=
self.keepdim)
return torch.sqrt(squared_norm + self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1e-07
tmp12 = tmp10 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_pow_sqrt_sum_0[grid(64)](arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class SafeLengthNew(nn.Module):
def __init__(self, dim=2, keepdim=False, eps=1e-07):
super(SafeLengthNew, self).__init__()
self.dim = dim
self.keepdim = keepdim
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
clementpoiret/3D-AGSCaps
|
SafeLength
| false
| 6,460
|
[
"MIT"
] | 1
|
475eb1915bc1425cebbd0bec36e9096c9c2cb53c
|
https://github.com/clementpoiret/3D-AGSCaps/tree/475eb1915bc1425cebbd0bec36e9096c9c2cb53c
|
StatsPool
|
import torch
import warnings
import torch.nn as nn
from typing import Optional
import torch.optim
import torch.nn.functional as F
class StatsPool(nn.Module):
"""Statistics pooling
Compute temporal mean and (unbiased) standard deviation
and returns their concatenation.
Reference
---------
https://en.wikipedia.org/wiki/Weighted_arithmetic_mean
"""
def forward(self, sequences: 'torch.Tensor', weights:
'Optional[torch.Tensor]'=None) ->torch.Tensor:
"""Forward pass
Parameters
----------
sequences : (batch, channel, frames) torch.Tensor
Sequences.
weights : (batch, frames) torch.Tensor, optional
When provided, compute weighted mean and standard deviation.
Returns
-------
output : (batch, 2 * channel) torch.Tensor
Concatenation of mean and (unbiased) standard deviation.
"""
if weights is None:
mean = sequences.mean(dim=2)
std = sequences.std(dim=2, unbiased=True)
else:
weights = weights.unsqueeze(dim=1)
num_frames = sequences.shape[2]
num_weights = weights.shape[2]
if num_frames != num_weights:
warnings.warn(
f'Mismatch between frames ({num_frames}) and weights ({num_weights}) numbers.'
)
weights = F.interpolate(weights, size=num_frames, mode=
'linear', align_corners=False)
v1 = weights.sum(dim=2)
mean = torch.sum(sequences * weights, dim=2) / v1
dx2 = torch.square(sequences - mean.unsqueeze(2))
v2 = torch.square(weights).sum(dim=2)
var = torch.sum(dx2 * weights, dim=2) / (v1 - v2 / v1)
std = torch.sqrt(var)
return torch.cat([mean, std], dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
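# Editor's note: the lines below are an added usage sketch and are not part of
# the original repository code. They exercise both the unweighted and the
# weighted branches on a (batch, channel, frames) tensor and check that the
# pooled output concatenates mean and std along the channel dimension.
if __name__ == '__main__':
    pool = StatsPool()
    seq = torch.rand(2, 8, 16)
    frame_weights = torch.rand(2, 16)
    assert pool(seq).shape == (2, 16)
    assert pool(seq, weights=frame_weights).shape == (2, 16)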
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr0 + (4 + x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (12 + x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp19 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp16 &
xmask, other=0.0)
tmp20 = tl.load(in_ptr0 + (4 + x0 + 16 * (-4 + x1) + 64 * x2), tmp16 &
xmask, other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.load(in_ptr0 + (8 + x0 + 16 * (-4 + x1) + 64 * x2), tmp16 &
xmask, other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = tl.load(in_ptr0 + (12 + x0 + 16 * (-4 + x1) + 64 * x2), tmp16 &
xmask, other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 / tmp12
tmp27 = tmp19 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp20 - tmp26
tmp30 = tmp29 * tmp29
tmp31 = tmp28 + tmp30
tmp32 = tmp22 - tmp26
tmp33 = tmp32 * tmp32
tmp34 = tmp31 + tmp33
tmp35 = tmp24 - tmp26
tmp36 = tmp35 * tmp35
tmp37 = tmp34 + tmp36
tmp38 = 3.0
tmp39 = tmp37 / tmp38
tmp40 = libdevice.sqrt(tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp16, tmp40, tmp41)
tmp43 = tl.where(tmp4, tmp15, tmp42)
tl.store(out_ptr0 + x3, tmp43, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class StatsPoolNew(nn.Module):
"""Statistics pooling
Compute temporal mean and (unbiased) standard deviation
and returns their concatenation.
Reference
---------
https://en.wikipedia.org/wiki/Weighted_arithmetic_mean
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
clmpt/pyannote-audio
|
StatsPool
| false
| 6,461
|
[
"MIT"
] | 1
|
7d1b7959ca5f817e08176e44d52a7499bbd3149c
|
https://github.com/clmpt/pyannote-audio/tree/7d1b7959ca5f817e08176e44d52a7499bbd3149c
|
UpsamplingBilinear
|
import torch
import torch.nn as nn
from torch.quantization import QuantStub
from torch.quantization import DeQuantStub
class UpsamplingBilinear(nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
upsample = nn.functional.interpolate(x, scale_factor=2, mode=
'bilinear', align_corners=True)
return self.dequant(upsample)
def fuse_model(self):
pass
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.quantization import QuantStub
from torch.quantization import DeQuantStub
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = 1.0
tmp25 = triton_helpers.minimum(tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp16 + tmp26
tmp28 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp30 = tmp29 - tmp28
tmp31 = tmp30 * tmp25
tmp32 = tmp28 + tmp31
tmp33 = tmp27 - tmp32
tmp34 = tmp6.to(tl.float32)
tmp35 = tmp5 - tmp34
tmp36 = triton_helpers.maximum(tmp35, tmp4)
tmp37 = triton_helpers.minimum(tmp36, tmp24)
tmp38 = tmp33 * tmp37
tmp39 = tmp32 + tmp38
tl.store(in_out_ptr0 + x4, tmp39, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(1024)](buf1, arg0_1, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf1,
class UpsamplingBilinearNew(nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def fuse_model(self):
pass
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
cli99/tvm
|
UpsamplingBilinear
| false
| 6,462
|
[
"Apache-2.0"
] | 1
|
6c6e873a1325a32418108daad6e38f3df8c37660
|
https://github.com/cli99/tvm/tree/6c6e873a1325a32418108daad6e38f3df8c37660
|
BinaryDiceLoss
|
import torch
import torch.nn as nn
class BinaryDiceLoss(nn.Module):
"""Dice loss of binary class
Args:
smooth: A float number to smooth loss, and avoid NaN error, default: 1
p: Denominator value: \\sum{x^p} + \\sum{y^p}, default: 2
predict: A tensor of shape [N, *]
target: A tensor of shape same with predict
Returns:
Loss tensor according to arg reduction
Raise:
Exception if unexpected reduction
"""
def __init__(self, smooth=1, p=2):
super(BinaryDiceLoss, self).__init__()
self.smooth = smooth
self.p = p
def forward(self, predict, target):
assert predict.shape[0] == target.shape[0
], "predict & target batch size don't match"
predict = predict.contiguous().view(predict.shape[0], -1)
target = target.contiguous().view(target.shape[0], -1)
num = torch.sum(torch.mul(predict, target)) * 2 + self.smooth
den = torch.sum(predict.pow(self.p) + target.pow(self.p)) + self.smooth
dice = num / den
loss = 1 - dice
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
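# Editor's note: the lines below are an added usage sketch and are not part of
# the original repository code. A perfect binary prediction yields a dice loss
# of ~0, while a fully inverted prediction approaches 1; both checked on CPU.
if __name__ == '__main__':
    criterion = BinaryDiceLoss()
    target = (torch.rand(2, 1, 8, 8) > 0.5).float()
    assert criterion(target, target).item() < 0.01
    assert criterion(1.0 - target, target).item() > 0.9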
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_pow_rsub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tmp0 * tmp0
tmp7 = tmp1 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp13 + tmp14
tmp16 = tmp11 + tmp14
tmp17 = tmp15 / tmp16
tmp18 = tmp14 - tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_pow_rsub_sum_0[grid(1)](buf2, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class BinaryDiceLossNew(nn.Module):
"""Dice loss of binary class
Args:
smooth: A float number to smooth loss, and avoid NaN error, default: 1
p: Denominator value: \\sum{x^p} + \\sum{y^p}, default: 2
predict: A tensor of shape [N, *]
target: A tensor of shape same with predict
Returns:
Loss tensor according to arg reduction
Raise:
Exception if unexpected reduction
"""
def __init__(self, smooth=1, p=2):
super(BinaryDiceLossNew, self).__init__()
self.smooth = smooth
self.p = p
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
cnuzh/CSNet
|
BinaryDiceLoss
| false
| 6,463
|
[
"MIT"
] | 1
|
a6c3163624f55dc294ec2e5a6de020d77bd4ff91
|
https://github.com/cnuzh/CSNet/tree/a6c3163624f55dc294ec2e5a6de020d77bd4ff91
|
BERTMultSelfOutput
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class BERTLayerNorm(nn.Module):
def __init__(self, config, multi_params=None, variance_epsilon=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BERTLayerNorm, self).__init__()
if multi_params is not None:
self.gamma = nn.Parameter(torch.ones(config.hidden_size_aug))
self.beta = nn.Parameter(torch.zeros(config.hidden_size_aug))
else:
self.gamma = nn.Parameter(torch.ones(config.hidden_size))
self.beta = nn.Parameter(torch.zeros(config.hidden_size))
self.variance_epsilon = variance_epsilon
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BERTMultSelfOutput(nn.Module):
def __init__(self, config, multi_params=None):
super(BERTMultSelfOutput, self).__init__()
self.LayerNorm = BERTLayerNorm(config, multi_params)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob=
0.5)}]
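# Editor's note: the lines below are an added usage sketch and are not part of
# the original repository code. They build the same mock config used above,
# disable dropout via eval(), and check the residual + layer-norm output shape.
if __name__ == '__main__':
    config = _mock_config(hidden_size=4, hidden_dropout_prob=0.5)
    layer = BERTMultSelfOutput(config).eval()
    hidden_states, input_tensor = get_inputs()
    assert layer(hidden_states, input_tensor).shape == hidden_states.shape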
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mean_pow_sub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-12
tmp7 = tmp5 + tmp6
tmp8 = libdevice.sqrt(tmp7)
tmp9 = tmp4 / tmp8
tmp11 = tmp10 * tmp9
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_pow_sub_0[grid(64)](primals_1, primals_2,
buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_sub_1[grid(256)](primals_1,
primals_2, buf0, buf1, primals_3, primals_4, buf2, buf3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
del primals_3
del primals_4
return buf3, buf2
class BERTLayerNorm(nn.Module):
def __init__(self, config, multi_params=None, variance_epsilon=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BERTLayerNorm, self).__init__()
if multi_params is not None:
self.gamma = nn.Parameter(torch.ones(config.hidden_size_aug))
self.beta = nn.Parameter(torch.zeros(config.hidden_size_aug))
else:
self.gamma = nn.Parameter(torch.ones(config.hidden_size))
self.beta = nn.Parameter(torch.zeros(config.hidden_size))
self.variance_epsilon = variance_epsilon
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BERTMultSelfOutputNew(nn.Module):
def __init__(self, config, multi_params=None):
super(BERTMultSelfOutputNew, self).__init__()
self.LayerNorm = BERTLayerNorm(config, multi_params)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_3 = self.LayerNorm.gamma
primals_4 = self.LayerNorm.beta
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
DAQuestionAnswering/Bert-n-Pals
|
BERTMultSelfOutput
| false
| 6,464
|
[
"MIT"
] | 1
|
d5a288b9ac62259e70c249635108ba3906e19f00
|
https://github.com/DAQuestionAnswering/Bert-n-Pals/tree/d5a288b9ac62259e70c249635108ba3906e19f00
|