| entry_point (string, 1–65 chars) | original_triton_python_code (string, 208–619k chars) | optimised_triton_code (string, 1.15k–275k chars) | repo_name (string, 7–115 chars) | module_name (string, 1–65 chars) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (list, 1–6 items) | stars (int64, 0–19.8k) | sha (string, 40 chars) | repo_link (string, 72–180 chars) |
|---|---|---|---|---|---|---|---|---|---|---|
Network
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.init as I
class Network(nn.Module):
"""
Q-network
"""
def __init__(self, state_size, action_size, seed, fc1_units=64,
fc2_units=32):
"""
Build the model and initialize it.
Params
======
state_size (int) : State space size
action_size (int) : Action space size
seed (int) : Random seed
fc1_units (int) : Number of units in the first hidden layer
fc2_units (int) : Number of units in the second hidden layer
"""
super(Network, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
"""
Initialize the parameters of the linear layers using Xavier normal initialization.
See "Understanding the difficulty of training deep feedforward neural networks" - Glorot, X. & Bengio, Y. (2010) for details.
"""
for m in self.modules():
if isinstance(m, nn.Linear):
I.xavier_normal_(m.weight)
def forward(self, state):
"""
Forward pass state -> action
Params
======
state (Torch Tensor) [batch_size, state_size]: state vector
Returns
======
actions (Torch Tensor) [batch_size, action_size]: action values
"""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
actions = F.relu(self.fc3(x))
return actions
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.init as I
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 64), (64, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (4, 32), (32, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_2, buf8, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 32), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(2048)](buf3,
primals_5, buf7, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0),
reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(256)](buf5,
primals_7, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(
buf3, (64, 32), (32, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class NetworkNew(nn.Module):
"""
Q-network
"""
def __init__(self, state_size, action_size, seed, fc1_units=64,
fc2_units=32):
"""
Build the model and initialize it.
Params
======
state_size (int) : State space size
action_size (int) : Action space size
seed (int) : Random seed
fc1_units (int) : Number of units in the first hidden layer
fc2_units (int) : Number of units in the second hidden layer
"""
super(NetworkNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
"""
Initialize the parameters of the linear layers using Xavier normal initialization.
See "Understanding the difficulty of training deep feedforward neural networks" - Glorot, X. & Bengio, Y. (2010) for details.
"""
for m in self.modules():
if isinstance(m, nn.Linear):
I.xavier_normal_(m.weight)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
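
A minimal sanity-check sketch (not part of the dataset row): assuming `Network`, `NetworkNew` and `get_inputs` from the two cells above are in scope and a CUDA device is available (the generated `call()` allocates CUDA buffers), the compiled module can be compared against the eager one after copying weights.

```python
import torch

# Illustrative only: compare the eager Q-network with the Inductor-generated
# NetworkNew on the dataset's sample input.
net = Network(state_size=4, action_size=4, seed=4).cuda()
net_new = NetworkNew(state_size=4, action_size=4, seed=4).cuda()
net_new.load_state_dict(net.state_dict())  # make the two modules share weights

x = get_inputs()[0].cuda()                 # shape (4, 4, 4, 4), as asserted by call()
with torch.no_grad():
    print(torch.allclose(net(x), net_new(x), atol=1e-5))  # expected: True
```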
| tae-yeop/Udacity_DRLND_navigation | Network | false | 10,839 | ["MIT"] | 0 | dd4a4609c5fe3e00cb4deea3ebd9922dd0772447 | https://github.com/tae-yeop/Udacity_DRLND_navigation/tree/dd4a4609c5fe3e00cb4deea3ebd9922dd0772447 |
AffineGridGen
|
from torch.nn import Module
import torch
import torch.nn.functional as F
import torch.nn
from torch.nn.modules.module import Module
class AffineGridGen(Module):
def __init__(self, out_h=240, out_w=240, out_ch=3, use_cuda=True):
super(AffineGridGen, self).__init__()
self.out_h = out_h
self.out_w = out_w
self.out_ch = out_ch
def forward(self, theta):
b = theta.size()[0]
if not theta.size() == (b, 2, 3):
theta = theta.view(-1, 2, 3)
theta = theta.contiguous()
batch_size = theta.size()[0]
out_size = torch.Size((batch_size, self.out_ch, self.out_h, self.out_w)
)
return F.affine_grid(theta, out_size)
def get_inputs():
return [torch.rand([4, 2, 3])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.nn
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_affine_grid_generator_0(out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 172800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3 % 240
x2 = xindex // 720
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x1
tmp4 = tmp3.to(tl.float32)
tmp5 = 120.0
tmp6 = tmp4 < tmp5
tmp7 = 0.008333333333333333
tmp8 = tmp4 * tmp7
tmp9 = -0.9958333333333333
tmp10 = tmp8 + tmp9
tmp11 = 239 + -1 * x1
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp7
tmp14 = 0.9958333333333333
tmp15 = tmp14 - tmp13
tmp16 = tl.where(tmp6, tmp10, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp2, tmp16, tmp17)
tmp19 = -1 + x0
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tmp19 >= tmp20
tmp22 = tmp19 < tmp1
tmp23 = tmp21 & tmp22
tmp24 = x2
tmp25 = tmp24.to(tl.float32)
tmp26 = tmp25 < tmp5
tmp27 = tmp25 * tmp7
tmp28 = tmp27 + tmp9
tmp29 = 239 + -1 * x2
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp30 * tmp7
tmp32 = tmp14 - tmp31
tmp33 = tl.where(tmp26, tmp28, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp23, tmp33, tmp34)
tmp36 = tmp18 + tmp35
tmp37 = -2 + x0
tmp38 = tmp37 >= tmp20
tmp39 = 1.0
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp38, tmp39, tmp40)
tmp42 = tmp36 + tmp41
tl.store(out_ptr0 + x5, tmp42, xmask)
@triton.jit
def triton_poi_fused_affine_grid_generator_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 2 % 57600
x0 = xindex % 2
x2 = xindex // 115200
x3 = xindex
tmp0 = tl.load(in_ptr0 + 3 * x1, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (3 * x0 + 6 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 3 * x1), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 3 * x0 + 6 * x2), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 3 * x1), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 3 * x0 + 6 * x2), None, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tl.store(out_ptr0 + x3, tmp10, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 2, 3), (6, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((240, 240, 3), (720, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_affine_grid_generator_0[grid(172800)](buf1, 172800,
XBLOCK=512, num_warps=8, num_stages=1)
buf2 = empty_strided_cuda((4, 57600, 2), (115200, 2, 1), torch.float32)
triton_poi_fused_affine_grid_generator_1[grid(460800)](buf1, arg0_1,
buf2, 460800, XBLOCK=1024, num_warps=4, num_stages=1)
del arg0_1
del buf1
return reinterpret_tensor(buf2, (4, 240, 240, 2), (115200, 480, 2, 1), 0),
class AffineGridGenNew(Module):
def __init__(self, out_h=240, out_w=240, out_ch=3, use_cuda=True):
super(AffineGridGenNew, self).__init__()
self.out_h = out_h
self.out_w = out_w
self.out_ch = out_ch
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
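
As an aside (not part of the dataset row), the hard-coded constants in `triton_poi_fused_affine_grid_generator_0` are the normalized pixel-centre coordinates that `F.affine_grid` produces for a 240-wide grid with `align_corners=False`. A quick check, assuming that setting:

```python
import torch
import torch.nn.functional as F

# With out_w = out_h = 240 and align_corners=False, affine_grid samples pixel
# centres at x_i = (2*i + 1)/240 - 1 = i/120 - 239/240, i.e. step 1/120 and
# offset -239/240 -- the 0.008333333333333333 and -0.9958333333333333 above.
theta = torch.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])
grid = F.affine_grid(theta, torch.Size((1, 3, 240, 240)), align_corners=False)
print(grid[0, 0, :3, 0])      # tensor([-0.9958, -0.9875, -0.9792])
print(1 / 120, -239 / 240)    # 0.008333333333333333 -0.9958333333333333
```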
| sebastian-echeverria/ncnet | AffineGridGen | false | 10,840 | ["MIT"] | 0 | c7249fe8f908813bab6443ebfa4590bd362a0dc2 | https://github.com/sebastian-echeverria/ncnet/tree/c7249fe8f908813bab6443ebfa4590bd362a0dc2 |
BahdanauAttn
|
import torch
import torch.nn as nn
class BahdanauAttn(nn.Module):
"""Bahdabau attention mechanism"""
def __init__(self, size):
super(BahdanauAttn, self).__init__()
self.query_layer = nn.Linear(size, size, bias=False)
self.tanh = nn.Tanh()
self.v = nn.Linear(size, 1, bias=False)
def forward(self, query, memory):
"""
Args:
query: (batch_size, 1, size) or (batch_size, size)
memory: (batch_size, timesteps, size)
"""
if query.dim() == 2:
query = query.unsqueeze(1)
Q = self.query_layer(query)
K = memory
alignment = self.v(self.tanh(Q + K))
alignment = alignment.squeeze(-1)
return alignment
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': 4}]
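
For context, a small usage sketch (illustrative only, not part of the dataset row) with the shapes the docstring describes; `size=8` and the tensor sizes here are arbitrary:

```python
import torch

# Additive (Bahdanau) attention: alignment[b, t] = v^T tanh(W q_b + K[b, t]).
attn = BahdanauAttn(size=8)
query = torch.rand(2, 8)       # (batch_size, size); unsqueezed to (batch_size, 1, size)
memory = torch.rand(2, 5, 8)   # (batch_size, timesteps, size)
scores = attn(query, memory)
print(scores.shape)            # torch.Size([2, 5]) -- one score per timestep
```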
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2)
return reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4
class BahdanauAttnNew(nn.Module):
"""Bahdabau attention mechanism"""
def __init__(self, size):
super(BahdanauAttnNew, self).__init__()
self.query_layer = nn.Linear(size, size, bias=False)
self.tanh = nn.Tanh()
self.v = nn.Linear(size, 1, bias=False)
def forward(self, input_0, input_1):
primals_2 = self.query_layer.weight
primals_4 = self.v.weight
primals_1 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| seo3650/Tacotron-pytorch | BahdanauAttn | false | 10,841 | ["MIT"] | 0 | 223e4f39a3624c409484a1ad55edab1563cf8c87 | https://github.com/seo3650/Tacotron-pytorch/tree/223e4f39a3624c409484a1ad55edab1563cf8c87 |
StackTime
|
import torch
import torch.onnx
class StackTime(torch.nn.Module):
__constants__ = ['factor']
def __init__(self, factor):
super().__init__()
self.factor = int(factor)
def forward(self, x, x_lens):
seq = [x]
for i in range(1, self.factor):
tmp = torch.zeros_like(x)
tmp[:-i, :, :] = x[i:, :, :]
seq.append(tmp)
x_lens = torch.ceil(x_lens.float() / self.factor).int()
return torch.cat(seq, dim=2)[::self.factor, :, :], x_lens
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'factor': 4}]
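
A small worked example (illustrative only, using `factor=2` and a 3-D `(time, batch, features)` input rather than the 4-D sample above) of what the stacking does:

```python
import torch

# With factor=2, frame t is concatenated with frame t+1 along the feature axis,
# the time axis is then subsampled by 2, and lengths are ceil-divided by 2.
st = StackTime(factor=2)
x = torch.arange(6, dtype=torch.float32).view(3, 1, 2)   # time=3, batch=1, feats=2
x_lens = torch.tensor([3])
y, y_lens = st(x, x_lens)
print(y.squeeze(1))   # tensor([[0., 1., 2., 3.], [4., 5., 0., 0.]])
print(y_lens)         # tensor([2], dtype=torch.int32)
```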
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 16
x0 = xindex % 4
x4 = xindex // 64
x3 = xindex // 256
x2 = xindex // 64 % 4
x5 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x4), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = x3
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp12 & tmp9
tmp14 = tl.load(in_ptr0 + (64 + x0 + 4 * (-4 + x1) + 16 * x4), tmp13 &
xmask, other=0.0)
tmp15 = 0.0
tmp16 = tl.where(tmp12, tmp14, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp9, tmp16, tmp17)
tmp19 = tmp0 >= tmp7
tmp20 = tl.full([1], 12, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.full([1], 2, tl.int64)
tmp24 = tmp10 < tmp23
tmp25 = tmp24 & tmp22
tmp26 = tl.load(in_ptr0 + (128 + x0 + 4 * (-8 + x1) + 16 * x4), tmp25 &
xmask, other=0.0)
tmp27 = tl.where(tmp24, tmp26, tmp15)
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp22, tmp27, tmp28)
tmp30 = tmp0 >= tmp20
tl.full([1], 16, tl.int64)
tmp33 = tl.full([1], 1, tl.int64)
tmp34 = tmp10 < tmp33
tmp35 = tmp34 & tmp30
tmp36 = tl.load(in_ptr0 + (192 + x0 + 4 * (-12 + x1) + 16 * x2), tmp35 &
xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tl.where(tmp34, tmp36, tmp15)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp30, tmp37, tmp38)
tmp40 = tl.where(tmp22, tmp29, tmp39)
tmp41 = tl.where(tmp9, tmp18, tmp40)
tmp42 = tl.where(tmp4, tmp5, tmp41)
tl.store(out_ptr0 + x5, tmp42, xmask)
@triton.jit
def triton_poi_fused__to_copy_ceil_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = libdevice.ceil(tmp2)
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16, 4), (256, 64, 4, 1), torch.float32
)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int32)
triton_poi_fused__to_copy_ceil_div_1[grid(256)](arg1_1, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
return reinterpret_tensor(buf0, (1, 4, 16, 4), (1024, 64, 4, 1), 0), buf1
class StackTimeNew(torch.nn.Module):
__constants__ = ['factor']
def __init__(self, factor):
super().__init__()
self.factor = int(factor)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
| swiftdiaries/inference | StackTime | false | 10,842 | ["Apache-2.0"] | 0 | dbb39947d4515449b1a3393cde39ca0dba935b1d | https://github.com/swiftdiaries/inference/tree/dbb39947d4515449b1a3393cde39ca0dba935b1d |
ShiftedConv
|
import math
import torch
import torch.nn as nn
from numpy import prod
def getLayerNormalizationFactor(x):
"""
Get He's constant for the given layer
https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf
"""
size = x.weight.size()
fan_in = prod(size[1:])
return math.sqrt(2.0 / fan_in)
class ConstrainedLayer(nn.Module):
"""
A handy refactor that allows the user to:
- initialize one layer's bias to zero
- apply He's initialization at runtime
"""
def __init__(self, module, equalized=True, lrMul=1.0, initBiasToZero=True):
"""
equalized (bool): if true, the layer's weight should evolve within
the range (-1, 1)
initBiasToZero (bool): if true, bias will be initialized to zero
"""
super(ConstrainedLayer, self).__init__()
self.module = module
self.equalized = equalized
if initBiasToZero and module.bias is not None:
self.module.bias.data.fill_(0)
if self.equalized:
self.module.weight.data.normal_(0, 1)
self.weight = getLayerNormalizationFactor(self.module) * lrMul
def forward(self, x):
x = self.module(x)
if self.equalized:
x *= self.weight
return x
class EqualizedConv1d(ConstrainedLayer):
def __init__(self, nChannelsPrevious, nChannels, kernelSize, padding=0,
bias=True, stride=1, **kwargs):
"""
An nn.Conv1d module with specific constraints
Args:
nChannelsPrevious (int): number of channels in the previous layer
nChannels (int): number of channels of the current layer
kernelSize (int): size of the convolutional kernel
padding (int): convolution's padding
bias (bool): whether to include a bias term
"""
ConstrainedLayer.__init__(self, nn.Conv1d(nChannelsPrevious,
nChannels, kernelSize, padding=padding, bias=bias, stride=
stride), **kwargs)
class ShiftedConv(nn.Module):
def __init__(self, dimOutputAR, dimOutputEncoder, kernelSize):
super(ShiftedConv, self).__init__()
self.module = EqualizedConv1d(dimOutputAR, dimOutputEncoder,
kernelSize, equalized=True, padding=0)
self.kernelSize = kernelSize
def forward(self, x):
N, _S, C = x.size()
x = x.permute(0, 2, 1)
padding = torch.zeros(N, C, self.kernelSize - 1, device=x.device)
x = torch.cat([padding, x], dim=2)
x = self.module(x)
x = x.permute(0, 2, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dimOutputAR': 4, 'dimOutputEncoder': 4, 'kernelSize': 4}]
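
A brief usage sketch (illustrative only, not part of the dataset row): the `kernelSize - 1` left zero-padding makes the convolution causal and keeps the sequence length unchanged.

```python
import torch

conv = ShiftedConv(dimOutputAR=4, dimOutputEncoder=4, kernelSize=4)
x = torch.rand(2, 10, 4)   # (batch, sequence, channels); channels must equal dimOutputAR
y = conv(x)
print(y.shape)             # torch.Size([2, 10, 4]) -- same sequence length as the input
```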
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
from numpy import prod
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 112
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 7
x1 = xindex // 7 % 4
x2 = xindex // 28
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype)
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tmp0 >= tmp3
tl.full([1], 7, tl.int64)
tmp11 = tl.load(in_ptr0 + (x1 + 4 * (-3 + x0) + 16 * x2), tmp8 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.where(tmp4, tmp7, tmp11)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_convolution_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.3535533905932738
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(112)](primals_1, buf0, 112, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_mul_1[grid(64)](buf2, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2, buf0
def getLayerNormalizationFactor(x):
"""
Get He's constant for the given layer
https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf
"""
size = x.weight.size()
fan_in = prod(size[1:])
return math.sqrt(2.0 / fan_in)
class ConstrainedLayer(nn.Module):
"""
A handy refactor that allows the user to:
- initialize one layer's bias to zero
- apply He's initialization at runtime
"""
def __init__(self, module, equalized=True, lrMul=1.0, initBiasToZero=True):
"""
equalized (bool): if true, the layer's weight should evolve within
the range (-1, 1)
initBiasToZero (bool): if true, bias will be initialized to zero
"""
super(ConstrainedLayer, self).__init__()
self.module = module
self.equalized = equalized
if initBiasToZero and module.bias is not None:
self.module.bias.data.fill_(0)
if self.equalized:
self.module.weight.data.normal_(0, 1)
self.weight = getLayerNormalizationFactor(self.module) * lrMul
def forward(self, x):
x = self.module(x)
if self.equalized:
x *= self.weight
return x
class EqualizedConv1d(ConstrainedLayer):
def __init__(self, nChannelsPrevious, nChannels, kernelSize, padding=0,
bias=True, stride=1, **kwargs):
"""
An nn.Conv1d module with specific constraints
Args:
nChannelsPrevious (int): number of channels in the previous layer
nChannels (int): number of channels of the current layer
kernelSize (int): size of the convolutional kernel
padding (int): convolution's padding
bias (bool): whether to include a bias term
"""
ConstrainedLayer.__init__(self, nn.Conv1d(nChannelsPrevious,
nChannels, kernelSize, padding=padding, bias=bias, stride=
stride), **kwargs)
class ShiftedConvNew(nn.Module):
def __init__(self, dimOutputAR, dimOutputEncoder, kernelSize):
super(ShiftedConvNew, self).__init__()
self.module = EqualizedConv1d(dimOutputAR, dimOutputEncoder,
kernelSize, equalized=True, padding=0)
self.kernelSize = kernelSize
def forward(self, input_0):
primals_1 = self.module.module.weight
primals_3 = self.module.module.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
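
Worth noting (not part of the dataset row): the scale `0.3535533905932738` baked into `triton_poi_fused_convolution_mul_1` is the equalized-learning-rate factor computed by `getLayerNormalizationFactor` for this layer's weight.

```python
import math

# The Conv1d weight here has shape (out=4, in=4, kernel=4), so fan_in = 4 * 4 = 16
# and He's constant is sqrt(2 / 16).
print(math.sqrt(2.0 / (4 * 4)))   # 0.3535533905932738
```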
| raphaelreme/CPC_audio | ShiftedConv | false | 10,843 | ["MIT"] | 0 | a2b045d5f03f4a73beaab9b481244e454edacbaa | https://github.com/raphaelreme/CPC_audio/tree/a2b045d5f03f4a73beaab9b481244e454edacbaa |
DiceLoss
|
import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self, smooth: 'float'=1.0, apply_sigmoid: 'bool'=False):
super().__init__()
self.smooth = smooth
self.apply_sigmoid = apply_sigmoid
def forward(self, y_pred: 'torch.Tensor', y_true: 'torch.Tensor'
) ->torch.Tensor:
assert y_pred.size() == y_true.size()
if self.apply_sigmoid:
y_pred = torch.sigmoid(y_pred)
y_pred = y_pred.contiguous().view(-1)
y_true = y_true.contiguous().view(-1)
intersection = (y_pred * y_true).sum()
dsc = (2.0 * intersection + self.smooth) / (y_pred.sum() + y_true.
sum() + self.smooth)
return 1.0 - dsc
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
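
A hand-checked example (illustrative only, not part of the dataset row) of the soft-Dice formula implemented above, with the default `smooth=1.0`:

```python
import torch

# intersection = 1*0.5 + 0*0.5 = 0.5; the sums are 1.0 and 1.0, so
# dsc = (2*0.5 + 1) / (1 + 1 + 1) = 2/3 and the loss is 1/3.
loss_fn = DiceLoss(smooth=1.0)
y_pred = torch.tensor([1.0, 0.0])
y_true = torch.tensor([0.5, 0.5])
print(loss_fn(y_pred, y_true))   # tensor(0.3333)
```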
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = tmp14 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(nn.Module):
def __init__(self, smooth: 'float'=1.0, apply_sigmoid: 'bool'=False):
super().__init__()
self.smooth = smooth
self.apply_sigmoid = apply_sigmoid
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| tfmoraes/deep_heart_torch | DiceLoss | false | 10,844 | ["MIT"] | 0 | 4168ce01d600e69baf82c752a3e57af86861b6ea | https://github.com/tfmoraes/deep_heart_torch/tree/4168ce01d600e69baf82c752a3e57af86861b6ea |
NAC
|
from torch.nn import Module
import math
import torch
from torch.nn.parameter import Parameter
import torch.nn.functional as F
class NAC(Module):
"""Neural Accumulator: :math:`y = Wx` where :math:`W = \\tanh(\\hat{W}) * \\sigma(\\hat{M})`
Args:
in_features: size of each input sample
out_features: size of each output sample
Shape:
- Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of
additional dimensions
- Output: :math:`(N, *, out\\_features)` where all but the last dimension
are the same shape as the input.
Attributes:
weight_tanh: the learnable weights of the module of shape
`(out_features x in_features)`
weight_sigma: the learnable weights of the module of shape
`(out_features x in_features)`
Examples:
>>> m = NAC(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
>>> m = NAC(2, 1)
>>> _ = m.weight_tanh.data.fill_(4), m.weight_sigma.data.fill_(4)
>>> m.weight
tensor([[0.9814, 0.9814]], grad_fn=<ThMulBackward>)
>>> input = torch.Tensor([[0, 1], [2, 5], [-1, 4]])
>>> output = m(input)
>>> output
tensor([[0.9814],
[6.8695],
[2.9441]], grad_fn=<MmBackward>)
"""
def __init__(self, in_features, out_features):
super(NAC, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight_tanh = Parameter(torch.Tensor(out_features, in_features))
self.weight_sigma = Parameter(torch.Tensor(out_features, in_features))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.out_features)
self.weight_tanh.data.uniform_(-stdv, stdv)
self.weight_sigma.data.uniform_(-stdv, stdv)
@property
def weight(self):
"""
Effective weight of NAC
:return:
"""
return torch.tanh(self.weight_tanh) * torch.sigmoid(self.weight_sigma)
def forward(self, input):
return F.linear(input, weight=self.weight)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import math
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp1 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_tanh_0[grid(16)](primals_1, primals_2,
buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1)
del buf0
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, primals_2, reinterpret_tensor(primals_3, (64, 4), (4,
1), 0)
class NACNew(Module):
"""Neural Accumulator: :math:`y = Wx` where :math:`W = \\tanh(\\hat{W}) * \\sigma(\\hat{M})`
Args:
in_features: size of each input sample
out_features: size of each output sample
Shape:
- Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of
additional dimensions
- Output: :math:`(N, *, out\\_features)` where all but the last dimension
are the same shape as the input.
Attributes:
weight_tanh: the learnable weights of the module of shape
`(out_features x in_features)`
weight_sigma: the learnable weights of the module of shape
`(out_features x in_features)`
Examples:
>>> m = NAC(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
>>> m = NAC(2, 1)
>>> _ = m.weight_tanh.data.fill_(4), m.weight_sigma.data.fill_(4)
>>> m.weight
tensor([[0.9814, 0.9814]], grad_fn=<ThMulBackward>)
>>> input = torch.Tensor([[0, 1], [2, 5], [-1, 4]])
>>> output = m(input)
>>> output
tensor([[0.9814],
[6.8695],
[2.9441]], grad_fn=<MmBackward>)
"""
def __init__(self, in_features, out_features):
super(NACNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight_tanh = Parameter(torch.Tensor(out_features, in_features))
self.weight_sigma = Parameter(torch.Tensor(out_features, in_features))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.out_features)
self.weight_tanh.data.uniform_(-stdv, stdv)
self.weight_sigma.data.uniform_(-stdv, stdv)
@property
def weight(self):
"""
Effective weight of NAC
:return:
"""
return torch.tanh(self.weight_tanh) * torch.sigmoid(self.weight_sigma)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def forward(self, input_0):
primals_1 = self.weight_tanh
primals_2 = self.weight_sigma
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| tanbur/pytorch-nalu | NAC | false | 10,845 | ["MIT"] | 0 | 91cb036230144b166137a8f3533850f2d4123d4f | https://github.com/tanbur/pytorch-nalu/tree/91cb036230144b166137a8f3533850f2d4123d4f |
UNETAdd
|
import torch
from torch import nn
class UNETAdd(nn.Module):
"""UNET Without concatenation during decoding"""
def __init__(self):
super(UNETAdd, self).__init__()
self.conv1_1 = nn.Conv2d(in_channels=3, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.conv1_2 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.relu1 = nn.SELU()
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2_1 = nn.Conv2d(in_channels=16, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.conv2_2 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.relu2 = nn.SELU()
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3_1 = nn.Conv2d(in_channels=32, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.conv3_2 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.relu3 = nn.SELU()
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv4_1 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv4_2 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.relu4 = nn.SELU()
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv5_1 = nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.conv5_2 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.relu5 = nn.SELU()
self.upsample1 = nn.ConvTranspose2d(in_channels=256, out_channels=
128, kernel_size=2, stride=2)
self.conv6_1 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv6_2 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.relu6 = nn.SELU()
self.upsample2 = nn.ConvTranspose2d(in_channels=128, out_channels=
64, kernel_size=2, stride=2)
self.conv7_1 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.conv7_2 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.relu7 = nn.SELU()
self.upsample3 = nn.ConvTranspose2d(in_channels=64, out_channels=32,
kernel_size=2, stride=2)
self.conv8_1 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.conv8_2 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.relu8 = nn.SELU()
self.upsample4 = nn.ConvTranspose2d(in_channels=32, out_channels=16,
kernel_size=2, stride=2)
self.conv9_1 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.conv9_2 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.relu9 = nn.SELU()
self.conv10 = nn.Conv2d(in_channels=16, out_channels=2, kernel_size=1)
self.relu10 = nn.SELU()
def forward(self, x):
c1 = self.relu1(self.conv1_2(self.conv1_1(x)))
p1 = self.pool1(c1)
c2 = self.relu2(self.conv2_2(self.conv2_1(p1)))
p2 = self.pool2(c2)
c3 = self.relu3(self.conv3_2(self.conv3_1(p2)))
p3 = self.pool3(c3)
c4 = self.relu4(self.conv4_2(self.conv4_1(p3)))
p4 = self.pool4(c4)
c5 = self.relu5(self.conv5_2(self.conv5_1(p4)))
u6 = self.upsample1(c5)
u6 = torch.add(u6, c4)
c6 = self.relu6(self.conv6_2(self.conv6_1(u6)))
u7 = self.upsample2(c6)
u7 = torch.add(u7, c3)
c7 = self.relu7(self.conv7_2(self.conv7_1(u7)))
u8 = self.upsample3(c7)
u8 = torch.add(u8, c2)
c8 = self.relu8(self.conv8_2(self.conv8_1(u8)))
u9 = self.upsample4(c8)
u9 = torch.add(u9, c1)
c9 = self.relu9(self.conv9_2(self.conv9_1(u9)))
c10 = self.relu10(self.conv10(c9))
return c10
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
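
One detail worth flagging before the generated code (a side note, not part of the dataset row): the constants 1.0507009873554805 and 1.7580993408473766 that appear in the fused SELU kernels below are SELU's `scale` and `scale * alpha`.

```python
# selu(x) = scale * x                     for x > 0
#         = scale * alpha * (exp(x) - 1)  for x <= 0
# with alpha ≈ 1.6732632423543772 and scale ≈ 1.0507009873554805.
print(1.0507009873554805 * 1.6732632423543772)   # ≈ 1.7580993408473766
```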
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 48
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 256
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_11(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 512 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_12(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 256 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_13(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 128 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_14(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_elu_16(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0507009873554805
tmp6 = tmp2 * tmp5
tmp7 = 1.0
tmp8 = tmp2 * tmp7
tmp9 = libdevice.expm1(tmp8)
tmp10 = 1.7580993408473766
tmp11 = tmp9 * tmp10
tmp12 = tl.where(tmp4, tmp6, tmp11)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp12, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16 % 32
x2 = xindex // 512
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 32 * x1 + 2048 * x2), None)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 32 * x1 + 2048 * x2), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + 32 * x1 + 2048 * x2), None)
tmp5 = tl.load(in_ptr0 + (1040 + x0 + 32 * x1 + 2048 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_elu_19(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0507009873554805
tmp6 = tmp2 * tmp5
tmp7 = 1.0
tmp8 = tmp2 * tmp7
tmp9 = libdevice.expm1(tmp8)
tmp10 = 1.7580993408473766
tmp11 = tmp9 * tmp10
tmp12 = tl.where(tmp4, tmp6, tmp11)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp12, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_20(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32 % 16
x2 = xindex // 512
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 2048 * x2), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 2048 * x2), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + 64 * x1 + 2048 * x2), None)
tmp5 = tl.load(in_ptr0 + (1056 + x0 + 64 * x1 + 2048 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_elu_22(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0507009873554805
tmp6 = tmp2 * tmp5
tmp7 = 1.0
tmp8 = tmp2 * tmp7
tmp9 = libdevice.expm1(tmp8)
tmp10 = 1.7580993408473766
tmp11 = tmp9 * tmp10
tmp12 = tl.where(tmp4, tmp6, tmp11)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp12, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_23(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 8
x2 = xindex // 512
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 2048 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 2048 * x2), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + 128 * x1 + 2048 * x2), None)
tmp5 = tl.load(in_ptr0 + (1088 + x0 + 128 * x1 + 2048 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_24(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_elu_25(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0507009873554805
tmp6 = tmp2 * tmp5
tmp7 = 1.0
tmp8 = tmp2 * tmp7
tmp9 = libdevice.expm1(tmp8)
tmp10 = 1.7580993408473766
tmp11 = tmp9 * tmp10
tmp12 = tl.where(tmp4, tmp6, tmp11)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp12, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_26(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 4
x2 = xindex // 512
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 2048 * x2), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 2048 * x2), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + 256 * x1 + 2048 * x2), None)
tmp5 = tl.load(in_ptr0 + (1152 + x0 + 256 * x1 + 2048 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_27(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_elu_28(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0507009873554805
tmp6 = tmp2 * tmp5
tmp7 = 1.0
tmp8 = tmp2 * tmp7
tmp9 = libdevice.expm1(tmp8)
tmp10 = 1.7580993408473766
tmp11 = tmp9 * tmp10
tmp12 = tl.where(tmp4, tmp6, tmp11)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp12, None)
@triton.jit
def triton_poi_fused_add_convolution_29(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
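    # Decoder step: adds the 128-channel bias to the transposed-conv (upsampling) output,
    # then adds the matching encoder feature map element-wise -- the UNET skip connection
    # realised as a sum rather than a concatenation.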
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_add_convolution_30(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_add_convolution_31(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_add_convolution_32(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_elu_33(in_out_ptr0, in_ptr0, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
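    # Final layer: adds the 2-channel bias to the 1x1-conv output (kept channels-last in
    # in_out_ptr0) and writes the SELU activation to out_ptr0 in contiguous NCHW layout.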
xnumel = 2
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4096
y3 = yindex // 4096
tmp0 = tl.load(in_out_ptr0 + (x1 + 2 * y0), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0507009873554805
tmp6 = tmp2 * tmp5
tmp7 = 1.0
tmp8 = tmp2 * tmp7
tmp9 = libdevice.expm1(tmp8)
tmp10 = 1.7580993408473766
tmp11 = tmp9 * tmp10
tmp12 = tl.where(tmp4, tmp6, tmp11)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x1 + 2 * y0), tmp2, xmask)
tl.store(out_ptr0 + (y2 + 4096 * x1 + 8192 * y3), tmp12, xmask)
def call(args):
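    # Inductor-generated forward pass: kernels 0-14 repack the weights and the input into
    # channels-last layouts, extern_kernels.convolution runs the (transposed) convolutions,
    # and the fused kernels above handle bias + SELU, 2x2 max pooling, the additive skip
    # connections of the decoder, and the final layout conversion.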
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47) = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_21, (256,), (1,))
assert_size_stride(primals_22, (256, 128, 2, 2), (512, 4, 2, 1))
assert_size_stride(primals_23, (128,), (1,))
assert_size_stride(primals_24, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_25, (128,), (1,))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128,), (1,))
assert_size_stride(primals_28, (128, 64, 2, 2), (256, 4, 2, 1))
assert_size_stride(primals_29, (64,), (1,))
assert_size_stride(primals_30, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_31, (64,), (1,))
assert_size_stride(primals_32, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_33, (64,), (1,))
assert_size_stride(primals_34, (64, 32, 2, 2), (128, 4, 2, 1))
assert_size_stride(primals_35, (32,), (1,))
assert_size_stride(primals_36, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_37, (32,), (1,))
assert_size_stride(primals_38, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_39, (32,), (1,))
assert_size_stride(primals_40, (32, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_41, (16,), (1,))
assert_size_stride(primals_42, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_43, (16,), (1,))
assert_size_stride(primals_44, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_45, (16,), (1,))
assert_size_stride(primals_46, (2, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_47, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 3, 3, 3), (27, 1, 9, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(48, 9)](primals_1, buf0, 48, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((16, 16, 3, 3), (144, 1, 48, 16), torch.
float32)
triton_poi_fused_2[grid(256, 9)](primals_4, buf2, 256, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((32, 16, 3, 3), (144, 1, 48, 16), torch.
float32)
triton_poi_fused_3[grid(512, 9)](primals_6, buf3, 512, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((32, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_4[grid(1024, 9)](primals_8, buf4, 1024, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_5[grid(2048, 9)](primals_10, buf5, 2048, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_6[grid(4096, 9)](primals_12, buf6, 4096, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf7 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_7[grid(8192, 9)](primals_14, buf7, 8192, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf8 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_8[grid(16384, 9)](primals_16, buf8, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf9 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_9[grid(32768, 9)](primals_18, buf9, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf10 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_10[grid(65536, 9)](primals_20, buf10, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf11 = empty_strided_cuda((256, 128, 2, 2), (512, 1, 256, 128),
torch.float32)
triton_poi_fused_11[grid(32768, 4)](primals_22, buf11, 32768, 4,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_22
buf12 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_8[grid(16384, 9)](primals_24, buf12, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_24
buf13 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_8[grid(16384, 9)](primals_26, buf13, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_26
buf14 = empty_strided_cuda((128, 64, 2, 2), (256, 1, 128, 64),
torch.float32)
triton_poi_fused_12[grid(8192, 4)](primals_28, buf14, 8192, 4,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_28
buf15 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_6[grid(4096, 9)](primals_30, buf15, 4096, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_30
buf16 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_6[grid(4096, 9)](primals_32, buf16, 4096, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_32
buf17 = empty_strided_cuda((64, 32, 2, 2), (128, 1, 64, 32), torch.
float32)
triton_poi_fused_13[grid(2048, 4)](primals_34, buf17, 2048, 4,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_34
buf18 = empty_strided_cuda((32, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_4[grid(1024, 9)](primals_36, buf18, 1024, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_36
buf19 = empty_strided_cuda((32, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_4[grid(1024, 9)](primals_38, buf19, 1024, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_38
buf20 = empty_strided_cuda((32, 16, 2, 2), (64, 1, 32, 16), torch.
float32)
triton_poi_fused_14[grid(512, 4)](primals_40, buf20, 512, 4, XBLOCK
=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_40
buf21 = empty_strided_cuda((16, 16, 3, 3), (144, 1, 48, 16), torch.
float32)
triton_poi_fused_2[grid(256, 9)](primals_42, buf21, 256, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_42
buf22 = empty_strided_cuda((16, 16, 3, 3), (144, 1, 48, 16), torch.
float32)
triton_poi_fused_2[grid(256, 9)](primals_44, buf22, 256, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_44
buf23 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 16, 64, 64), (65536, 1, 1024, 16))
buf24 = buf23
del buf23
triton_poi_fused_convolution_15[grid(262144)](buf24, primals_2,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf25 = extern_kernels.convolution(buf24, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 16, 64, 64), (65536, 1, 1024, 16))
buf26 = buf25
del buf25
buf27 = empty_strided_cuda((4, 16, 64, 64), (65536, 1, 1024, 16),
torch.float32)
triton_poi_fused_convolution_elu_16[grid(262144)](buf26, primals_5,
buf27, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf28 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16),
torch.float32)
buf29 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_17[grid(65536)](buf27,
buf28, buf29, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf30 = extern_kernels.convolution(buf28, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 32, 32, 32), (32768, 1, 1024, 32))
buf31 = buf30
del buf30
triton_poi_fused_convolution_18[grid(131072)](buf31, primals_7,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf32 = extern_kernels.convolution(buf31, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 32, 32, 32), (32768, 1, 1024, 32))
buf33 = buf32
del buf32
buf34 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32),
torch.float32)
triton_poi_fused_convolution_elu_19[grid(131072)](buf33, primals_9,
buf34, 131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf35 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32),
torch.float32)
buf36 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_20[grid(32768)](buf34,
buf35, buf36, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf37 = extern_kernels.convolution(buf35, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf38 = buf37
del buf37
triton_poi_fused_convolution_21[grid(65536)](buf38, primals_11,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_11
buf39 = extern_kernels.convolution(buf38, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf40 = buf39
del buf39
buf41 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
torch.float32)
triton_poi_fused_convolution_elu_22[grid(65536)](buf40, primals_13,
buf41, 65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf42 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch
.float32)
buf43 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_23[grid(16384)](buf41,
buf42, buf43, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf44 = extern_kernels.convolution(buf42, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf45 = buf44
del buf44
triton_poi_fused_convolution_24[grid(32768)](buf45, primals_15,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf46 = extern_kernels.convolution(buf45, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf47 = buf46
del buf46
buf48 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.float32)
triton_poi_fused_convolution_elu_25[grid(32768)](buf47, primals_17,
buf48, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf49 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
buf50 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_26[grid(8192)](buf48,
buf49, buf50, 8192, XBLOCK=256, num_warps=4, num_stages=1)
buf51 = extern_kernels.convolution(buf49, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf52 = buf51
del buf51
triton_poi_fused_convolution_27[grid(16384)](buf52, primals_19,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf53 = extern_kernels.convolution(buf52, buf10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf54 = buf53
del buf53
buf55 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256),
torch.float32)
triton_poi_fused_convolution_elu_28[grid(16384)](buf54, primals_21,
buf55, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_21
buf56 = extern_kernels.convolution(buf55, buf11, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf57 = buf56
del buf56
triton_poi_fused_add_convolution_29[grid(32768)](buf57, primals_23,
buf48, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf58 = extern_kernels.convolution(buf57, buf12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf59 = buf58
del buf58
triton_poi_fused_convolution_24[grid(32768)](buf59, primals_25,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf60 = extern_kernels.convolution(buf59, buf13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf61 = buf60
del buf60
buf62 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.float32)
triton_poi_fused_convolution_elu_25[grid(32768)](buf61, primals_27,
buf62, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_27
buf63 = extern_kernels.convolution(buf62, buf14, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf63, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf64 = buf63
del buf63
triton_poi_fused_add_convolution_30[grid(65536)](buf64, primals_29,
buf41, 65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_29
buf65 = extern_kernels.convolution(buf64, buf15, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf65, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf66 = buf65
del buf65
triton_poi_fused_convolution_21[grid(65536)](buf66, primals_31,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_31
buf67 = extern_kernels.convolution(buf66, buf16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf67, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf68 = buf67
del buf67
buf69 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
torch.float32)
triton_poi_fused_convolution_elu_22[grid(65536)](buf68, primals_33,
buf69, 65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_33
buf70 = extern_kernels.convolution(buf69, buf17, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 32, 32, 32), (32768, 1, 1024, 32))
buf71 = buf70
del buf70
triton_poi_fused_add_convolution_31[grid(131072)](buf71, primals_35,
buf34, 131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_35
buf72 = extern_kernels.convolution(buf71, buf18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf72, (4, 32, 32, 32), (32768, 1, 1024, 32))
buf73 = buf72
del buf72
triton_poi_fused_convolution_18[grid(131072)](buf73, primals_37,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_37
buf74 = extern_kernels.convolution(buf73, buf19, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf74, (4, 32, 32, 32), (32768, 1, 1024, 32))
buf75 = buf74
del buf74
buf76 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32),
torch.float32)
triton_poi_fused_convolution_elu_19[grid(131072)](buf75, primals_39,
buf76, 131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_39
buf77 = extern_kernels.convolution(buf76, buf20, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf77, (4, 16, 64, 64), (65536, 1, 1024, 16))
buf78 = buf77
del buf77
triton_poi_fused_add_convolution_32[grid(262144)](buf78, primals_41,
buf27, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_41
buf79 = extern_kernels.convolution(buf78, buf21, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf79, (4, 16, 64, 64), (65536, 1, 1024, 16))
buf80 = buf79
del buf79
triton_poi_fused_convolution_15[grid(262144)](buf80, primals_43,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_43
buf81 = extern_kernels.convolution(buf80, buf22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf81, (4, 16, 64, 64), (65536, 1, 1024, 16))
buf82 = buf81
del buf81
buf83 = empty_strided_cuda((4, 16, 64, 64), (65536, 1, 1024, 16),
torch.float32)
triton_poi_fused_convolution_elu_16[grid(262144)](buf82, primals_45,
buf83, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_45
buf84 = extern_kernels.convolution(buf83, primals_46, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 2, 64, 64), (8192, 1, 128, 2))
buf85 = buf84
del buf84
buf86 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_elu_33[grid(16384, 2)](buf85,
primals_47, buf86, 16384, 2, XBLOCK=1, YBLOCK=256, num_warps=4,
num_stages=1)
del primals_47
return (buf86, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8,
buf9, buf10, buf11, buf12, buf13, buf14, buf15, buf16, buf17, buf18,
buf19, buf20, buf21, buf22, primals_46, buf24, buf26, buf27, buf28,
buf29, buf31, buf33, buf34, buf35, buf36, buf38, buf40, buf41,
buf42, buf43, buf45, buf47, buf48, buf49, buf50, buf52, buf54,
buf55, buf57, buf59, buf61, buf62, buf64, buf66, buf68, buf69,
buf71, buf73, buf75, buf76, buf78, buf80, buf82, buf83, buf85)
class UNETAddNew(nn.Module):
"""UNET Without concatenation during decoding"""
def __init__(self):
super(UNETAddNew, self).__init__()
self.conv1_1 = nn.Conv2d(in_channels=3, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.conv1_2 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.relu1 = nn.SELU()
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2_1 = nn.Conv2d(in_channels=16, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.conv2_2 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.relu2 = nn.SELU()
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3_1 = nn.Conv2d(in_channels=32, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.conv3_2 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.relu3 = nn.SELU()
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv4_1 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv4_2 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.relu4 = nn.SELU()
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv5_1 = nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.conv5_2 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.relu5 = nn.SELU()
self.upsample1 = nn.ConvTranspose2d(in_channels=256, out_channels=
128, kernel_size=2, stride=2)
self.conv6_1 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv6_2 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.relu6 = nn.SELU()
self.upsample2 = nn.ConvTranspose2d(in_channels=128, out_channels=
64, kernel_size=2, stride=2)
self.conv7_1 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.conv7_2 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.relu7 = nn.SELU()
self.upsample3 = nn.ConvTranspose2d(in_channels=64, out_channels=32,
kernel_size=2, stride=2)
self.conv8_1 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.conv8_2 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.relu8 = nn.SELU()
self.upsample4 = nn.ConvTranspose2d(in_channels=32, out_channels=16,
kernel_size=2, stride=2)
self.conv9_1 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.conv9_2 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.relu9 = nn.SELU()
self.conv10 = nn.Conv2d(in_channels=16, out_channels=2, kernel_size=1)
self.relu10 = nn.SELU()
def forward(self, input_0):
primals_1 = self.conv1_1.weight
primals_2 = self.conv1_1.bias
primals_4 = self.conv1_2.weight
primals_5 = self.conv1_2.bias
primals_6 = self.conv2_1.weight
primals_7 = self.conv2_1.bias
primals_8 = self.conv2_2.weight
primals_9 = self.conv2_2.bias
primals_10 = self.conv3_1.weight
primals_11 = self.conv3_1.bias
primals_12 = self.conv3_2.weight
primals_13 = self.conv3_2.bias
primals_14 = self.conv4_1.weight
primals_15 = self.conv4_1.bias
primals_16 = self.conv4_2.weight
primals_17 = self.conv4_2.bias
primals_18 = self.conv5_1.weight
primals_19 = self.conv5_1.bias
primals_20 = self.conv5_2.weight
primals_21 = self.conv5_2.bias
primals_22 = self.upsample1.weight
primals_23 = self.upsample1.bias
primals_24 = self.conv6_1.weight
primals_25 = self.conv6_1.bias
primals_26 = self.conv6_2.weight
primals_27 = self.conv6_2.bias
primals_28 = self.upsample2.weight
primals_29 = self.upsample2.bias
primals_30 = self.conv7_1.weight
primals_31 = self.conv7_1.bias
primals_32 = self.conv7_2.weight
primals_33 = self.conv7_2.bias
primals_34 = self.upsample3.weight
primals_35 = self.upsample3.bias
primals_36 = self.conv8_1.weight
primals_37 = self.conv8_1.bias
primals_38 = self.conv8_2.weight
primals_39 = self.conv8_2.bias
primals_40 = self.upsample4.weight
primals_41 = self.upsample4.bias
primals_42 = self.conv9_1.weight
primals_43 = self.conv9_1.bias
primals_44 = self.conv9_2.weight
primals_45 = self.conv9_2.bias
primals_46 = self.conv10.weight
primals_47 = self.conv10.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47])
return output[0]
|
quenting44/semantic_segmentation
|
UNETAdd
| false
| 10,846
|
[
"MIT"
] | 0
|
bd197ddda3c6891d69ff7e552a0c224c7ec1269a
|
https://github.com/quenting44/semantic_segmentation/tree/bd197ddda3c6891d69ff7e552a0c224c7ec1269a
|
NALU
|
from torch.nn import Module
import math
import torch
from torch.nn.parameter import Parameter
import torch.nn.functional as F
class NAC(Module):
"""Neural Accumulator: :math:`y = Wx` where :math:`W = \\tanh(\\hat{W}) * \\sigma(\\hat{M})`
Args:
in_features: size of each input sample
out_features: size of each output sample
Shape:
- Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of
additional dimensions
- Output: :math:`(N, *, out\\_features)` where all but the last dimension
are the same shape as the input.
Attributes:
weight_tanh: the learnable weights of the module of shape
`(out_features x in_features)`
weight_sigma: the learnable weights of the module of shape
`(out_features x in_features)`
Examples:
>>> m = NAC(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
>>> m = NAC(2, 1)
>>> _ = m.weight_tanh.data.fill_(4), m.weight_sigma.data.fill_(4)
>>> m.weight
tensor([[0.9814, 0.9814]], grad_fn=<ThMulBackward>)
>>> input = torch.Tensor([[0, 1], [2, 5], [-1, 4]])
>>> output = m(input)
>>> output
tensor([[0.9814],
[6.8695],
[2.9441]], grad_fn=<MmBackward>)
"""
def __init__(self, in_features, out_features):
super(NAC, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight_tanh = Parameter(torch.Tensor(out_features, in_features))
self.weight_sigma = Parameter(torch.Tensor(out_features, in_features))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.out_features)
self.weight_tanh.data.uniform_(-stdv, stdv)
self.weight_sigma.data.uniform_(-stdv, stdv)
@property
def weight(self):
"""
Effective weight of NAC
:return:
"""
return torch.tanh(self.weight_tanh) * torch.sigmoid(self.weight_sigma)
def forward(self, input):
return F.linear(input, weight=self.weight)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
class NALU(Module):
"""Neural Arithmetic Logic Unit: :math:`y = g * a + (1-g) * m` where :math:`g` is a sigmoidal gate,
:math:`a` is a NAC and :math:`m` is a log-space NAC enabling multiplication.
Args:
in_features: size of each input sample
out_features: size of each output sample
Shape:
- Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of
additional dimensions
- Output: :math:`(N, *, out\\_features)` where all but the last dimension
are the same shape as the input.
Attributes:
addition_cell: a NAC with learnable weights with the same input and output shapes as the NALU
multiplication_cell: a NAC with learnable weights with the same input and output shapes as the NALU
gate_weights: the learnable weights of the gate that interpolates between addition and multiplication of shape
`(out_features x in_features)`
Examples:
>>> m = NALU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
Check we can multiply using the multiplication cell:
>>> m = NALU(2, 1)
>>> _ = (m.multiplication_cell.weight_tanh.data.fill_(4), m.multiplication_cell.weight_sigma.data.fill_(4),
... m.gate_weights.data.fill_(-4))
>>> m.multiplication_cell.weight
tensor([[0.9814, 0.9814]], grad_fn=<ThMulBackward>)
>>> input = torch.Tensor([[0, 10], [2, 5], [-1, 4]])
>>> output = m(input)
>>> torch.round(output)
tensor([[ 0.],
[10.],
[ 4.]], grad_fn=<RoundBackward>)
"""
def __init__(self, in_features, out_features):
super(NALU, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.addition_cell = NAC(in_features=in_features, out_features=
out_features)
self.multiplication_cell = NAC(in_features=in_features,
out_features=out_features)
self.gate_weights = Parameter(torch.Tensor(out_features, in_features))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.out_features)
self.gate_weights.data.uniform_(-stdv, stdv)
self.addition_cell.reset_parameters()
self.multiplication_cell.reset_parameters()
def forward(self, input):
summation = self.addition_cell(input)
multiplication = torch.exp(self.multiplication_cell(torch.log(torch
.abs(input) + 0.001)))
gate = torch.sigmoid(F.linear(input=input, weight=self.gate_weights))
return gate * summation + (1 - gate) * multiplication
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn import Module
import math
from torch.nn.parameter import Parameter
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
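    # Effective NAC weight: element-wise tanh(in_ptr0) * sigmoid(in_ptr1) over the 4x4
    # weight matrices, i.e. W = tanh(W_hat) * sigmoid(M_hat).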
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp1 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_abs_add_log_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
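    # Log-space input for the multiplication cell: log(|x| + 0.001) element-wise.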
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.abs(tmp0)
tmp2 = 0.001
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_exp_mul_rsub_sigmoid_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
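    # NALU interpolation: g = sigmoid(in_ptr0); out = g * in_ptr1 + (1 - g) * exp(in_ptr2),
    # combining the additive path with the exponentiated log-space (multiplicative) path.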
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp1
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tmp3 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_tanh_0[grid(16)](primals_1, primals_2,
buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_abs_add_log_1[grid(256)](primals_3, buf2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf3 = buf0
del buf0
triton_poi_fused_mul_sigmoid_tanh_0[grid(16)](primals_4, primals_5,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(buf3, (4, 4), (1, 4), 0), out=buf4)
del buf3
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
del primals_6
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_exp_mul_rsub_sigmoid_2[grid(256)](buf5, buf1,
buf4, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
return (buf6, primals_1, primals_2, primals_4, primals_5,
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1,
reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, buf5)
class NAC(Module):
"""Neural Accumulator: :math:`y = Wx` where :math:`W = \\tanh(\\hat{W}) * \\sigma(\\hat{M})`
Args:
in_features: size of each input sample
out_features: size of each output sample
Shape:
- Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of
additional dimensions
- Output: :math:`(N, *, out\\_features)` where all but the last dimension
are the same shape as the input.
Attributes:
weight_tanh: the learnable weights of the module of shape
`(out_features x in_features)`
weight_sigma: the learnable weights of the module of shape
`(out_features x in_features)`
Examples:
>>> m = NAC(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
>>> m = NAC(2, 1)
>>> _ = m.weight_tanh.data.fill_(4), m.weight_sigma.data.fill_(4)
>>> m.weight
tensor([[0.9814, 0.9814]], grad_fn=<ThMulBackward>)
>>> input = torch.Tensor([[0, 1], [2, 5], [-1, 4]])
>>> output = m(input)
>>> output
tensor([[0.9814],
[6.8695],
[2.9441]], grad_fn=<MmBackward>)
"""
def __init__(self, in_features, out_features):
super(NAC, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight_tanh = Parameter(torch.Tensor(out_features, in_features))
self.weight_sigma = Parameter(torch.Tensor(out_features, in_features))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.out_features)
self.weight_tanh.data.uniform_(-stdv, stdv)
self.weight_sigma.data.uniform_(-stdv, stdv)
@property
def weight(self):
"""
Effective weight of NAC
:return:
"""
return torch.tanh(self.weight_tanh) * torch.sigmoid(self.weight_sigma)
def forward(self, input):
return F.linear(input, weight=self.weight)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
class NALUNew(Module):
"""Neural Arithmetic Logic Unit: :math:`y = g * a + (1-g) * m` where :math:`g` is a sigmoidal gate,
:math:`a` is a NAC and :math:`m` is a log-space NAC enabling multiplication.
Args:
in_features: size of each input sample
out_features: size of each output sample
Shape:
- Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of
additional dimensions
- Output: :math:`(N, *, out\\_features)` where all but the last dimension
are the same shape as the input.
Attributes:
addition_cell: a NAC with learnable weights with the same input and output shapes as the NALU
multiplication_cell: a NAC with learnable weights with the same input and output shapes as the NALU
gate_weights: the learnable weights of the gate that interpolates between addition and multiplication of shape
`(out_features x in_features)`
Examples:
>>> m = NALU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
Check we can multiply using the multiplication cell:
>>> m = NALU(2, 1)
>>> _ = (m.multiplication_cell.weight_tanh.data.fill_(4), m.multiplication_cell.weight_sigma.data.fill_(4),
... m.gate_weights.data.fill_(-4))
>>> m.multiplication_cell.weight
tensor([[0.9814, 0.9814]], grad_fn=<ThMulBackward>)
>>> input = torch.Tensor([[0, 10], [2, 5], [-1, 4]])
>>> output = m(input)
>>> torch.round(output)
tensor([[ 0.],
[10.],
[ 4.]], grad_fn=<RoundBackward>)
"""
def __init__(self, in_features, out_features):
super(NALUNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.addition_cell = NAC(in_features=in_features, out_features=
out_features)
self.multiplication_cell = NAC(in_features=in_features,
out_features=out_features)
self.gate_weights = Parameter(torch.Tensor(out_features, in_features))
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.out_features)
self.gate_weights.data.uniform_(-stdv, stdv)
self.addition_cell.reset_parameters()
self.multiplication_cell.reset_parameters()
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def forward(self, input_0):
primals_1 = self.gate_weights
primals_2 = self.addition_cell.weight_tanh
primals_4 = self.addition_cell.weight_sigma
primals_5 = self.multiplication_cell.weight_tanh
primals_6 = self.multiplication_cell.weight_sigma
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
tanbur/pytorch-nalu
|
NALU
| false
| 10,847
|
[
"MIT"
] | 0
|
91cb036230144b166137a8f3533850f2d4123d4f
|
https://github.com/tanbur/pytorch-nalu/tree/91cb036230144b166137a8f3533850f2d4123d4f
|
TripletLoss
|
import torch
from torch.nn.modules.distance import PairwiseDistance
class TripletLoss(torch.nn.Module):
def __init__(self, margin):
super(TripletLoss, self).__init__()
self.margin = margin
self.pdist = PairwiseDistance(2)
def forward(self, anchor, positive, negative):
pos_dist = self.pdist.forward(anchor, positive)
neg_dist = self.pdist.forward(anchor, negative)
hinge_dist = torch.clamp(self.margin + pos_dist - neg_dist, min=0.0)
loss = torch.mean(hinge_dist)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn.modules.distance import PairwiseDistance
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_mean_norm_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
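    # Single-program reduction: per-row L2 distances between anchor/positive and
    # anchor/negative over the last dimension (with the 1e-06 eps PairwiseDistance adds to
    # each difference), hinge max(margin + d_ap - d_an, 0) with margin = 4.0, then the
    # mean over all 64 rows.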
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 4.0
tmp26 = tmp24 + tmp25
tmp28 = tmp0 - tmp27
tmp29 = tmp28 + tmp3
tmp30 = tmp29 * tmp29
tmp32 = tmp6 - tmp31
tmp33 = tmp32 + tmp3
tmp34 = tmp33 * tmp33
tmp35 = tmp30 + tmp34
tmp37 = tmp12 - tmp36
tmp38 = tmp37 + tmp3
tmp39 = tmp38 * tmp38
tmp40 = tmp35 + tmp39
tmp42 = tmp18 - tmp41
tmp43 = tmp42 + tmp3
tmp44 = tmp43 * tmp43
tmp45 = tmp40 + tmp44
tmp46 = libdevice.sqrt(tmp45)
tmp47 = tmp26 - tmp46
tmp48 = 0.0
tmp49 = triton_helpers.maximum(tmp47, tmp48)
tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK])
tmp52 = tl.sum(tmp50, 1)[:, None]
tmp53 = 64.0
tmp54 = tmp52 / tmp53
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp54, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_clamp_mean_norm_sub_0[grid(1)](buf2, arg1_1,
arg0_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class TripletLossNew(torch.nn.Module):
def __init__(self, margin):
super(TripletLossNew, self).__init__()
self.margin = margin
self.pdist = PairwiseDistance(2)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
tobysuwindra/Bird-Similarity
|
TripletLoss
| false
| 10,848
|
[
"MIT"
] | 0
|
92f182fe89645f6ce6dd4e99f12c1185f52d5d9e
|
https://github.com/tobysuwindra/Bird-Similarity/tree/92f182fe89645f6ce6dd4e99f12c1185f52d5d9e
|
Net
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self, n_states, n_actions, n_hidden):
super(Net, self).__init__()
self.fc1 = nn.Linear(n_states, n_hidden)
self.fc2 = nn.Linear(n_hidden, n_hidden * 2)
self.fc3 = nn.Linear(n_hidden * 2, n_hidden)
self.out = nn.Linear(n_hidden, n_actions)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
x = F.relu(x)
Q = self.out(x)
return Q
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_states': 4, 'n_actions': 4, 'n_hidden': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
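    # Fused bias-add + ReLU that also records the (activation <= 0) boolean mask consumed
    # by the threshold backward pass.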
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (8, 4), (4, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 8), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(512)](buf3,
primals_5, buf8, 512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf5,
primals_7, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf6)
del primals_9
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 8), (8, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1), 0
), primals_8, buf7, primals_6, buf8, primals_4, buf9
class NetNew(nn.Module):
def __init__(self, n_states, n_actions, n_hidden):
super(NetNew, self).__init__()
self.fc1 = nn.Linear(n_states, n_hidden)
self.fc2 = nn.Linear(n_hidden, n_hidden * 2)
self.fc3 = nn.Linear(n_hidden * 2, n_hidden)
self.out = nn.Linear(n_hidden, n_actions)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.out.weight
primals_9 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
tom99763/implement-DQN-on-maze-game
|
Net
| false
| 10,849
|
[
"BSD-2-Clause"
] | 0
|
24135a06e348b6f8b88a22c58b4a2c930bf7d7b6
|
https://github.com/tom99763/implement-DQN-on-maze-game/tree/24135a06e348b6f8b88a22c58b4a2c930bf7d7b6
|
SimpleNN
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleNN(nn.Module):
def __init__(self, in_values, out_values):
super().__init__()
self.dense1 = nn.Linear(in_values, 12673)
self.drop1 = nn.Dropout()
self.dense2 = nn.Linear(12673, 4000)
self.drop2 = nn.Dropout()
self.dense3 = nn.Linear(4000, 500)
self.drop3 = nn.Dropout()
self.last_dense = nn.Linear(500, out_values)
def forward(self, x):
x = F.relu(self.dense1(x))
x = self.drop1(x)
x = F.relu(self.dense2(x))
x = self.drop2(x)
x = F.relu(self.dense3(x))
x = self.drop3(x)
x = self.last_dense(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_values': 4, 'out_values': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
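    # Fused bias-add + ReLU on rows of 12673 features: the fp32 buffer uses a padded row
    # stride of 12704 and the boolean (<= 0) mask buffer a stride of 12800, both for alignment.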
xnumel = 811072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12673
x1 = xindex // 12673
tmp0 = tl.load(in_out_ptr0 + (x0 + 12704 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0 + 12704 * x1), tmp4, xmask)
tl.store(out_ptr0 + (x0 + 12800 * x1), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4000
x1 = xindex // 4000
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + (x0 + 4096 * x1), tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 32000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 500
x2 = xindex // 2000
x3 = xindex % 2000
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 2016 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 2048 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
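    # Repacks the padded (stride-2016) ReLU output into a dense contiguous (64, 500)
    # buffer expected by the final addmm.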
xnumel = 32000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 500
x1 = xindex // 500
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 500 * (x1 % 4) + 2016 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (12673, 4), (4, 1))
assert_size_stride(primals_2, (12673,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4000, 12673), (12673, 1))
assert_size_stride(primals_5, (4000,), (1,))
assert_size_stride(primals_6, (500, 4000), (4000, 1))
assert_size_stride(primals_7, (500,), (1,))
assert_size_stride(primals_8, (4, 500), (500, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 12673), (12704, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 12673), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 12673), (203264, 50816,
12704, 1), 0)
del buf0
buf10 = empty_strided_cuda((4, 4, 4, 12673), (204800, 51200, 12800,
1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(811072)](buf1,
primals_2, buf10, 811072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4000), (4000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 12673), (12704, 1),
0), reinterpret_tensor(primals_4, (12673, 4000), (1, 12673), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4000), (64000, 16000,
4000, 1), 0)
del buf2
buf9 = empty_strided_cuda((4, 4, 4, 4000), (65536, 16384, 4096, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256000)](buf3,
primals_5, buf9, 256000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 500), (500, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0
), reinterpret_tensor(primals_6, (4000, 500), (1, 4000), 0),
out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 500), (8064, 2016, 500, 1),
torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 500), (8192, 2048, 500, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(32000)](buf4,
primals_7, buf5, buf8, 32000, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_7
buf6 = buf4
del buf4
triton_poi_fused_relu_view_3[grid(32000)](buf5, buf6, 32000, XBLOCK
=256, num_warps=4, num_stages=1)
del buf5
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, buf6, reinterpret_tensor(primals_8,
(500, 4), (1, 500), 0), alpha=1, beta=1, out=buf7)
del primals_9
return reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 12673), (12704, 1), 0
), reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0
), buf6, primals_8, buf8, primals_6, buf9, primals_4, buf10
class SimpleNNNew(nn.Module):
def __init__(self, in_values, out_values):
super().__init__()
self.dense1 = nn.Linear(in_values, 12673)
self.drop1 = nn.Dropout()
self.dense2 = nn.Linear(12673, 4000)
self.drop2 = nn.Dropout()
self.dense3 = nn.Linear(4000, 500)
self.drop3 = nn.Dropout()
self.last_dense = nn.Linear(500, out_values)
def forward(self, input_0):
primals_1 = self.dense1.weight
primals_2 = self.dense1.bias
primals_4 = self.dense2.weight
primals_5 = self.dense2.bias
primals_6 = self.dense3.weight
primals_7 = self.dense3.bias
primals_8 = self.last_dense.weight
primals_9 = self.last_dense.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
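# Hedged usage sketch (added for illustration, not part of the original repo):
# the inductor-generated call() above launches CUDA kernels, so this assumes a
# CUDA device with enough memory for the ~51M-parameter dense2 layer.
if __name__ == '__main__':
    model = SimpleNNNew(in_values=4, out_values=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    print(model(x).shape)  # torch.Size([4, 4, 4, 4])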
|
sboomi/cp1
|
SimpleNN
| false
| 10,850
|
[
"MIT"
] | 0
|
7f7aa96e8ba9cfe00802028a61bfba5e90c999f6
|
https://github.com/sboomi/cp1/tree/7f7aa96e8ba9cfe00802028a61bfba5e90c999f6
|
PositionwiseFeedForward
|
import math
import torch
from torch import nn
class GELU(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
class PositionwiseFeedForward(nn.Module):
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
self.activation = GELU()
def forward(self, x):
return self.w_2(self.dropout(self.activation(self.w_1(x))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4}]
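# Hedged usage sketch (added for illustration, not part of the original repo):
# runs the module on the sample shapes from get_inputs()/get_init_inputs() and
# checks the tanh GELU approximation against F.gelu; the approximate='tanh'
# keyword assumes PyTorch >= 1.12.
if __name__ == '__main__':
    import torch.nn.functional as F
    torch.manual_seed(0)
    ffn = PositionwiseFeedForward(d_model=4, d_ff=4).eval()  # eval() disables dropout
    x = get_inputs()[0]
    print(ffn(x).shape)  # torch.Size([4, 4, 4, 4])
    print(torch.allclose(GELU()(x), F.gelu(x, approximate='tanh'), atol=1e-6))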
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
class GELU(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
class PositionwiseFeedForwardNew(nn.Module):
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForwardNew, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
self.activation = GELU()
def forward(self, input_0):
primals_1 = self.w_1.weight
primals_2 = self.w_1.bias
primals_4 = self.w_2.weight
primals_5 = self.w_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
tnat410/smiles-transformer
|
PositionwiseFeedForward
| false
| 10,851
|
[
"MIT"
] | 0
|
e64196945ed44cfce529484bcc8b6c77b662cdc8
|
https://github.com/tnat410/smiles-transformer/tree/e64196945ed44cfce529484bcc8b6c77b662cdc8
|
FFN
|
import torch
import torch.nn as nn
import torch.utils.data
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
        :param bias: boolean. If True, a bias term is included.
        :param w_init: str. nonlinearity name passed to nn.init.calculate_gain for Xavier initialization of the weights.
"""
super(Conv, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
x = self.conv(x)
return x
class FFN(nn.Module):
"""
Positionwise Feed-Forward Network
"""
def __init__(self, num_hidden):
"""
:param num_hidden: dimension of hidden
"""
super(FFN, self).__init__()
self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=3, padding=
1, w_init='relu')
self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=3, padding=1)
self.dropout = nn.Dropout(p=0.1)
self.layer_norm = nn.LayerNorm(num_hidden)
def forward(self, input_):
x = input_
x = self.w_2(torch.relu(self.w_1(x)))
x = x + input_
x = self.dropout(x)
x = self.layer_norm(x)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_hidden': 4}]
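# Hedged usage sketch (added for illustration, not part of the original repo):
# the sample input from get_inputs() is an unbatched (channels, length) tensor;
# unbatched inputs to nn.Conv1d assume PyTorch >= 1.12, otherwise add a batch
# dimension with x.unsqueeze(0) first.
if __name__ == '__main__':
    torch.manual_seed(0)
    ffn = FFN(num_hidden=4).eval()  # eval() disables the dropout layer
    x = get_inputs()[0]             # shape [4, 4]
    print(ffn(x).shape)             # torch.Size([4, 4]); residual + LayerNorm keep the shape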
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (16, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (4, 16, 3), (48, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 16, 4), (64, 4, 1))
buf1 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1,
primals_3, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 16,
4), (0, 4, 1), 0), primals_4, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf2, (1, 4, 4), (16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(16)](buf3, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(4)](buf3, primals_1,
buf4, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_3[grid(16)](buf3, primals_1,
buf4, buf5, primals_6, primals_7, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf4
del buf5
del primals_7
return (buf6, primals_1, primals_2, primals_4, primals_6,
reinterpret_tensor(buf1, (1, 16, 4), (64, 4, 1), 0), buf3, buf7)
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
        :param bias: boolean. If True, a bias term is included.
        :param w_init: str. nonlinearity name passed to nn.init.calculate_gain for Xavier initialization of the weights.
"""
super(Conv, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
x = self.conv(x)
return x
class FFNNew(nn.Module):
"""
Positionwise Feed-Forward Network
"""
def __init__(self, num_hidden):
"""
:param num_hidden: dimension of hidden
"""
super(FFNNew, self).__init__()
self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=3, padding=
1, w_init='relu')
self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=3, padding=1)
self.dropout = nn.Dropout(p=0.1)
self.layer_norm = nn.LayerNorm(num_hidden)
def forward(self, input_0):
primals_2 = self.w_1.conv.weight
primals_3 = self.w_1.conv.bias
primals_4 = self.w_2.conv.weight
primals_5 = self.w_2.conv.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
stefantaubert/FastSpeech
|
FFN
| false
| 10,852
|
[
"MIT"
] | 0
|
4ef8ce2ff8f6a69f9b52ef9bd5b37f8e2783c17e
|
https://github.com/stefantaubert/FastSpeech/tree/4ef8ce2ff8f6a69f9b52ef9bd5b37f8e2783c17e
|
DiceBCELoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DiceLoss(nn.Module):
def __init__(self, smooth: 'float'=1.0, apply_sigmoid: 'bool'=False):
super().__init__()
self.smooth = smooth
self.apply_sigmoid = apply_sigmoid
def forward(self, y_pred: 'torch.Tensor', y_true: 'torch.Tensor'
) ->torch.Tensor:
assert y_pred.size() == y_true.size()
if self.apply_sigmoid:
y_pred = torch.sigmoid(y_pred)
y_pred = y_pred.contiguous().view(-1)
y_true = y_true.contiguous().view(-1)
intersection = (y_pred * y_true).sum()
dsc = (2.0 * intersection + self.smooth) / (y_pred.sum() + y_true.
sum() + self.smooth)
return 1.0 - dsc
class DiceBCELoss(nn.Module):
def __init__(self, smooth: 'float'=1.0, apply_sigmoid: 'bool'=False):
super().__init__()
self.dice_loss = DiceLoss(smooth)
self.apply_sigmoid = apply_sigmoid
def forward(self, y_pred: 'torch.Tensor', y_true: 'torch.Tensor'
) ->torch.Tensor:
if self.apply_sigmoid:
y_pred = torch.sigmoid(y_pred)
dl = self.dice_loss(y_pred, y_true)
bce = F.binary_cross_entropy(y_pred, y_true, reduction='mean')
return dl + bce
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
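# Hedged usage sketch (added for illustration, not part of the original repo):
# with apply_sigmoid=False the predictions must already lie in (0, 1), which
# the random inputs from get_inputs() satisfy.
if __name__ == '__main__':
    torch.manual_seed(0)
    criterion = DiceBCELoss(smooth=1.0)
    y_pred, y_true = get_inputs()
    loss = criterion(y_pred, y_true)  # scalar: Dice loss plus mean BCE
    print(loss.item())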
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp5 = tmp4 * tmp0
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 1.0
tmp10 = tmp0 - tmp9
tmp11 = -tmp4
tmp12 = libdevice.log1p(tmp11)
tmp13 = -100.0
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp10 * tmp14
tmp16 = tl_math.log(tmp4)
tmp17 = triton_helpers.maximum(tmp16, tmp13)
tmp18 = tmp0 * tmp17
tmp19 = tmp15 - tmp18
tmp20 = tl.broadcast_to(tmp19, [RBLOCK])
tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0))
tmp23 = tl.broadcast_to(tmp4, [RBLOCK])
tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tmp26 = 2.0
tmp27 = tmp8 * tmp26
tmp28 = tmp27 + tmp9
tmp29 = tmp25 + tmp3
tmp30 = tmp29 + tmp9
tmp31 = tmp28 / tmp30
tmp32 = tmp9 - tmp31
tmp33 = 256.0
tmp34 = tmp22 / tmp33
tmp35 = tmp32 + tmp34
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp35, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0[grid(1)](
buf4, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf4,
class DiceLoss(nn.Module):
def __init__(self, smooth: 'float'=1.0, apply_sigmoid: 'bool'=False):
super().__init__()
self.smooth = smooth
self.apply_sigmoid = apply_sigmoid
def forward(self, y_pred: 'torch.Tensor', y_true: 'torch.Tensor'
) ->torch.Tensor:
assert y_pred.size() == y_true.size()
if self.apply_sigmoid:
y_pred = torch.sigmoid(y_pred)
y_pred = y_pred.contiguous().view(-1)
y_true = y_true.contiguous().view(-1)
intersection = (y_pred * y_true).sum()
dsc = (2.0 * intersection + self.smooth) / (y_pred.sum() + y_true.
sum() + self.smooth)
return 1.0 - dsc
class DiceBCELossNew(nn.Module):
def __init__(self, smooth: 'float'=1.0, apply_sigmoid: 'bool'=False):
super().__init__()
self.dice_loss = DiceLoss(smooth)
self.apply_sigmoid = apply_sigmoid
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tfmoraes/deep_heart_torch
|
DiceBCELoss
| false
| 10,853
|
[
"MIT"
] | 0
|
4168ce01d600e69baf82c752a3e57af86861b6ea
|
https://github.com/tfmoraes/deep_heart_torch/tree/4168ce01d600e69baf82c752a3e57af86861b6ea
|
WorldNet
|
import torch
class WorldNet(torch.nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim):
super(WorldNet, self).__init__()
self.fc_in = torch.nn.Linear(input_dim, hidden_dim)
self.fc_1 = torch.nn.Linear(hidden_dim, hidden_dim)
self.fc_2 = torch.nn.Linear(hidden_dim, hidden_dim)
self.fc_3 = torch.nn.Linear(hidden_dim, hidden_dim)
self.fc_out = torch.nn.Linear(hidden_dim, output_dim * 2)
den = 2 * torch.tensor(input_dim).float().sqrt()
torch.nn.init.normal_(self.fc_in.weight, std=1.0 / den)
torch.nn.init.normal_(self.fc_1.weight, std=1.0 / den)
torch.nn.init.normal_(self.fc_2.weight, std=1.0 / den)
torch.nn.init.normal_(self.fc_3.weight, std=1.0 / den)
torch.nn.init.normal_(self.fc_out.weight, std=1.0 / den)
def forward(self, x):
out = self.swish(self.fc_in(x))
out = self.swish(self.fc_1(out))
out = self.swish(self.fc_2(out))
out = self.swish(self.fc_3(out))
out = self.fc_out(out)
return out
def swish(self, x):
return x * torch.sigmoid(x)
def get_decays(self):
decays = 2.5e-05 * (self.fc_in.weight ** 2).sum() / 2.0
decays += 5e-05 * (self.fc_1.weight ** 2).sum() / 2.0
decays += 7.5e-05 * (self.fc_2.weight ** 2).sum() / 2.0
decays += 7.5e-05 * (self.fc_3.weight ** 2).sum() / 2.0
decays += 0.0001 * (self.fc_out.weight ** 2).sum() / 2.0
return decays
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'hidden_dim': 4, 'output_dim': 4}]
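# Hedged usage sketch (added for illustration, not part of the original repo):
# the head emits output_dim * 2 values per input; reading them as e.g. mean and
# (log-)variance pairs is an assumption, not something stated in the code above.
if __name__ == '__main__':
    torch.manual_seed(0)
    net = WorldNet(input_dim=4, hidden_dim=4, output_dim=4)
    x = get_inputs()[0]
    print(net(x).shape)             # torch.Size([4, 4, 4, 8])
    print(net.get_decays().item())  # scalar weight-decay penalty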
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (8, 4), (4, 1))
assert_size_stride(primals_11, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf2, buf3, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf4, buf5, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf6)
del primals_9
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf6, buf7, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 8), (1, 4), 0),
alpha=1, beta=1, out=buf8)
del primals_11
return reinterpret_tensor(buf8, (4, 4, 4, 8), (128, 32, 8, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0
), buf6, reinterpret_tensor(buf7, (64, 4), (4, 1), 0
), primals_10, primals_8, primals_6, primals_4
class WorldNetNew(torch.nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim):
super(WorldNetNew, self).__init__()
self.fc_in = torch.nn.Linear(input_dim, hidden_dim)
self.fc_1 = torch.nn.Linear(hidden_dim, hidden_dim)
self.fc_2 = torch.nn.Linear(hidden_dim, hidden_dim)
self.fc_3 = torch.nn.Linear(hidden_dim, hidden_dim)
self.fc_out = torch.nn.Linear(hidden_dim, output_dim * 2)
den = 2 * torch.tensor(input_dim).float().sqrt()
torch.nn.init.normal_(self.fc_in.weight, std=1.0 / den)
torch.nn.init.normal_(self.fc_1.weight, std=1.0 / den)
torch.nn.init.normal_(self.fc_2.weight, std=1.0 / den)
torch.nn.init.normal_(self.fc_3.weight, std=1.0 / den)
torch.nn.init.normal_(self.fc_out.weight, std=1.0 / den)
def swish(self, x):
return x * torch.sigmoid(x)
def get_decays(self):
decays = 2.5e-05 * (self.fc_in.weight ** 2).sum() / 2.0
decays += 5e-05 * (self.fc_1.weight ** 2).sum() / 2.0
decays += 7.5e-05 * (self.fc_2.weight ** 2).sum() / 2.0
decays += 7.5e-05 * (self.fc_3.weight ** 2).sum() / 2.0
decays += 0.0001 * (self.fc_out.weight ** 2).sum() / 2.0
return decays
def forward(self, input_0):
primals_1 = self.fc_in.weight
primals_2 = self.fc_in.bias
primals_4 = self.fc_1.weight
primals_5 = self.fc_1.bias
primals_6 = self.fc_2.weight
primals_7 = self.fc_2.bias
primals_8 = self.fc_3.weight
primals_9 = self.fc_3.bias
primals_10 = self.fc_out.weight
primals_11 = self.fc_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
tim-ts-chu/mbpo
|
WorldNet
| false
| 10,854
|
[
"MIT"
] | 0
|
0d98e6e80499a82812d3361658e0707c0b489fc5
|
https://github.com/tim-ts-chu/mbpo/tree/0d98e6e80499a82812d3361658e0707c0b489fc5
|
ConvLayer
|
import torch
import torch.nn as nn
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, norm
='instance'):
super(ConvLayer, self).__init__()
padding_size = kernel_size // 2
self.reflection_pad = nn.ReflectionPad2d(padding_size)
self.conv_layer = nn.Conv2d(in_channels, out_channels, kernel_size,
stride)
self.norm_type = norm
if norm == 'instance':
self.norm_layer = nn.InstanceNorm2d(out_channels, affine=True)
elif norm == 'batch':
self.norm_layer = nn.BatchNorm2d(out_channels, affine=True)
def forward(self, x):
x = self.reflection_pad(x)
x = self.conv_layer(x)
if self.norm_type == 'None':
out = x
else:
out = self.norm_layer(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'stride': 1}]
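# Hedged usage sketch (added for illustration, not part of the original repo):
# ReflectionPad2d adds kernel_size // 2 = 2 pixels per side (4 -> 8) and the
# unpadded 4x4 convolution then yields 8 - 4 + 1 = 5, so the spatial size grows
# from 4x4 to 5x5 for the even kernel size used here.
if __name__ == '__main__':
    torch.manual_seed(0)
    layer = ConvLayer(in_channels=4, out_channels=4, kernel_size=4, stride=1)
    x = get_inputs()[0]
    print(layer(x).shape)  # torch.Size([4, 4, 5, 5])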
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_repeat_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
rnumel = 25
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
x0 = xindex
r3 = rindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + 25 * x0), rmask & xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x0 % 4, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.full([XBLOCK, 1], 25, tl.int32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp10 / tmp12
tmp14 = tmp4 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(rmask & xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = tmp3 - tmp13
tmp21 = 25.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 * tmp0
tmp29 = tmp27 + tmp28
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(in_out_ptr0 + (r3 + 25 * x0), tmp3, rmask & xmask)
tl.store(out_ptr3 + (r3 + 25 * x0), tmp29, rmask & xmask)
tl.store(out_ptr4 + x0, tmp25, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0,
1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
buf3 = empty_strided_cuda((16,), (1,), torch.float32)
buf2 = buf1
del buf1
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf8 = empty_strided_cuda((1, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
triton_per_fused__native_batch_norm_legit_convolution_repeat_1[grid(16)
](buf2, primals_4, primals_3, primals_5, buf3, buf4, buf8, buf7,
16, 25, XBLOCK=8, num_warps=2, num_stages=1)
del primals_3
del primals_4
del primals_5
return reinterpret_tensor(buf8, (4, 4, 5, 5), (100, 25, 5, 1), 0
), primals_2, buf0, buf2, buf3, reinterpret_tensor(buf7, (16,), (1,), 0
), reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0)
class ConvLayerNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, norm
='instance'):
super(ConvLayerNew, self).__init__()
padding_size = kernel_size // 2
self.reflection_pad = nn.ReflectionPad2d(padding_size)
self.conv_layer = nn.Conv2d(in_channels, out_channels, kernel_size,
stride)
self.norm_type = norm
if norm == 'instance':
self.norm_layer = nn.InstanceNorm2d(out_channels, affine=True)
elif norm == 'batch':
self.norm_layer = nn.BatchNorm2d(out_channels, affine=True)
def forward(self, input_0):
primals_1 = self.conv_layer.weight
primals_3 = self.conv_layer.bias
primals_4 = self.norm_layer.weight
primals_5 = self.norm_layer.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
suryawanshishantanu6/Multi-Style-Transfer
|
ConvLayer
| false
| 10,855
|
[
"MIT"
] | 0
|
c5c211847de676596580a8a9afda940ac76abbb1
|
https://github.com/suryawanshishantanu6/Multi-Style-Transfer/tree/c5c211847de676596580a8a9afda940ac76abbb1
|
DotProductAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DotProductAttention(nn.Module):
"""Dot product attention.
Given a set of vector values, and a vector query, attention is a technique
to compute a weighted sum of the values, dependent on the query.
NOTE: Here we use the terminology in Stanford cs224n-2018-lecture11.
"""
def __init__(self):
super(DotProductAttention, self).__init__()
def forward(self, queries, values):
"""
Args:
queries: N x To x H
values : N x Ti x H
Returns:
output: N x To x H
attention_distribution: N x To x Ti
"""
batch_size = queries.size(0)
input_lengths = values.size(1)
attention_scores = torch.bmm(queries, values.transpose(1, 2))
attention_distribution = F.softmax(attention_scores.view(-1,
input_lengths), dim=1).view(batch_size, -1, input_lengths)
attention_output = torch.bmm(attention_distribution, values)
return attention_output, attention_distribution
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
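# Hedged usage sketch (added for illustration, not part of the original repo):
# queries are N x To x H, values are N x Ti x H; the output is N x To x H and
# each row of the attention distribution (N x To x Ti) sums to 1.
if __name__ == '__main__':
    torch.manual_seed(0)
    attn = DotProductAttention()
    queries, values = get_inputs()
    out, dist = attn(queries, values)
    print(out.shape, dist.shape)  # torch.Size([4, 4, 4]) torch.Size([4, 4, 4])
    print(torch.allclose(dist.sum(dim=-1), torch.ones(4, 4)))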
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1),
0), arg1_1, out=buf3)
del arg1_1
return buf3, reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
class DotProductAttentionNew(nn.Module):
"""Dot product attention.
Given a set of vector values, and a vector query, attention is a technique
to compute a weighted sum of the values, dependent on the query.
NOTE: Here we use the terminology in Stanford cs224n-2018-lecture11.
"""
def __init__(self):
super(DotProductAttentionNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
|
tompoek/Listen-Attend-Spell-v2
|
DotProductAttention
| false
| 10,856
|
[
"MIT"
] | 0
|
aa19543c9d23256a007d6e7a98d9cbc571e89f7f
|
https://github.com/tompoek/Listen-Attend-Spell-v2/tree/aa19543c9d23256a007d6e7a98d9cbc571e89f7f
|
DPRNNCell
|
import math
import torch
from torch import Tensor
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from typing import Optional
class RNNLinear(nn.Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
    This module is the same as a ``torch.nn.Linear`` layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear).
When used with `PackedSequence`s, additional attribute `max_batch_len` is defined to determine
the size of per-sample grad tensor.
"""
max_batch_len: 'int'
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPRNNCellBase(nn.Module):
has_cell_state: 'bool' = False
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
num_chunks: 'int') ->None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = RNNLinear(input_size, num_chunks * hidden_size, bias)
self.hh = RNNLinear(hidden_size, num_chunks * hidden_size, bias)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
class DPRNNCell(DPRNNCellBase):
"""An Elman RNN cell with tanh or ReLU non-linearity.
DP-friendly drop-in replacement of the ``torch.nn.RNNCell`` module to use in ``DPRNN``.
Refer to ``torch.nn.RNNCell`` documentation for the model description, parameters and inputs/outputs.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
nonlinearity: 'str'='tanh') ->None:
super().__init__(input_size, hidden_size, bias, num_chunks=1)
if nonlinearity not in ('tanh', 'relu'):
raise ValueError(f'Unsupported nonlinearity: {nonlinearity}')
self.nonlinearity = nonlinearity
def forward(self, input: 'Tensor', hx: 'Optional[Tensor]'=None,
batch_size_t: 'Optional[int]'=None) ->Tensor:
if hx is None:
hx = torch.zeros(input.shape[0], self.hidden_size, dtype=input.
dtype, device=input.device)
h_prev = hx
gates = self.ih(input) + self.hh(h_prev if batch_size_t is None else
h_prev[:batch_size_t, :])
if self.nonlinearity == 'tanh':
h_t = torch.tanh(gates)
elif self.nonlinearity == 'relu':
h_t = torch.relu(gates)
else:
raise RuntimeError(f'Unknown nonlinearity: {self.nonlinearity}')
return h_t
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'bias': 4}]
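# Hedged usage sketch (added for illustration, not part of the original repo):
# a single recurrent step on a conventional (batch, input_size) tensor; hx
# defaults to zeros and the returned hidden state is fed back in the next step.
if __name__ == '__main__':
    torch.manual_seed(0)
    cell = DPRNNCell(input_size=4, hidden_size=4, bias=True)
    x = torch.rand(3, 4)
    h = cell(x)     # first step: zero-initialised hidden state
    h = cell(x, h)  # second step reuses the previous hidden state
    print(h.shape)  # torch.Size([3, 4])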
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x4 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tl.store(in_out_ptr0 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_tanh_1[grid(256)](buf3, primals_3, buf2,
primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf2
del primals_3
del primals_5
return buf3, buf0, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf3
class RNNLinear(nn.Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
    This module is the same as a ``torch.nn.Linear`` layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear).
When used with `PackedSequence`s, additional attribute `max_batch_len` is defined to determine
the size of per-sample grad tensor.
"""
max_batch_len: 'int'
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPRNNCellBase(nn.Module):
has_cell_state: 'bool' = False
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
num_chunks: 'int') ->None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = RNNLinear(input_size, num_chunks * hidden_size, bias)
self.hh = RNNLinear(hidden_size, num_chunks * hidden_size, bias)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
class DPRNNCellNew(DPRNNCellBase):
"""An Elman RNN cell with tanh or ReLU non-linearity.
DP-friendly drop-in replacement of the ``torch.nn.RNNCell`` module to use in ``DPRNN``.
Refer to ``torch.nn.RNNCell`` documentation for the model description, parameters and inputs/outputs.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
nonlinearity: 'str'='tanh') ->None:
super().__init__(input_size, hidden_size, bias, num_chunks=1)
if nonlinearity not in ('tanh', 'relu'):
raise ValueError(f'Unsupported nonlinearity: {nonlinearity}')
self.nonlinearity = nonlinearity
def forward(self, input_0):
primals_2 = self.ih.weight
primals_3 = self.ih.bias
primals_4 = self.hh.weight
primals_5 = self.hh.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
romovpa/opacus
|
DPRNNCell
| false
| 10,857
|
[
"Apache-2.0"
] | 0
|
9cda8072e52049a06afba7ab524276bb6613a727
|
https://github.com/romovpa/opacus/tree/9cda8072e52049a06afba7ab524276bb6613a727
|
MonotonicMin
|
import torch
import torch.nn as nn
class MonotonicMin(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.min(x, dim=1)[0].unsqueeze(1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
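# Hedged usage sketch (added for illustration, not part of the original repo):
# the minimum over dim=1 is re-inserted as a singleton dimension, so a
# [4, 4, 4, 4] input becomes [4, 1, 4, 4].
if __name__ == '__main__':
    x = get_inputs()[0]
    print(MonotonicMin()(x).shape)  # torch.Size([4, 1, 4, 4])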
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_min_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_min_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0),
class MonotonicMinNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tiwalayo/monotonic-mlp
|
MonotonicMin
| false
| 10,858
|
[
"MIT"
] | 0
|
2f519797a753f7f297fac1365125c6da79f7b890
|
https://github.com/tiwalayo/monotonic-mlp/tree/2f519797a753f7f297fac1365125c6da79f7b890
|
DPGRUCell
|
import math
import torch
from torch import Tensor
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from typing import Optional
class RNNLinear(nn.Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
    This module is the same as a ``torch.nn.Linear`` layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear).
When used with `PackedSequence`s, additional attribute `max_batch_len` is defined to determine
the size of per-sample grad tensor.
"""
max_batch_len: 'int'
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPRNNCellBase(nn.Module):
has_cell_state: 'bool' = False
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
num_chunks: 'int') ->None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = RNNLinear(input_size, num_chunks * hidden_size, bias)
self.hh = RNNLinear(hidden_size, num_chunks * hidden_size, bias)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
class DPGRUCell(DPRNNCellBase):
"""A gated recurrent unit (GRU) cell
DP-friendly drop-in replacement of the ``torch.nn.GRUCell`` module to use in ``DPGRU``.
Refer to ``torch.nn.GRUCell`` documentation for the model description, parameters and inputs/outputs.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'
) ->None:
super().__init__(input_size, hidden_size, bias, num_chunks=3)
def forward(self, input: 'Tensor', hx: 'Optional[Tensor]'=None,
batch_size_t: 'Optional[int]'=None) ->Tensor:
if hx is None:
hx = torch.zeros(input.shape[0], self.hidden_size, dtype=input.
dtype, device=input.device)
h_prev = hx if batch_size_t is None else hx[:batch_size_t, :]
gates_x = self.ih(input)
gates_h = self.hh(h_prev)
r_t_input_x, z_t_input_x, n_t_input_x = torch.split(gates_x, self.
hidden_size, 1)
r_t_input_h, z_t_input_h, n_t_input_h = torch.split(gates_h, self.
hidden_size, 1)
r_t = torch.sigmoid(r_t_input_x + r_t_input_h)
z_t = torch.sigmoid(z_t_input_x + z_t_input_h)
n_t = torch.tanh(n_t_input_x + r_t * n_t_input_h)
h_t = (1 - z_t) * n_t + z_t * h_prev
return h_t
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'bias': 4}]
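# Hedged usage sketch (added for illustration, not part of the original repo):
# one GRU step with the (batch, input_size) sample from get_inputs(); hx
# defaults to a zero hidden state.
if __name__ == '__main__':
    torch.manual_seed(0)
    cell = DPGRUCell(input_size=4, hidden_size=4, bias=True)
    x = get_inputs()[0]  # shape [4, 4]
    h = cell(x)
    print(h.shape)       # torch.Size([4, 4])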
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.sigmoid(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp11 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = libdevice.tanh(tmp17)
tmp19 = 1.0
tmp20 = tmp19 - tmp5
tmp21 = tmp20 * tmp18
tmp22 = 0.0
tmp23 = tmp5 * tmp22
tmp24 = tmp21 + tmp23
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp18, xmask)
tl.store(out_ptr3 + x2, tmp24, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12),
(1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4,
(4, 12), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(16)](buf1,
primals_3, buf2, buf4, buf3, buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf1
del primals_3
return buf6, primals_1, buf0, reinterpret_tensor(buf2, (4, 4), (12, 1), 8
), buf3, buf4, buf5
class RNNLinear(nn.Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
    This module is the same as a ``torch.nn.Linear`` layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear).
When used with `PackedSequence`s, additional attribute `max_batch_len` is defined to determine
the size of per-sample grad tensor.
"""
max_batch_len: 'int'
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPRNNCellBase(nn.Module):
has_cell_state: 'bool' = False
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
num_chunks: 'int') ->None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = RNNLinear(input_size, num_chunks * hidden_size, bias)
self.hh = RNNLinear(hidden_size, num_chunks * hidden_size, bias)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
class DPGRUCellNew(DPRNNCellBase):
"""A gated recurrent unit (GRU) cell
DP-friendly drop-in replacement of the ``torch.nn.GRUCell`` module to use in ``DPGRU``.
Refer to ``torch.nn.GRUCell`` documentation for the model description, parameters and inputs/outputs.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'
) ->None:
super().__init__(input_size, hidden_size, bias, num_chunks=3)
def forward(self, input_0):
primals_2 = self.ih.weight
primals_3 = self.ih.bias
primals_4 = self.hh.weight
primals_5 = self.hh.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
romovpa/opacus
|
DPGRUCell
| false
| 10,859
|
[
"Apache-2.0"
] | 0
|
9cda8072e52049a06afba7ab524276bb6613a727
|
https://github.com/romovpa/opacus/tree/9cda8072e52049a06afba7ab524276bb6613a727
|
DPLSTMCell
|
import math
import torch
from torch import Tensor
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from typing import Optional
from typing import Tuple
class RNNLinear(nn.Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
    This module is the same as a ``torch.nn.Linear`` layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear).
When used with `PackedSequence`s, additional attribute `max_batch_len` is defined to determine
the size of per-sample grad tensor.
"""
max_batch_len: 'int'
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPRNNCellBase(nn.Module):
has_cell_state: 'bool' = False
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
num_chunks: 'int') ->None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = RNNLinear(input_size, num_chunks * hidden_size, bias)
self.hh = RNNLinear(hidden_size, num_chunks * hidden_size, bias)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
class DPLSTMCell(DPRNNCellBase):
"""A long short-term memory (LSTM) cell.
DP-friendly drop-in replacement of the ``torch.nn.LSTMCell`` module to use in ``DPLSTM``.
Refer to ``torch.nn.LSTMCell`` documentation for the model description, parameters and inputs/outputs.
"""
has_cell_state = True
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'
) ->None:
super().__init__(input_size, hidden_size, bias, num_chunks=4)
def forward(self, input: 'Tensor', hx:
'Optional[Tuple[Tensor, Tensor]]'=None, batch_size_t:
'Optional[int]'=None) ->Tuple[Tensor, Tensor]:
if hx is None:
zeros = torch.zeros(input.shape[0], self.hidden_size, dtype=
input.dtype, device=input.device)
hx = zeros, zeros
h_prev, c_prev = hx
if batch_size_t is None:
gates = self.ih(input) + self.hh(h_prev)
else:
gates = self.ih(input) + self.hh(h_prev[:batch_size_t, :])
i_t_input, f_t_input, g_t_input, o_t_input = torch.split(gates,
self.hidden_size, 1)
i_t = torch.sigmoid(i_t_input)
f_t = torch.sigmoid(f_t_input)
g_t = torch.tanh(g_t_input)
o_t = torch.sigmoid(o_t_input)
if batch_size_t is None:
c_t = f_t * c_prev + i_t * g_t
else:
c_t = f_t * c_prev[:batch_size_t, :] + i_t * g_t
h_t = o_t * torch.tanh(c_t)
return h_t, c_t
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'bias': 4}]
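# A minimal usage sketch for the eager DPLSTMCell above; the _example_dplstm_steps
# helper is illustrative only. With hx=None the cell starts from zero hidden and cell
# states and returns (h_t, c_t), each [batch, hidden_size]; the pair can be fed back
# in as hx for the next time step.
def _example_dplstm_steps():
    cell = DPLSTMCell(input_size=4, hidden_size=4, bias=True)
    x = torch.rand(4, 4)                     # [batch, input_size]
    h_t, c_t = cell(x)                       # zero initial state
    h_next, c_next = cell(torch.rand(4, 4), (h_t, c_t))
    assert h_next.shape == c_next.shape == (4, 4)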
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp16 + tmp17
tmp21 = tmp19 + tmp20
tmp22 = tmp18 + tmp21
tmp23 = libdevice.tanh(tmp22)
tmp26 = tmp24 + tmp25
tmp29 = tmp27 + tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.sigmoid(tmp30)
tmp32 = 0.0
tmp33 = tmp31 * tmp32
tmp34 = tmp7 * tmp23
tmp35 = tmp33 + tmp34
tmp36 = 1.0
tmp37 = tmp36 - tmp31
tmp38 = tmp31 * tmp37
tmp39 = libdevice.tanh(tmp35)
tmp40 = tmp15 * tmp39
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
tl.store(out_ptr2 + x2, tmp23, xmask)
tl.store(out_ptr3 + x2, tmp35, xmask)
tl.store(out_ptr4 + x2, tmp38, xmask)
tl.store(out_ptr5 + x2, tmp40, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 16),
(1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 16), (1,
4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1[grid(16)](buf1
, primals_3, buf2, primals_5, buf3, buf5, buf4, buf6, buf8,
buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del buf2
del primals_3
del primals_5
return buf7, buf6, primals_1, buf0, buf3, buf4, buf5, buf6, buf8
class RNNLinear(nn.Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
    This module is the same as a ``torch.nn.Linear`` layer, except that in the backward pass
    the grad_samples get accumulated (instead of being concatenated as in the standard
    nn.Linear).
    When used with `PackedSequence`s, an additional attribute `max_batch_len` is defined to determine
    the size of the per-sample grad tensor.
"""
max_batch_len: 'int'
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPRNNCellBase(nn.Module):
has_cell_state: 'bool' = False
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
num_chunks: 'int') ->None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = RNNLinear(input_size, num_chunks * hidden_size, bias)
self.hh = RNNLinear(hidden_size, num_chunks * hidden_size, bias)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
class DPLSTMCellNew(DPRNNCellBase):
"""A long short-term memory (LSTM) cell.
DP-friendly drop-in replacement of the ``torch.nn.LSTMCell`` module to use in ``DPLSTM``.
Refer to ``torch.nn.LSTMCell`` documentation for the model description, parameters and inputs/outputs.
"""
has_cell_state = True
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'
) ->None:
super().__init__(input_size, hidden_size, bias, num_chunks=4)
def forward(self, input_0):
primals_2 = self.ih.weight
primals_3 = self.ih.bias
primals_4 = self.hh.weight
primals_5 = self.hh.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
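# A minimal usage sketch for the Triton-backed DPLSTMCellNew above; the
# _example_dplstm_new_step helper is illustrative only. It assumes a CUDA device
# (call() allocates CUDA buffers) and checks that one step returns both the hidden and
# the cell state; the initial state handling is fixed inside call().
def _example_dplstm_new_step():
    cell = DPLSTMCellNew(input_size=4, hidden_size=4, bias=True).cuda()
    h_t, c_t = cell(torch.rand(4, 4, device='cuda'))
    assert h_t.shape == c_t.shape == (4, 4)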
|
romovpa/opacus
|
DPLSTMCell
| false
| 10,860
|
[
"Apache-2.0"
] | 0
|
9cda8072e52049a06afba7ab524276bb6613a727
|
https://github.com/romovpa/opacus/tree/9cda8072e52049a06afba7ab524276bb6613a727
|
FeatureAssembler
|
import torch
from typing import Optional
import torch.nn as nn
class FeatureAssembler(nn.Module):
def __init__(self, T: 'int', embed_static: 'Optional[FeatureEmbedder]'=
None, embed_dynamic: 'Optional[FeatureEmbedder]'=None) ->None:
super().__init__()
self.T = T
self.embeddings = nn.ModuleDict({'embed_static': embed_static,
'embed_dynamic': embed_dynamic})
def forward(self, feat_static_cat: 'torch.Tensor', feat_static_real:
'torch.Tensor', feat_dynamic_cat: 'torch.Tensor', feat_dynamic_real:
'torch.Tensor') ->torch.Tensor:
processed_features = [self.process_static_cat(feat_static_cat),
self.process_static_real(feat_static_real), self.
process_dynamic_cat(feat_dynamic_cat), self.
process_dynamic_real(feat_dynamic_real)]
return torch.cat(processed_features, dim=-1)
def process_static_cat(self, feature: 'torch.Tensor') ->torch.Tensor:
if self.embeddings['embed_static'] is not None:
feature = self.embeddings['embed_static'](feature)
return feature.unsqueeze(1).expand(-1, self.T, -1).float()
def process_dynamic_cat(self, feature: 'torch.Tensor') ->torch.Tensor:
if self.embeddings['embed_dynamic'] is None:
return feature.float()
else:
return self.embeddings['embed_dynamic'](feature)
def process_static_real(self, feature: 'torch.Tensor') ->torch.Tensor:
return feature.unsqueeze(1).expand(-1, self.T, -1)
def process_dynamic_real(self, feature: 'torch.Tensor') ->torch.Tensor:
return feature
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4, 4]),
torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'T': 4}]
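# A minimal shape check for FeatureAssembler; the _example_feature_assembler helper is
# illustrative only. With T=4 and no embedders, the two static inputs [batch, d] are
# repeated along the time axis and concatenated with the two dynamic inputs along the
# last dimension, giving [batch, T, 4 + 4 + 4 + 4].
def _example_feature_assembler():
    fa = FeatureAssembler(T=4)
    out = fa(torch.rand(4, 4), torch.rand(4, 4),
             torch.rand(4, 4, 4), torch.rand(4, 4, 4))
    assert out.shape == (4, 4, 16)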
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from typing import Optional
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex // 16
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x2 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (4 * x3 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp19 = tl.load(in_ptr3 + (4 * x3 + (-12 + x0)), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x4, tmp22, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](arg0_1, arg1_1, arg2_1, arg3_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf0,
class FeatureAssemblerNew(nn.Module):
def __init__(self, T: 'int', embed_static: 'Optional[FeatureEmbedder]'=
None, embed_dynamic: 'Optional[FeatureEmbedder]'=None) ->None:
super().__init__()
self.T = T
self.embeddings = nn.ModuleDict({'embed_static': embed_static,
'embed_dynamic': embed_dynamic})
def process_static_cat(self, feature: 'torch.Tensor') ->torch.Tensor:
if self.embeddings['embed_static'] is not None:
feature = self.embeddings['embed_static'](feature)
return feature.unsqueeze(1).expand(-1, self.T, -1).float()
def process_dynamic_cat(self, feature: 'torch.Tensor') ->torch.Tensor:
if self.embeddings['embed_dynamic'] is None:
return feature.float()
else:
return self.embeddings['embed_dynamic'](feature)
def process_static_real(self, feature: 'torch.Tensor') ->torch.Tensor:
return feature.unsqueeze(1).expand(-1, self.T, -1)
def process_dynamic_real(self, feature: 'torch.Tensor') ->torch.Tensor:
return feature
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
ssmall41/pytorch-ts
|
FeatureAssembler
| false
| 10,861
|
[
"Apache-2.0",
"MIT"
] | 0
|
d0be718d443f8d676640b3aa75a7a154edad5dce
|
https://github.com/ssmall41/pytorch-ts/tree/d0be718d443f8d676640b3aa75a7a154edad5dce
|
ResidualLayer
|
import torch
import torch.nn as nn
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, norm
='instance'):
super(ConvLayer, self).__init__()
padding_size = kernel_size // 2
self.reflection_pad = nn.ReflectionPad2d(padding_size)
self.conv_layer = nn.Conv2d(in_channels, out_channels, kernel_size,
stride)
self.norm_type = norm
if norm == 'instance':
self.norm_layer = nn.InstanceNorm2d(out_channels, affine=True)
elif norm == 'batch':
self.norm_layer = nn.BatchNorm2d(out_channels, affine=True)
def forward(self, x):
x = self.reflection_pad(x)
x = self.conv_layer(x)
if self.norm_type == 'None':
out = x
else:
out = self.norm_layer(x)
return out
class ResidualLayer(nn.Module):
def __init__(self, channels=128, kernel_size=3):
super(ResidualLayer, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size, stride=1)
self.relu = nn.ReLU()
self.conv2 = ConvLayer(channels, channels, kernel_size, stride=1)
def forward(self, x):
identity = x
out = self.relu(self.conv1(x))
out = self.conv2(out)
out = out + identity
return out
def get_inputs():
return [torch.rand([4, 128, 4, 4])]
def get_init_inputs():
return [[], {}]
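# A minimal usage sketch for ResidualLayer; the _example_residual_layer helper is
# illustrative only. With kernel_size=3 each ConvLayer reflection-pads by 1 before the
# stride-1 convolution, so the block preserves the input shape and the skip connection
# out + identity lines up.
def _example_residual_layer():
    block = ResidualLayer(channels=128, kernel_size=3)
    x = torch.rand(4, 128, 4, 4)
    assert block(x).shape == x.shape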
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 6
x3 = xindex // 6
y4 = yindex
x5 = xindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x2)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x3)) + 16 * y4),
xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 128 * x5 + 4608 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_repeat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 128, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_4(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 512
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (128 * r1 + 2048 * (x0 // 128) + x0 % 128),
xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 6
x2 = xindex // 768 % 6
x3 = xindex // 4608
x5 = xindex
tmp0 = tl.load(in_ptr0 + (1920 + x0 + -512 * tl_math.abs(-3 + tl_math.
abs(-1 + x2)) + -128 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) +
2048 * x3), None)
tmp1 = tl.load(in_ptr1 + (x0 + 128 * x3), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 128 * x3), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr3 + (x0 + 128 * x3), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr4 + (x0 + 128 * x3), None, eviction_policy=
'evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x5, tmp10, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_repeat_6(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 512
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
x2 = xindex % 128
x3 = xindex // 128
tmp0 = tl.load(in_ptr0 + x0 % 128, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (128 * r1 + 2048 * (x0 // 128) + x0 % 128),
xmask, other=0.0)
tmp23 = tl.load(in_ptr1 + (x2 + 128 * r1 + 2048 * x3), xmask, other=0.0)
tmp27 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr3 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = 16.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp24 = tmp23 - tmp11
tmp25 = tmp24 * tmp22
tmp26 = tmp25 * tmp0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr4 + (r1 + 16 * x0), tmp30, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_2, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_3, (128,), (1,))
assert_size_stride(primals_4, (128,), (1,))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128,), (1,))
assert_size_stride(primals_9, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16384, 9)](primals_2, buf0, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_0[grid(16384, 9)](primals_6, buf1, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
torch.float32)
triton_poi_fused_reflection_pad2d_1[grid(512, 36)](primals_1, buf2,
512, 36, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf3 = extern_kernels.convolution(buf2, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 4, 4), (2048, 1, 512, 128))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(8192)](buf4, primals_3, 8192,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_3[grid(512)](primals_4, buf5, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_4
buf6 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_3[grid(512)](primals_5, buf6, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch
.float32)
buf8 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch
.float32)
buf10 = buf8
del buf8
triton_per_fused__native_batch_norm_legit_4[grid(512)](buf10, buf4,
buf7, 512, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf11 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
torch.float32)
triton_poi_fused_reflection_pad2d_relu_5[grid(18432)](buf4, buf7,
buf10, buf5, buf6, buf11, 18432, XBLOCK=256, num_warps=4,
num_stages=1)
buf12 = extern_kernels.convolution(buf11, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 128, 4, 4), (2048, 1, 512, 128))
buf13 = buf12
del buf12
triton_poi_fused_convolution_2[grid(8192)](buf13, primals_7, 8192,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf14 = empty_strided_cuda((512,), (1,), torch.float32)
buf15 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf18 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf19 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_per_fused__native_batch_norm_legit_add_repeat_6[grid(512)](
primals_8, buf13, primals_9, primals_1, buf14, buf15, buf18,
buf19, 512, 16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_1
del primals_8
del primals_9
return (buf19, buf0, buf1, buf2, buf4, buf5, buf6, buf7, buf10, buf11,
buf13, buf14, reinterpret_tensor(buf18, (512,), (1,), 0),
reinterpret_tensor(buf15, (1, 512, 1, 1), (512, 1, 1, 1), 0))
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, norm
='instance'):
super(ConvLayer, self).__init__()
padding_size = kernel_size // 2
self.reflection_pad = nn.ReflectionPad2d(padding_size)
self.conv_layer = nn.Conv2d(in_channels, out_channels, kernel_size,
stride)
self.norm_type = norm
if norm == 'instance':
self.norm_layer = nn.InstanceNorm2d(out_channels, affine=True)
elif norm == 'batch':
self.norm_layer = nn.BatchNorm2d(out_channels, affine=True)
def forward(self, x):
x = self.reflection_pad(x)
x = self.conv_layer(x)
if self.norm_type == 'None':
out = x
else:
out = self.norm_layer(x)
return out
class ResidualLayerNew(nn.Module):
def __init__(self, channels=128, kernel_size=3):
super(ResidualLayerNew, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size, stride=1)
self.relu = nn.ReLU()
self.conv2 = ConvLayer(channels, channels, kernel_size, stride=1)
def forward(self, input_0):
primals_2 = self.conv1.conv_layer.weight
primals_3 = self.conv1.conv_layer.bias
primals_4 = self.conv1.norm_layer.weight
primals_5 = self.conv1.norm_layer.bias
primals_6 = self.conv2.conv_layer.weight
primals_7 = self.conv2.conv_layer.bias
primals_8 = self.conv2.norm_layer.weight
primals_9 = self.conv2.norm_layer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
suryawanshishantanu6/Multi-Style-Transfer
|
ResidualLayer
| false
| 10,862
|
[
"MIT"
] | 0
|
c5c211847de676596580a8a9afda940ac76abbb1
|
https://github.com/suryawanshishantanu6/Multi-Style-Transfer/tree/c5c211847de676596580a8a9afda940ac76abbb1
|
MonotonicMax
|
import torch
import torch.nn as nn
class MonotonicMax(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.cat(tuple(torch.max(i, dim=1)[0].unsqueeze(1) for i in
x), dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
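# A minimal sanity check for MonotonicMax; the _example_monotonic_max helper is
# illustrative only. For a [B, C, H, W] input the loop takes the max over dim=1 of each
# batch element and stacks the results along dim=1, which appears to coincide with
# x.amax(dim=2).permute(1, 0, 2).
def _example_monotonic_max():
    m = MonotonicMax()
    x = torch.rand(4, 4, 4, 4)
    out = m(x)
    assert out.shape == (4, 4, 4)
    assert torch.allclose(out, x.amax(dim=2).permute(1, 0, 2))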
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 2, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_ptr0 + (64 + x0 + 16 * x2), tmp17 & xmask,
eviction_policy='evict_last', other=0.0)
tmp19 = tl.load(in_ptr0 + (68 + x0 + 16 * x2), tmp17 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tl.load(in_ptr0 + (72 + x0 + 16 * x2), tmp17 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = triton_helpers.maximum(tmp20, tmp21)
tmp23 = tl.load(in_ptr0 + (76 + x0 + 16 * x2), tmp17 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = triton_helpers.maximum(tmp22, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp17, tmp24, tmp25)
tmp27 = tmp0 >= tmp15
tmp28 = tl.full([1], 3, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr0 + (128 + x0 + 16 * x2), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tl.load(in_ptr0 + (132 + x0 + 16 * x2), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp33 = triton_helpers.maximum(tmp31, tmp32)
tmp34 = tl.load(in_ptr0 + (136 + x0 + 16 * x2), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = triton_helpers.maximum(tmp33, tmp34)
tmp36 = tl.load(in_ptr0 + (140 + x0 + 16 * x2), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp37 = triton_helpers.maximum(tmp35, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp30, tmp37, tmp38)
tmp40 = tmp0 >= tmp28
tl.full([1], 4, tl.int64)
tmp43 = tl.load(in_ptr0 + (192 + x0 + 16 * x2), tmp40 & xmask,
eviction_policy='evict_last', other=0.0)
tmp44 = tl.load(in_ptr0 + (196 + x0 + 16 * x2), tmp40 & xmask,
eviction_policy='evict_last', other=0.0)
tmp45 = triton_helpers.maximum(tmp43, tmp44)
tmp46 = tl.load(in_ptr0 + (200 + x0 + 16 * x2), tmp40 & xmask,
eviction_policy='evict_last', other=0.0)
tmp47 = triton_helpers.maximum(tmp45, tmp46)
tmp48 = tl.load(in_ptr0 + (204 + x0 + 16 * x2), tmp40 & xmask,
eviction_policy='evict_last', other=0.0)
tmp49 = triton_helpers.maximum(tmp47, tmp48)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp40, tmp49, tmp50)
tmp52 = tl.where(tmp30, tmp39, tmp51)
tmp53 = tl.where(tmp17, tmp26, tmp52)
tmp54 = tl.where(tmp4, tmp13, tmp53)
tl.store(out_ptr0 + x3, tmp54, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class MonotonicMaxNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tiwalayo/monotonic-mlp
|
MonotonicMax
| false
| 10,863
|
[
"MIT"
] | 0
|
2f519797a753f7f297fac1365125c6da79f7b890
|
https://github.com/tiwalayo/monotonic-mlp/tree/2f519797a753f7f297fac1365125c6da79f7b890
|
Encoder
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
""" VAE encoder """
def __init__(self, img_channels, latent_size):
super(Encoder, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
self.fc_mu = nn.Linear(2 * 2 * 256, latent_size)
self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(x.size(0), -1)
mu = self.fc_mu(x)
logsigma = self.fc_logsigma(x)
return mu, logsigma
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'img_channels': 4, 'latent_size': 4}]
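# A minimal usage sketch for the VAE Encoder; the _example_encoder helper is
# illustrative only. The four stride-2, kernel-4 convolutions take a 64x64 input down
# to 31 -> 14 -> 6 -> 2, giving a 2*2*256 = 1024-dimensional feature that the two
# linear heads map to mu and logsigma of shape [batch, latent_size].
def _example_encoder():
    enc = Encoder(img_channels=4, latent_size=4)
    mu, logsigma = enc(torch.rand(4, 4, 64, 64))
    assert mu.shape == logsigma.shape == (4, 4)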
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 16384 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1024 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 256 * x2 + 1024 * y1), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (4, 1024), (1024, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 1024), (1024, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(128, 16)](primals_1, buf0, 128, 16, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch
.float32)
triton_poi_fused_1[grid(16, 4096)](primals_3, buf1, 16, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch.
float32)
triton_poi_fused_2[grid(2048, 16)](primals_4, buf2, 2048, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
torch.float32)
triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
triton_poi_fused_4[grid(32768, 16)](primals_8, buf4, 32768, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 31, 31), (30752, 1, 992, 32))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_5[grid(123008)](buf6, primals_2,
123008, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 14, 14), (12544, 1, 896, 64))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_6[grid(50176)](buf8, primals_5,
50176, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 128, 6, 6), (4608, 1, 768, 128))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_7[grid(18432)](buf10, primals_7,
18432, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf11 = extern_kernels.convolution(buf10, buf4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 256, 2, 2), (1024, 1, 512, 256))
buf12 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.
float32)
buf15 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(1024, 4)](
buf11, primals_9, buf12, buf15, 1024, 4, XBLOCK=4, YBLOCK=64,
num_warps=4, num_stages=1)
del buf11
del primals_9
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (4, 1024
), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf13)
del primals_11
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf12, (4, 1024
), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf14)
del primals_13
return (buf13, buf14, buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10,
reinterpret_tensor(buf12, (4, 1024), (1024, 1), 0), primals_12,
primals_10, buf15)
class EncoderNew(nn.Module):
""" VAE encoder """
def __init__(self, img_channels, latent_size):
super(EncoderNew, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
self.fc_mu = nn.Linear(2 * 2 * 256, latent_size)
self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.fc_mu.weight
primals_11 = self.fc_mu.bias
primals_12 = self.fc_logsigma.weight
primals_13 = self.fc_logsigma.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
|
susanwe/world-models
|
Encoder
| false
| 10,864
|
[
"MIT"
] | 0
|
0f246a430683e6ab741726df0a97f35830044356
|
https://github.com/susanwe/world-models/tree/0f246a430683e6ab741726df0a97f35830044356
|
LRN
|
import torch
import torch.nn as nn
class LRN(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True
):
super(LRN, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, x):
if self.ACROSS_CHANNELS:
div = x.pow(2).unsqueeze(1)
div = self.average(div).squeeze(1)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
else:
div = x.pow(2)
div = self.average(div)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
x = x.div(div)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
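# A minimal equivalence check for LRN; the _example_lrn helper is illustrative only.
# With local_size=1 the 1x1x1 average pool is an identity, so the layer should reduce
# to the elementwise formula x / (1 + alpha * x**2) ** beta.
def _example_lrn():
    lrn = LRN(local_size=1, alpha=1.0, beta=0.75)
    x = torch.rand(4, 4, 4, 4)
    expected = x / (1.0 + x.pow(2)).pow(0.75)
    assert torch.allclose(lrn(x), expected)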
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 + tmp2
tmp6 = 0.75
tmp7 = libdevice.pow(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class LRNNew(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True
):
super(LRNNew, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
txsing/dissect
|
LRN
| false
| 10,865
|
[
"MIT"
] | 0
|
3564605f7be9672c2cfc2ee19ca42225398a6e01
|
https://github.com/txsing/dissect/tree/3564605f7be9672c2cfc2ee19ca42225398a6e01
|
AdaptiveAvgMaxPool2d
|
import torch
import torch.nn as nn
class FastGlobalAvgPool2d(nn.Module):
def __init__(self, flatten=False):
super(FastGlobalAvgPool2d, self).__init__()
self.flatten = flatten
def forward(self, x):
if self.flatten:
in_size = x.size()
return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
else:
return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0),
x.size(1), 1, 1)
class AdaptiveAvgMaxPool2d(nn.Module):
def __init__(self):
super(AdaptiveAvgMaxPool2d, self).__init__()
self.gap = FastGlobalAvgPool2d()
self.gmp = nn.AdaptiveMaxPool2d(1)
def forward(self, x):
avg_feat = self.gap(x)
max_feat = self.gmp(x)
feat = avg_feat + max_feat
return feat
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
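# A minimal sanity check for AdaptiveAvgMaxPool2d; the _example_avg_max_pool helper is
# illustrative only. The module adds global average pooling and global max pooling over
# the spatial dimensions, producing one value per channel.
def _example_avg_max_pool():
    pool = AdaptiveAvgMaxPool2d()
    x = torch.rand(4, 4, 4, 4)
    out = pool(x)
    assert out.shape == (4, 4, 1, 1)
    expected = x.mean(dim=(2, 3), keepdim=True) + x.amax(dim=(2, 3), keepdim=True)
    assert torch.allclose(out, expected)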
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_adaptive_max_pool2d_add_mean_0(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp19 = triton_helpers.maximum(tmp18, tmp17)
tmp21 = triton_helpers.maximum(tmp20, tmp19)
tmp23 = triton_helpers.maximum(tmp22, tmp21)
tmp25 = triton_helpers.maximum(tmp24, tmp23)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp33 = triton_helpers.maximum(tmp32, tmp31)
tmp35 = triton_helpers.maximum(tmp34, tmp33)
tmp36 = 16.0
tmp37 = tmp4 / tmp36
tmp38 = tmp37 + tmp35
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp38, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_adaptive_max_pool2d_add_mean_0[grid(16)](buf2,
arg0_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class FastGlobalAvgPool2d(nn.Module):
def __init__(self, flatten=False):
super(FastGlobalAvgPool2d, self).__init__()
self.flatten = flatten
def forward(self, x):
if self.flatten:
in_size = x.size()
return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
else:
return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0),
x.size(1), 1, 1)
class AdaptiveAvgMaxPool2dNew(nn.Module):
def __init__(self):
super(AdaptiveAvgMaxPool2dNew, self).__init__()
self.gap = FastGlobalAvgPool2d()
self.gmp = nn.AdaptiveMaxPool2d(1)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tenghehan/reid_without_id
|
AdaptiveAvgMaxPool2d
| false
| 10,866
|
[
"MIT"
] | 0
|
d1d0ff273b1ef19fc6da8cbbf210527779b37455
|
https://github.com/tenghehan/reid_without_id/tree/d1d0ff273b1ef19fc6da8cbbf210527779b37455
|
Decoder
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
""" VAE decoder """
def __init__(self, img_channels, latent_size):
super(Decoder, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.fc1 = nn.Linear(latent_size, 1024)
self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2)
def forward(self, x):
x = F.relu(self.fc1(x))
x = x.unsqueeze(-1).unsqueeze(-1)
x = F.relu(self.deconv1(x))
x = F.relu(self.deconv2(x))
x = F.relu(self.deconv3(x))
reconstruction = torch.sigmoid(self.deconv4(x))
return reconstruction
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'img_channels': 4, 'latent_size': 4}]
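# A minimal usage sketch for the VAE Decoder; the _example_decoder helper is
# illustrative only. From a [batch, latent_size] code, fc1 produces a 1024-channel 1x1
# map and the transposed convolutions upsample 1 -> 5 -> 13 -> 30 -> 64, so the sigmoid
# reconstruction has the same 64x64 resolution as the Encoder input.
def _example_decoder():
    dec = Decoder(img_channels=4, latent_size=4)
    recon = dec(torch.rand(4, 4))
    assert recon.shape == (4, 4, 64, 64)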
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 144 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_8(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (1024, 4), (4, 1))
assert_size_stride(primals_2, (1024,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1024, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 32, 6, 6), (1152, 36, 6, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (32, 4, 6, 6), (144, 36, 6, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(131072, 25)](primals_4, buf0, 131072, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused_1[grid(8192, 25)](primals_6, buf1, 8192, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32), torch
.float32)
triton_poi_fused_2[grid(2048, 36)](primals_8, buf2, 2048, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_8
buf3 = empty_strided_cuda((32, 4, 6, 6), (144, 1, 24, 4), torch.float32
)
triton_poi_fused_3[grid(128, 36)](primals_10, buf3, 128, 36, XBLOCK
=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_10
buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 1024
), (1, 4), 0), out=buf4)
del primals_1
buf5 = buf4
del buf4
buf14 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_4[grid(4096)](buf5,
primals_2, buf14, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf6 = extern_kernels.convolution(reinterpret_tensor(buf5, (4, 1024,
1, 1), (1024, 1, 0, 0), 0), buf0, stride=(2, 2), padding=(0, 0),
dilation=(1, 1), transposed=True, output_padding=(0, 0), groups
=1, bias=None)
assert_size_stride(buf6, (4, 128, 5, 5), (3200, 1, 640, 128))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_5[grid(12800)](buf7, primals_5,
12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf8 = extern_kernels.convolution(buf7, buf1, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 13, 13), (10816, 1, 832, 64))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_6[grid(43264)](buf9, primals_7,
43264, XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf9, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 30, 30), (28800, 1, 960, 32))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_7[grid(115200)](buf11, primals_9,
115200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf12 = extern_kernels.convolution(buf11, buf3, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 64, 64), (16384, 1, 256, 4))
buf13 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_sigmoid_8[grid(16, 4096)](buf12,
primals_11, buf13, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4,
num_stages=1)
del buf12
del primals_11
return buf13, primals_3, buf0, buf1, buf2, buf3, reinterpret_tensor(buf5,
(4, 1024, 1, 1), (1024, 1, 1, 1), 0), buf7, buf9, buf11, buf13, buf14
class DecoderNew(nn.Module):
""" VAE decoder """
def __init__(self, img_channels, latent_size):
super(DecoderNew, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.fc1 = nn.Linear(latent_size, 1024)
self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.deconv1.weight
primals_5 = self.deconv1.bias
primals_6 = self.deconv2.weight
primals_7 = self.deconv2.bias
primals_8 = self.deconv3.weight
primals_9 = self.deconv3.bias
primals_10 = self.deconv4.weight
primals_11 = self.deconv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
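A minimal shape-check sketch (added for illustration, not from the source repo; img_channels=3 is an assumption): the buffer sizes asserted in call() above — 5x5, 13x13, 30x30, 64x64 — follow from out = (in - 1) * stride + kernel with stride 2 and no padding.
import torch
import torch.nn as nn

# Stand-alone copy of the decoder's ConvTranspose2d stack (img_channels=3 assumed).
deconvs = nn.Sequential(
    nn.ConvTranspose2d(1024, 128, 5, stride=2),  # 1x1   -> 5x5
    nn.ConvTranspose2d(128, 64, 5, stride=2),    # 5x5   -> 13x13
    nn.ConvTranspose2d(64, 32, 6, stride=2),     # 13x13 -> 30x30
    nn.ConvTranspose2d(32, 3, 6, stride=2),      # 30x30 -> 64x64
)
x = torch.randn(1, 1024, 1, 1)                   # fc1 output reshaped to a 1x1 map
for layer in deconvs:
    x = layer(x)
    print(tuple(x.shape))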
|
susanwe/world-models
|
Decoder
| false
| 10,867
|
[
"MIT"
] | 0
|
0f246a430683e6ab741726df0a97f35830044356
|
https://github.com/susanwe/world-models/tree/0f246a430683e6ab741726df0a97f35830044356
|
GeneralizedMeanPooling
|
import torch
import torch.nn as nn
class GeneralizedMeanPooling(nn.Module):
"""Applies a 2D power-average adaptive pooling over an input signal composed of several input planes.
The function computed is: :math:`f(X) = pow(sum(pow(X, p)), 1/p)`
- At p = infinity, one gets Max Pooling
- At p = 1, one gets Average Pooling
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H
        H and W can be either an ``int``, or ``None`` which means the size will
be the same as that of the input.
"""
def __init__(self, norm=3, output_size=1, eps=1e-06):
super(GeneralizedMeanPooling, self).__init__()
assert norm > 0
self.p = float(norm)
self.output_size = output_size
self.eps = eps
def forward(self, x):
x = x.clamp(min=self.eps).pow(self.p)
return torch.nn.functional.adaptive_avg_pool2d(x, self.output_size
).pow(1.0 / self.p)
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.p
) + ', ' + 'output_size=' + str(self.output_size) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
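A minimal CPU sketch (added for illustration, not from the source repo) of the pooling above; note the implementation averages over spatial positions via adaptive_avg_pool2d, so p = 1 recovers average pooling and a large p approaches max pooling.
import torch

x = torch.rand(2, 3, 4, 4)
eps, p = 1e-06, 3.0
gem = x.clamp(min=eps).pow(p).mean(dim=(-2, -1), keepdim=True).pow(1.0 / p)

# p = 1 is plain average pooling; a large p approximates max pooling.
avg = x.clamp(min=eps).pow(1.0).mean(dim=(-2, -1), keepdim=True)
print(torch.allclose(avg, x.mean(dim=(-2, -1), keepdim=True), atol=1e-5))
big_p = x.clamp(min=eps).pow(100.0).mean(dim=(-2, -1), keepdim=True).pow(1.0 / 100.0)
print((big_p - x.amax(dim=(-2, -1), keepdim=True)).abs().max())  # small residual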
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_clamp_mean_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 1e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp2
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = 16.0
tmp10 = tmp8 / tmp9
tmp11 = 0.3333333333333333
tmp12 = libdevice.pow(tmp10, tmp11)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp12, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_clamp_mean_pow_0[grid(16)](buf1, arg0_1, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class GeneralizedMeanPoolingNew(nn.Module):
"""Applies a 2D power-average adaptive pooling over an input signal composed of several input planes.
The function computed is: :math:`f(X) = pow(sum(pow(X, p)), 1/p)`
- At p = infinity, one gets Max Pooling
- At p = 1, one gets Average Pooling
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H
        H and W can be either an ``int``, or ``None`` which means the size will
be the same as that of the input.
"""
def __init__(self, norm=3, output_size=1, eps=1e-06):
super(GeneralizedMeanPoolingNew, self).__init__()
assert norm > 0
self.p = float(norm)
self.output_size = output_size
self.eps = eps
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.p
) + ', ' + 'output_size=' + str(self.output_size) + ')'
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tenghehan/reid_without_id
|
GeneralizedMeanPooling
| false
| 10,868
|
[
"MIT"
] | 0
|
d1d0ff273b1ef19fc6da8cbbf210527779b37455
|
https://github.com/tenghehan/reid_without_id/tree/d1d0ff273b1ef19fc6da8cbbf210527779b37455
|
LearnedPositionalEmbedding
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: 'int', embedding_dim: 'int',
padding_idx: 'int'):
if padding_idx is not None:
num_embeddings_ = num_embeddings + padding_idx + 1
else:
num_embeddings_ = num_embeddings
super().__init__(num_embeddings_, embedding_dim, padding_idx)
self.max_positions = num_embeddings
def forward(self, input: 'torch.Tensor'):
"""Input is expected to be of size [bsz x seqlen]."""
if input.size(1) > self.max_positions:
raise ValueError(
f'Sequence length {input.size(1)} above maximum sequence length of {self.max_positions}'
)
mask = input.ne(self.padding_idx).int()
positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long(
) + self.padding_idx
return F.embedding(positions, self.weight, self.padding_idx, self.
max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_embeddings': 4, 'embedding_dim': 4, 'padding_idx': 4}]
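A small sketch (added for illustration; the token values are made up) of how the position ids above are built: a cumulative sum over the non-padding mask, zeroed at padding positions, then offset by padding_idx.
import torch

padding_idx = 1
tokens = torch.tensor([[5, 7, 1, 1],
                       [3, 1, 1, 1]])   # 1 marks padding
mask = tokens.ne(padding_idx).int()
positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
print(positions)
# tensor([[2, 3, 1, 1],
#         [2, 1, 1, 1]])  -- padding slots all map back to padding_idx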
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused__to_copy_cumsum_ne_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = 4.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.int32)
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4.to(tl.int64)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp7, = tl.associative_scan((tmp6,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp7, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_mul_ne_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0.to(tl.int32)
tmp3 = 4.0
tmp4 = tmp2 != tmp3
tmp5 = tmp4.to(tl.int32)
tmp6 = tmp1 * tmp5
tmp7 = tmp6.to(tl.int64)
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 + tmp8
tl.store(in_out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_embedding_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 9) | ~xmask,
'index out of bounds: 0 <= tmp4 < 9')
tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (9, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
get_raw_stream(0)
triton_per_fused__to_copy_cumsum_ne_0[grid(64)](primals_1, buf0, 64,
4, XBLOCK=32, num_warps=2, num_stages=1)
buf1 = buf0
del buf0
triton_poi_fused__to_copy_add_mul_ne_1[grid(256)](buf1, primals_1,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_embedding_2[grid(1024)](buf1, primals_2, buf2,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf2, buf1
class LearnedPositionalEmbeddingNew(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: 'int', embedding_dim: 'int',
padding_idx: 'int'):
if padding_idx is not None:
num_embeddings_ = num_embeddings + padding_idx + 1
else:
num_embeddings_ = num_embeddings
super().__init__(num_embeddings_, embedding_dim, padding_idx)
self.max_positions = num_embeddings
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
sohrabi1/esm
|
LearnedPositionalEmbedding
| false
| 10,869
|
[
"MIT"
] | 0
|
e1f60a66b5c351d9d0011926549890b6744903c1
|
https://github.com/sohrabi1/esm/tree/e1f60a66b5c351d9d0011926549890b6744903c1
|
LogSTFTMagnitudeLoss
|
import torch
import torch.nn.functional as F
import torch.utils.data
class LogSTFTMagnitudeLoss(torch.nn.Module):
"""Log STFT magnitude loss module."""
def __init__(self):
"""Initilize los STFT magnitude loss module."""
super(LogSTFTMagnitudeLoss, self).__init__()
def forward(self, x_mag, y_mag):
"""Calculate forward propagation.
Args:
x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
Returns:
Tensor: Log STFT magnitude loss value.
"""
return F.l1_loss(torch.log(y_mag), torch.log(x_mag))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
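A short CPU sketch (added for illustration): the loss is just an L1 distance between log magnitudes, which the fused Triton kernel below computes in a single reduction pass.
import torch
import torch.nn.functional as F

x_mag = torch.rand(4, 4, 4, 4) + 1e-3   # keep magnitudes strictly positive for log
y_mag = torch.rand(4, 4, 4, 4) + 1e-3
loss = F.l1_loss(torch.log(y_mag), torch.log(x_mag))
print(torch.allclose(loss, (torch.log(y_mag) - torch.log(x_mag)).abs().mean()))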
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_log_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.log(tmp2)
tmp4 = tmp1 - tmp3
tmp5 = tl_math.abs(tmp4)
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_log_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class LogSTFTMagnitudeLossNew(torch.nn.Module):
"""Log STFT magnitude loss module."""
def __init__(self):
"""Initilize los STFT magnitude loss module."""
super(LogSTFTMagnitudeLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tebin/Fre-GAN-pytorch
|
LogSTFTMagnitudeLoss
| false
| 10,870
|
[
"MIT"
] | 0
|
e2f51317ae3953f10b8a0d112fc14991a02ebe91
|
https://github.com/tebin/Fre-GAN-pytorch/tree/e2f51317ae3953f10b8a0d112fc14991a02ebe91
|
SpectralConvergengeLoss
|
import torch
import torch.utils.data
class SpectralConvergengeLoss(torch.nn.Module):
"""Spectral convergence loss module."""
def __init__(self):
"""Initilize spectral convergence loss module."""
super(SpectralConvergengeLoss, self).__init__()
def forward(self, x_mag, y_mag):
"""Calculate forward propagation.
Args:
x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
Returns:
Tensor: Spectral convergence loss value.
"""
return torch.norm(y_mag - x_mag, p='fro') / torch.norm(y_mag, p='fro')
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
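A short CPU sketch (added for illustration): the Frobenius-norm ratio above written out elementwise, matching what the single fused kernel below reduces.
import torch

x_mag = torch.rand(4, 4, 4, 4)
y_mag = torch.rand(4, 4, 4, 4)
loss = torch.norm(y_mag - x_mag, p='fro') / torch.norm(y_mag, p='fro')
manual = ((y_mag - x_mag) ** 2).sum().sqrt() / (y_mag ** 2).sum().sqrt()
print(torch.allclose(loss, manual))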
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_linalg_vector_norm_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp0 * tmp0
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = libdevice.sqrt(tmp6)
tmp12 = libdevice.sqrt(tmp10)
tmp13 = tmp11 / tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_linalg_vector_norm_sub_0[grid(1)](buf2, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class SpectralConvergengeLossNew(torch.nn.Module):
"""Spectral convergence loss module."""
def __init__(self):
"""Initilize spectral convergence loss module."""
super(SpectralConvergengeLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tebin/Fre-GAN-pytorch
|
SpectralConvergengeLoss
| false
| 10,871
|
[
"MIT"
] | 0
|
e2f51317ae3953f10b8a0d112fc14991a02ebe91
|
https://github.com/tebin/Fre-GAN-pytorch/tree/e2f51317ae3953f10b8a0d112fc14991a02ebe91
|
ClipGlobalAvgPool2d
|
import torch
import torch.nn as nn
class FastGlobalAvgPool2d(nn.Module):
def __init__(self, flatten=False):
super(FastGlobalAvgPool2d, self).__init__()
self.flatten = flatten
def forward(self, x):
if self.flatten:
in_size = x.size()
return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
else:
return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0),
x.size(1), 1, 1)
class ClipGlobalAvgPool2d(nn.Module):
def __init__(self):
super().__init__()
self.avgpool = FastGlobalAvgPool2d()
def forward(self, x):
x = self.avgpool(x)
x = torch.clamp(x, min=0.0, max=1.0)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
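A brief sketch (added for illustration): the module above is a global average pool followed by a clamp to [0, 1], equivalent to adaptive_avg_pool2d with output size 1.
import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
pooled = x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0), x.size(1), 1, 1)
clipped = torch.clamp(pooled, min=0.0, max=1.0)
reference = torch.clamp(F.adaptive_avg_pool2d(x, 1), min=0.0, max=1.0)
print(torch.allclose(clipped, reference))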
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_clamp_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = 1.0
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_clamp_mean_0[grid(16)](buf1, arg0_1, 16, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class FastGlobalAvgPool2d(nn.Module):
def __init__(self, flatten=False):
super(FastGlobalAvgPool2d, self).__init__()
self.flatten = flatten
def forward(self, x):
if self.flatten:
in_size = x.size()
return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
else:
return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0),
x.size(1), 1, 1)
class ClipGlobalAvgPool2dNew(nn.Module):
def __init__(self):
super().__init__()
self.avgpool = FastGlobalAvgPool2d()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tenghehan/reid_without_id
|
ClipGlobalAvgPool2d
| false
| 10,872
|
[
"MIT"
] | 0
|
d1d0ff273b1ef19fc6da8cbbf210527779b37455
|
https://github.com/tenghehan/reid_without_id/tree/d1d0ff273b1ef19fc6da8cbbf210527779b37455
|
AttentionLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AttentionLayer(nn.Module):
def __init__(self, hidden_size):
super(AttentionLayer, self).__init__()
self.hidden_size = hidden_size
def dot_product_attention(self, hidden, encoder_output):
return torch.sum(hidden * encoder_output, dim=2)
def forward(self, hidden, encoded_output):
energies = self.dot_product_attention(hidden, encoded_output)
energies = energies.t()
return F.softmax(energies, dim=1).unsqueeze(1)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
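A small sketch (added for illustration; the (seq, batch, hidden) layout is an assumption based on the transpose in forward): energies are dot products summed over the hidden dimension, transposed to (batch, seq), then softmax-normalized over the encoder steps.
import torch
import torch.nn.functional as F

seq_len, batch, hidden_size = 5, 2, 8
hidden = torch.randn(1, batch, hidden_size)           # broadcast against every encoder step
encoder_output = torch.randn(seq_len, batch, hidden_size)

energies = torch.sum(hidden * encoder_output, dim=2)  # (seq_len, batch)
attn = F.softmax(energies.t(), dim=1).unsqueeze(1)    # (batch, 1, seq_len)
print(attn.shape, attn.sum(dim=2))                    # weights sum to 1 per row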
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + y0, ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + y0), ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + y0), ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + y0), ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x1 + 4 * y0), tmp8, xmask & ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](arg0_1, arg1_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_2[grid(4, 4)](buf1, buf2, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
del buf1
return reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0),
class AttentionLayerNew(nn.Module):
def __init__(self, hidden_size):
super(AttentionLayerNew, self).__init__()
self.hidden_size = hidden_size
def dot_product_attention(self, hidden, encoder_output):
return torch.sum(hidden * encoder_output, dim=2)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
u7javed/AI-Chatbot
|
AttentionLayer
| false
| 10,873
|
[
"MIT"
] | 0
|
d86916537e7b0b9a45f11d0fe0367fe9f66721e7
|
https://github.com/u7javed/AI-Chatbot/tree/d86916537e7b0b9a45f11d0fe0367fe9f66721e7
|
NormScaleFeature
|
import torch
from torch import nn
class NormScaleFeature(nn.Module):
def __init__(self, init_value=1):
super().__init__()
self.scale = nn.Parameter(torch.FloatTensor([init_value]))
def forward(self, input):
magnitudes = 1e-06 + torch.sqrt(torch.sum(input ** 2, axis=1,
keepdims=True))
output = self.scale * input / magnitudes
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
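A compact sketch (added for illustration): each spatial position's channel vector is divided by 1e-06 plus its L2 norm and rescaled by the learned scalar, so the resulting channel norms sit close to the scale value.
import torch

x = torch.randn(4, 4, 4, 4)
scale = torch.tensor([2.0])               # stand-in for the learned parameter
magnitudes = 1e-06 + torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True))
out = scale * x / magnitudes
print(torch.sqrt(torch.sum(out ** 2, dim=1)).mean())  # ~2.0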
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 * tmp2
tmp5 = tmp4 * tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp11 + tmp13
tmp15 = libdevice.sqrt(tmp14)
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = tmp3 / tmp17
tl.store(out_ptr0 + x3, tmp18, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_2,
primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class NormScaleFeatureNew(nn.Module):
def __init__(self, init_value=1):
super().__init__()
self.scale = nn.Parameter(torch.FloatTensor([init_value]))
def forward(self, input_0):
primals_2 = self.scale
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
uncbiag/FeatureMapICON
|
NormScaleFeature
| false
| 10,874
|
[
"Apache-2.0"
] | 0
|
04160d0ce4e8f7615e1c59a1be5c6b8340b5b6e5
|
https://github.com/uncbiag/FeatureMapICON/tree/04160d0ce4e8f7615e1c59a1be5c6b8340b5b6e5
|
Scaled_Dot_Product_Attention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Scaled_Dot_Product_Attention(nn.Module):
"""Scaled Dot-Product Attention """
def __init__(self):
super(Scaled_Dot_Product_Attention, self).__init__()
def forward(self, Q, K, V, scale=None):
"""
Args:
Q: [batch_size, len_Q, dim_Q]
K: [batch_size, len_K, dim_K]
V: [batch_size, len_V, dim_V]
        scale: scaling factor; the paper uses sqrt(dim_K)
        Return:
        the tensor after self-attention, and the attention tensor
"""
attention = torch.matmul(Q, K.permute(0, 2, 1))
if scale:
attention = attention * scale
attention = F.softmax(attention, dim=-1)
context = torch.matmul(attention, V)
return context
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {}]
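A minimal sketch (added for illustration) of the same computation with the scale set to 1/sqrt(dim_K), as the docstring suggests; the Triton version below handles the unscaled case exercised by get_inputs().
import torch
import torch.nn.functional as F

Q = torch.randn(4, 4, 4)
K = torch.randn(4, 4, 4)
V = torch.randn(4, 4, 4)
scale = K.size(-1) ** -0.5                        # 1 / sqrt(dim_K)
attention = torch.matmul(Q, K.permute(0, 2, 1)) * scale
attention = F.softmax(attention, dim=-1)
context = torch.matmul(attention, V)
print(context.shape)                              # torch.Size([4, 4, 4])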
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
del buf2
return buf3,
class Scaled_Dot_Product_AttentionNew(nn.Module):
"""Scaled Dot-Product Attention """
def __init__(self):
super(Scaled_Dot_Product_AttentionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
tianjiansmile/Chinese-Text-Classification-Pytorch
|
Scaled_Dot_Product_Attention
| false
| 10,875
|
[
"MIT"
] | 0
|
05cc211b161f61e6bb32ab185dadcffec2f5b5de
|
https://github.com/tianjiansmile/Chinese-Text-Classification-Pytorch/tree/05cc211b161f61e6bb32ab185dadcffec2f5b5de
|
UNETMax
|
import torch
from torch import nn
class UNETMax(nn.Module):
"""UNET Without concatenation during decoding"""
def __init__(self):
super(UNETMax, self).__init__()
self.conv1_1 = nn.Conv2d(in_channels=3, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.conv1_2 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2_1 = nn.Conv2d(in_channels=16, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.conv2_2 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3_1 = nn.Conv2d(in_channels=32, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.conv3_2 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.relu3 = nn.ReLU()
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv4_1 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv4_2 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.relu4 = nn.ReLU()
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv5_1 = nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.conv5_2 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.relu5 = nn.ReLU()
self.upsample1 = nn.ConvTranspose2d(in_channels=256, out_channels=
128, kernel_size=2, stride=2)
self.conv6_1 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv6_2 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.relu6 = nn.ReLU()
self.upsample2 = nn.ConvTranspose2d(in_channels=128, out_channels=
64, kernel_size=2, stride=2)
self.conv7_1 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.conv7_2 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.relu7 = nn.ReLU()
self.upsample3 = nn.ConvTranspose2d(in_channels=64, out_channels=32,
kernel_size=2, stride=2)
self.conv8_1 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.conv8_2 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.relu8 = nn.ReLU()
self.upsample4 = nn.ConvTranspose2d(in_channels=32, out_channels=16,
kernel_size=2, stride=2)
self.conv9_1 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.conv9_2 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.relu9 = nn.ReLU()
self.conv10 = nn.Conv2d(in_channels=16, out_channels=2, kernel_size=1)
self.relu10 = nn.ReLU()
def forward(self, x):
c1 = self.relu1(self.conv1_2(self.conv1_1(x)))
p1 = self.pool1(c1)
c2 = self.relu2(self.conv2_2(self.conv2_1(p1)))
p2 = self.pool2(c2)
c3 = self.relu3(self.conv3_2(self.conv3_1(p2)))
p3 = self.pool3(c3)
c4 = self.relu4(self.conv4_2(self.conv4_1(p3)))
p4 = self.pool4(c4)
c5 = self.relu5(self.conv5_2(self.conv5_1(p4)))
u6 = self.upsample1(c5)
u6 = torch.max(u6, c4)
c6 = self.relu6(self.conv6_2(self.conv6_1(u6)))
u7 = self.upsample2(c6)
u7 = torch.max(u7, c3)
c7 = self.relu7(self.conv7_2(self.conv7_1(u7)))
u8 = self.upsample3(c7)
u8 = torch.max(u8, c2)
c8 = self.relu8(self.conv8_2(self.conv8_1(u8)))
u9 = self.upsample4(c8)
u9 = torch.max(u9, c1)
c9 = self.relu9(self.conv9_2(self.conv9_1(u9)))
c10 = self.relu10(self.conv10(c9))
return c10
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
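A small sketch (added for illustration) of the decoder merge used above: instead of concatenating skip connections, each upsampled map is combined with the matching encoder map by an elementwise maximum, which keeps the channel count unchanged.
import torch

upsampled = torch.randn(1, 128, 8, 8)  # output of a ConvTranspose2d stage
skip = torch.randn(1, 128, 8, 8)       # matching encoder activation (c4 in forward)
merged = torch.max(upsampled, skip)    # elementwise max, still 128 channels
print(merged.shape)                    # torch.Size([1, 128, 8, 8])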
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 48
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 256
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_11(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 512 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_12(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 256 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_13(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 128 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_14(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16 % 32
x2 = xindex // 512
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 32 * x1 + 2048 * x2), None)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 32 * x1 + 2048 * x2), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + 32 * x1 + 2048 * x2), None)
tmp5 = tl.load(in_ptr0 + (1040 + x0 + 32 * x1 + 2048 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_19(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_20(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32 % 16
x2 = xindex // 512
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 2048 * x2), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 2048 * x2), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + 64 * x1 + 2048 * x2), None)
tmp5 = tl.load(in_ptr0 + (1056 + x0 + 64 * x1 + 2048 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_22(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_23(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 8
x2 = xindex // 512
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 2048 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 2048 * x2), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + 128 * x1 + 2048 * x2), None)
tmp5 = tl.load(in_ptr0 + (1088 + x0 + 128 * x1 + 2048 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_24(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_25(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_26(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 4
x2 = xindex // 512
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 2048 * x2), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 2048 * x2), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + 256 * x1 + 2048 * x2), None)
tmp5 = tl.load(in_ptr0 + (1152 + x0 + 256 * x1 + 2048 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_27(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_28(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_maximum_29(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
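    # Adds the 128-channel ConvTranspose2d bias in place and also writes the
    # elementwise maximum with the matching encoder feature map (in_ptr1) --
    # the "max merge" this UNet variant uses instead of skip concatenation.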
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, None)
tmp2 = tmp0 + tmp1
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_maximum_30(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, None)
tmp2 = tmp0 + tmp1
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_maximum_31(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, None)
tmp2 = tmp0 + tmp1
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_maximum_32(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, None)
tmp2 = tmp0 + tmp1
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_33(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
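    # Epilogue of the final 1x1 convolution: adds the 2-channel bias, applies
    # ReLU, writes the activations in contiguous NCHW layout (out_ptr0) and a
    # boolean "<= 0" mask in channels-last layout (out_ptr1) for the backward
    # pass.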
ynumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 2
y1 = yindex // 2
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 8192 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp4, ymask)
tl.store(out_ptr1 + (y0 + 2 * x2 + 8192 * y1), tmp6, ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47) = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_21, (256,), (1,))
assert_size_stride(primals_22, (256, 128, 2, 2), (512, 4, 2, 1))
assert_size_stride(primals_23, (128,), (1,))
assert_size_stride(primals_24, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_25, (128,), (1,))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128,), (1,))
assert_size_stride(primals_28, (128, 64, 2, 2), (256, 4, 2, 1))
assert_size_stride(primals_29, (64,), (1,))
assert_size_stride(primals_30, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_31, (64,), (1,))
assert_size_stride(primals_32, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_33, (64,), (1,))
assert_size_stride(primals_34, (64, 32, 2, 2), (128, 4, 2, 1))
assert_size_stride(primals_35, (32,), (1,))
assert_size_stride(primals_36, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_37, (32,), (1,))
assert_size_stride(primals_38, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_39, (32,), (1,))
assert_size_stride(primals_40, (32, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_41, (16,), (1,))
assert_size_stride(primals_42, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_43, (16,), (1,))
assert_size_stride(primals_44, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_45, (16,), (1,))
assert_size_stride(primals_46, (2, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_47, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 3, 3, 3), (27, 1, 9, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(48, 9)](primals_1, buf0, 48, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((16, 16, 3, 3), (144, 1, 48, 16), torch.
float32)
triton_poi_fused_2[grid(256, 9)](primals_4, buf2, 256, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((32, 16, 3, 3), (144, 1, 48, 16), torch.
float32)
triton_poi_fused_3[grid(512, 9)](primals_6, buf3, 512, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((32, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_4[grid(1024, 9)](primals_8, buf4, 1024, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_5[grid(2048, 9)](primals_10, buf5, 2048, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_6[grid(4096, 9)](primals_12, buf6, 4096, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf7 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_7[grid(8192, 9)](primals_14, buf7, 8192, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf8 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_8[grid(16384, 9)](primals_16, buf8, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf9 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_9[grid(32768, 9)](primals_18, buf9, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf10 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_10[grid(65536, 9)](primals_20, buf10, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf11 = empty_strided_cuda((256, 128, 2, 2), (512, 1, 256, 128),
torch.float32)
triton_poi_fused_11[grid(32768, 4)](primals_22, buf11, 32768, 4,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_22
buf12 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_8[grid(16384, 9)](primals_24, buf12, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_24
buf13 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_8[grid(16384, 9)](primals_26, buf13, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_26
buf14 = empty_strided_cuda((128, 64, 2, 2), (256, 1, 128, 64),
torch.float32)
triton_poi_fused_12[grid(8192, 4)](primals_28, buf14, 8192, 4,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_28
buf15 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_6[grid(4096, 9)](primals_30, buf15, 4096, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_30
buf16 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_6[grid(4096, 9)](primals_32, buf16, 4096, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_32
buf17 = empty_strided_cuda((64, 32, 2, 2), (128, 1, 64, 32), torch.
float32)
triton_poi_fused_13[grid(2048, 4)](primals_34, buf17, 2048, 4,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_34
buf18 = empty_strided_cuda((32, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_4[grid(1024, 9)](primals_36, buf18, 1024, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_36
buf19 = empty_strided_cuda((32, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_4[grid(1024, 9)](primals_38, buf19, 1024, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_38
buf20 = empty_strided_cuda((32, 16, 2, 2), (64, 1, 32, 16), torch.
float32)
triton_poi_fused_14[grid(512, 4)](primals_40, buf20, 512, 4, XBLOCK
=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_40
buf21 = empty_strided_cuda((16, 16, 3, 3), (144, 1, 48, 16), torch.
float32)
triton_poi_fused_2[grid(256, 9)](primals_42, buf21, 256, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_42
buf22 = empty_strided_cuda((16, 16, 3, 3), (144, 1, 48, 16), torch.
float32)
triton_poi_fused_2[grid(256, 9)](primals_44, buf22, 256, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_44
buf23 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 16, 64, 64), (65536, 1, 1024, 16))
buf24 = buf23
del buf23
triton_poi_fused_convolution_15[grid(262144)](buf24, primals_2,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf25 = extern_kernels.convolution(buf24, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 16, 64, 64), (65536, 1, 1024, 16))
buf26 = buf25
del buf25
triton_poi_fused_convolution_relu_16[grid(262144)](buf26, primals_5,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf27 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16),
torch.float32)
buf28 = empty_strided_cuda((4, 16, 32, 32), (16384, 1, 512, 16),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_17[grid(65536)](buf26,
buf27, buf28, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf29 = extern_kernels.convolution(buf27, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf29, (4, 32, 32, 32), (32768, 1, 1024, 32))
buf30 = buf29
del buf29
triton_poi_fused_convolution_18[grid(131072)](buf30, primals_7,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf31 = extern_kernels.convolution(buf30, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 32, 32, 32), (32768, 1, 1024, 32))
buf32 = buf31
del buf31
triton_poi_fused_convolution_relu_19[grid(131072)](buf32, primals_9,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf33 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32),
torch.float32)
buf34 = empty_strided_cuda((4, 32, 16, 16), (8192, 1, 512, 32),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_20[grid(32768)](buf32,
buf33, buf34, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf35 = extern_kernels.convolution(buf33, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf36 = buf35
del buf35
triton_poi_fused_convolution_21[grid(65536)](buf36, primals_11,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_11
buf37 = extern_kernels.convolution(buf36, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf38 = buf37
del buf37
triton_poi_fused_convolution_relu_22[grid(65536)](buf38, primals_13,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_13
buf39 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch
.float32)
buf40 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_23[grid(16384)](buf38,
buf39, buf40, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf41 = extern_kernels.convolution(buf39, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf42 = buf41
del buf41
triton_poi_fused_convolution_24[grid(32768)](buf42, primals_15,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf43 = extern_kernels.convolution(buf42, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf44 = buf43
del buf43
triton_poi_fused_convolution_relu_25[grid(32768)](buf44, primals_17,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf45 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
buf46 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_26[grid(8192)](buf44,
buf45, buf46, 8192, XBLOCK=256, num_warps=4, num_stages=1)
buf47 = extern_kernels.convolution(buf45, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf48 = buf47
del buf47
triton_poi_fused_convolution_27[grid(16384)](buf48, primals_19,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf49 = extern_kernels.convolution(buf48, buf10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf50 = buf49
del buf49
triton_poi_fused_convolution_relu_28[grid(16384)](buf50, primals_21,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_21
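        # Decoder path: each ConvTranspose2d upsample below is merged with the
        # corresponding encoder activation via an elementwise max
        # (triton_poi_fused_convolution_maximum_*) rather than concatenation.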
buf51 = extern_kernels.convolution(buf50, buf11, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf52 = buf51
del buf51
buf53 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.float32)
triton_poi_fused_convolution_maximum_29[grid(32768)](buf52,
primals_23, buf44, buf53, 32768, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_23
buf54 = extern_kernels.convolution(buf53, buf12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf55 = buf54
del buf54
triton_poi_fused_convolution_24[grid(32768)](buf55, primals_25,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf56 = extern_kernels.convolution(buf55, buf13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 128, 8, 8), (8192, 1, 1024, 128))
buf57 = buf56
del buf56
triton_poi_fused_convolution_relu_25[grid(32768)](buf57, primals_27,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_27
buf58 = extern_kernels.convolution(buf57, buf14, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf59 = buf58
del buf58
buf60 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
torch.float32)
triton_poi_fused_convolution_maximum_30[grid(65536)](buf59,
primals_29, buf38, buf60, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_29
buf61 = extern_kernels.convolution(buf60, buf15, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf62 = buf61
del buf61
triton_poi_fused_convolution_21[grid(65536)](buf62, primals_31,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_31
buf63 = extern_kernels.convolution(buf62, buf16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf63, (4, 64, 16, 16), (16384, 1, 1024, 64))
buf64 = buf63
del buf63
triton_poi_fused_convolution_relu_22[grid(65536)](buf64, primals_33,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_33
buf65 = extern_kernels.convolution(buf64, buf17, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf65, (4, 32, 32, 32), (32768, 1, 1024, 32))
buf66 = buf65
del buf65
buf67 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32),
torch.float32)
triton_poi_fused_convolution_maximum_31[grid(131072)](buf66,
primals_35, buf32, buf67, 131072, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_35
buf68 = extern_kernels.convolution(buf67, buf18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 32, 32, 32), (32768, 1, 1024, 32))
buf69 = buf68
del buf68
triton_poi_fused_convolution_18[grid(131072)](buf69, primals_37,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_37
buf70 = extern_kernels.convolution(buf69, buf19, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 32, 32, 32), (32768, 1, 1024, 32))
buf71 = buf70
del buf70
triton_poi_fused_convolution_relu_19[grid(131072)](buf71,
primals_39, 131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_39
buf72 = extern_kernels.convolution(buf71, buf20, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf72, (4, 16, 64, 64), (65536, 1, 1024, 16))
buf73 = buf72
del buf72
buf74 = empty_strided_cuda((4, 16, 64, 64), (65536, 1, 1024, 16),
torch.float32)
triton_poi_fused_convolution_maximum_32[grid(262144)](buf73,
primals_41, buf26, buf74, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_41
buf75 = extern_kernels.convolution(buf74, buf21, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf75, (4, 16, 64, 64), (65536, 1, 1024, 16))
buf76 = buf75
del buf75
triton_poi_fused_convolution_15[grid(262144)](buf76, primals_43,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_43
buf77 = extern_kernels.convolution(buf76, buf22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf77, (4, 16, 64, 64), (65536, 1, 1024, 16))
buf78 = buf77
del buf77
triton_poi_fused_convolution_relu_16[grid(262144)](buf78,
primals_45, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_45
buf79 = extern_kernels.convolution(buf78, primals_46, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf79, (4, 2, 64, 64), (8192, 1, 128, 2))
buf80 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1),
torch.float32)
buf81 = empty_strided_cuda((4, 2, 64, 64), (8192, 1, 128, 2), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_33[grid(8, 4096)](
buf79, primals_47, buf80, buf81, 8, 4096, XBLOCK=128, YBLOCK=8,
num_warps=4, num_stages=1)
del buf79
del primals_47
return (buf80, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8,
buf9, buf10, buf11, buf12, buf13, buf14, buf15, buf16, buf17, buf18,
buf19, buf20, buf21, buf22, primals_46, buf24, buf26, buf27, buf28,
buf30, buf32, buf33, buf34, buf36, buf38, buf39, buf40, buf42,
buf44, buf45, buf46, buf48, buf50, buf52, buf53, buf55, buf57,
buf59, buf60, buf62, buf64, buf66, buf67, buf69, buf71, buf73,
buf74, buf76, buf78, buf81)
class UNETMaxNew(nn.Module):
"""UNET Without concatenation during decoding"""
def __init__(self):
super(UNETMaxNew, self).__init__()
self.conv1_1 = nn.Conv2d(in_channels=3, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.conv1_2 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2_1 = nn.Conv2d(in_channels=16, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.conv2_2 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3_1 = nn.Conv2d(in_channels=32, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.conv3_2 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.relu3 = nn.ReLU()
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv4_1 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv4_2 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.relu4 = nn.ReLU()
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv5_1 = nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.conv5_2 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.relu5 = nn.ReLU()
self.upsample1 = nn.ConvTranspose2d(in_channels=256, out_channels=
128, kernel_size=2, stride=2)
self.conv6_1 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv6_2 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.relu6 = nn.ReLU()
self.upsample2 = nn.ConvTranspose2d(in_channels=128, out_channels=
64, kernel_size=2, stride=2)
self.conv7_1 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.conv7_2 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.relu7 = nn.ReLU()
self.upsample3 = nn.ConvTranspose2d(in_channels=64, out_channels=32,
kernel_size=2, stride=2)
self.conv8_1 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.conv8_2 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding=1)
self.relu8 = nn.ReLU()
self.upsample4 = nn.ConvTranspose2d(in_channels=32, out_channels=16,
kernel_size=2, stride=2)
self.conv9_1 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.conv9_2 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding=1)
self.relu9 = nn.ReLU()
self.conv10 = nn.Conv2d(in_channels=16, out_channels=2, kernel_size=1)
self.relu10 = nn.ReLU()
def forward(self, input_0):
primals_1 = self.conv1_1.weight
primals_2 = self.conv1_1.bias
primals_4 = self.conv1_2.weight
primals_5 = self.conv1_2.bias
primals_6 = self.conv2_1.weight
primals_7 = self.conv2_1.bias
primals_8 = self.conv2_2.weight
primals_9 = self.conv2_2.bias
primals_10 = self.conv3_1.weight
primals_11 = self.conv3_1.bias
primals_12 = self.conv3_2.weight
primals_13 = self.conv3_2.bias
primals_14 = self.conv4_1.weight
primals_15 = self.conv4_1.bias
primals_16 = self.conv4_2.weight
primals_17 = self.conv4_2.bias
primals_18 = self.conv5_1.weight
primals_19 = self.conv5_1.bias
primals_20 = self.conv5_2.weight
primals_21 = self.conv5_2.bias
primals_22 = self.upsample1.weight
primals_23 = self.upsample1.bias
primals_24 = self.conv6_1.weight
primals_25 = self.conv6_1.bias
primals_26 = self.conv6_2.weight
primals_27 = self.conv6_2.bias
primals_28 = self.upsample2.weight
primals_29 = self.upsample2.bias
primals_30 = self.conv7_1.weight
primals_31 = self.conv7_1.bias
primals_32 = self.conv7_2.weight
primals_33 = self.conv7_2.bias
primals_34 = self.upsample3.weight
primals_35 = self.upsample3.bias
primals_36 = self.conv8_1.weight
primals_37 = self.conv8_1.bias
primals_38 = self.conv8_2.weight
primals_39 = self.conv8_2.bias
primals_40 = self.upsample4.weight
primals_41 = self.upsample4.bias
primals_42 = self.conv9_1.weight
primals_43 = self.conv9_1.bias
primals_44 = self.conv9_2.weight
primals_45 = self.conv9_2.bias
primals_46 = self.conv10.weight
primals_47 = self.conv10.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47])
return output[0]
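
# Hypothetical usage sketch (not part of the original repo): the generated
# call() above hard-codes a (4, 3, 64, 64) CUDA input (see the primals_3
# assert) and returns the ReLU'd 2-channel logits as its first output, so a
# minimal smoke test on a machine with CUDA and triton could look like this.
if __name__ == '__main__':
    net = UNETMaxNew().cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    out = net(x)
    assert out.shape == (4, 2, 64, 64)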
 | repo_name: quenting44/semantic_segmentation | module_name: UNETMax | synthetic: false | uuid: 10,876 | licenses: ["MIT"] | stars: 0 | sha: bd197ddda3c6891d69ff7e552a0c224c7ec1269a | repo_link: https://github.com/quenting44/semantic_segmentation/tree/bd197ddda3c6891d69ff7e552a0c224c7ec1269a |
PositionwiseFeedForward
|
import torch
from abc import ABC
import torch.nn as nn
class PositionwiseFeedForward(nn.Module, ABC):
def __init__(self, d_in, d_hidden, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hidden)
self.w_2 = nn.Linear(d_hidden, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, x):
residual = x
x = self.w_2(self.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_in': 4, 'd_hidden': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from abc import ABC
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(64)](buf3, primals_1,
buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_3[grid(256)](buf3, primals_1,
buf4, buf5, primals_6, primals_7, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf4
del buf5
del primals_7
return buf6, primals_1, primals_6, buf2, buf3, primals_4, buf7
class PositionwiseFeedForwardNew(nn.Module, ABC):
def __init__(self, d_in, d_hidden, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hidden)
self.w_2 = nn.Linear(d_hidden, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, input_0):
primals_2 = self.w_1.weight
primals_3 = self.w_1.bias
primals_4 = self.w_2.weight
primals_5 = self.w_2.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
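
# Hedged equivalence sketch (assumption, not from the source repo; it also
# assumes the eager PositionwiseFeedForward defined earlier in this entry is in
# scope): in eval mode the eager module reduces to
# layer_norm(w_2(relu(w_1(x))) + x), which is exactly what call() fuses, so
# with shared weights the two should agree on a CUDA machine with triton.
if __name__ == '__main__':
    eager = PositionwiseFeedForward(4, 4).cuda().eval()
    fused = PositionwiseFeedForwardNew(4, 4).cuda().eval()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    print(torch.allclose(eager(x), fused(x), atol=1e-5))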
 | repo_name: superMC5657/transformer | module_name: PositionwiseFeedForward | synthetic: false | uuid: 10,877 | licenses: ["MIT"] | stars: 0 | sha: b9d9ca3a5f307f6587330a8235e8d5a2a3650510 | repo_link: https://github.com/superMC5657/transformer/tree/b9d9ca3a5f307f6587330a8235e8d5a2a3650510 |
GCN
|
from torch.nn import Module
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.modules.loss
class GraphConvolution1(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution1, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCN(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN, self).__init__()
self.gc1 = GraphConvolution1(nfeat, nhid)
self.gc3 = GraphConvolution1(nhid, nhid)
self.gc4 = GraphConvolution1(nhid, nhid)
self.gc2 = GraphConvolution1(nhid, nclass)
self.dropout = dropout
def forward(self, x, adj):
x1 = F.relu(self.gc1(x, adj))
x1 = F.dropout(x1, self.dropout, training=self.training)
x2 = F.relu(self.gc3(x1, adj))
x2 = F.dropout(x2, self.dropout, training=self.training)
x3 = F.relu(self.gc4(x2 + x1, adj))
x3 = F.dropout(x3, self.dropout, training=self.training)
x = self.gc2(x2 + x1 + x3, adj)
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
import math
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn as nn
import torch.nn.modules.loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = tmp4 <= tmp7
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp0 + tmp5
tmp7 = 0.0
tmp8 = tmp5 <= tmp7
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_relu_0[grid(16)](buf2, primals_4, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del primals_4
buf3 = buf0
del buf0
extern_kernels.mm(buf2, primals_5, out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf3, out=buf4)
buf5 = buf3
del buf3
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(16)](buf4,
primals_6, buf2, buf5, buf14, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_6
buf6 = buf4
del buf4
extern_kernels.mm(buf5, primals_7, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf6, out=buf7)
buf8 = buf6
del buf6
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_2[grid(16)](buf5, buf7,
primals_8, buf8, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf9 = buf7
del buf7
extern_kernels.mm(buf8, primals_9, out=buf9)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_10, primals_3, buf9, alpha=1, beta=1,
out=buf10)
del primals_10
buf11 = buf9
del buf9
triton_poi_fused__log_softmax_3[grid(16)](buf10, buf11, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf12 = buf10
del buf10
triton_poi_fused__log_softmax_4[grid(16)](buf11, buf12, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf11
return buf12, buf2, buf12, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0
), reinterpret_tensor(buf8, (4, 4), (1, 4), 0), reinterpret_tensor(
primals_9, (4, 4), (1, 4), 0), buf13, reinterpret_tensor(buf5, (4,
4), (1, 4), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0
), buf14, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)
class GraphConvolution1(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution1, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCNNew(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCNNew, self).__init__()
self.gc1 = GraphConvolution1(nfeat, nhid)
self.gc3 = GraphConvolution1(nhid, nhid)
self.gc4 = GraphConvolution1(nhid, nhid)
self.gc2 = GraphConvolution1(nhid, nclass)
self.dropout = dropout
def forward(self, input_0, input_1):
primals_1 = self.gc1.weight
primals_4 = self.gc1.bias
primals_2 = self.gc3.weight
primals_6 = self.gc3.bias
primals_3 = self.gc4.weight
primals_8 = self.gc4.bias
primals_5 = self.gc2.weight
primals_10 = self.gc2.bias
primals_7 = input_0
primals_9 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
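
# Hypothetical usage sketch (not from the original repo): inputs mirror the
# harness above -- a 4x4 node-feature matrix and a 4x4 adjacency matrix -- and
# only the output shape is checked, since the generated call() contains no
# dropout kernels. Requires CUDA and triton.
if __name__ == '__main__':
    gcn = GCNNew(nfeat=4, nhid=4, nclass=4, dropout=0.5).cuda()
    x = torch.rand(4, 4, device='cuda')
    adj = torch.rand(4, 4, device='cuda')
    log_probs = gcn(x, adj)
    assert log_probs.shape == (4, 4)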
 | repo_name: thilinicooray/pygcn | module_name: GCN | synthetic: false | uuid: 10,878 | licenses: ["MIT"] | stars: 0 | sha: a7d4f12f31898a3b386736215a6d5fe5cb857387 | repo_link: https://github.com/thilinicooray/pygcn/tree/a7d4f12f31898a3b386736215a6d5fe5cb857387 |
LocalVariation
|
import torch
import torch.nn as nn
class LocalVariation(nn.Module):
"""Layer to compute the LocalVariation of an image
"""
def __init__(self, k_size=5):
super(LocalVariation, self).__init__()
self.mu_x_pool = nn.AvgPool2d(k_size, 1)
self.mu_y_pool = nn.AvgPool2d(k_size, 1)
self.sig_x_pool = nn.AvgPool2d(k_size, 1)
self.sig_y_pool = nn.AvgPool2d(k_size, 1)
self.sig_xy_pool = nn.AvgPool2d(k_size, 1)
self.refl = nn.ReflectionPad2d(k_size // 2)
def forward(self, x, y):
x = self.refl(x)
y = self.refl(y)
mu_x = self.mu_x_pool(x)
mu_y = self.mu_y_pool(y)
sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
        sigma_x + sigma_y  # no-op in the original source: the sum is computed and discarded; only sigma_y is returned
return sigma_y
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_pow_reflection_pad2d_sub_0(in_out_ptr0,
in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math
.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2), xmask)
tmp8 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math
.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-1 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2), xmask)
tmp14 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp23 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp26 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-1 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp29 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp32 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp35 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp38 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp41 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-1 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp44 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp47 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp50 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp53 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp56 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-1 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp59 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-1 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp62 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-1 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp65 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-1 + x1) + 16 * x2), xmask)
tmp68 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-1 + x1) + 16 * x2), xmask)
tmp71 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-1 + x0) + -4 *
tl_math.abs(-1 + x1) + 16 * x2), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp3 + tmp1
tmp6 = tmp5 * tmp5
tmp7 = tmp6 + tmp4
tmp9 = tmp8 * tmp8
tmp10 = tmp9 + tmp7
tmp12 = tmp11 * tmp11
tmp13 = tmp12 + tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp15 + tmp13
tmp18 = tmp17 * tmp17
tmp19 = tmp18 + tmp16
tmp21 = tmp20 * tmp20
tmp22 = tmp21 + tmp19
tmp24 = tmp23 * tmp23
tmp25 = tmp24 + tmp22
tmp27 = tmp26 * tmp26
tmp28 = tmp27 + tmp25
tmp30 = tmp29 * tmp29
tmp31 = tmp30 + tmp28
tmp33 = tmp32 * tmp32
tmp34 = tmp33 + tmp31
tmp36 = tmp35 * tmp35
tmp37 = tmp36 + tmp34
tmp39 = tmp38 * tmp38
tmp40 = tmp39 + tmp37
tmp42 = tmp41 * tmp41
tmp43 = tmp42 + tmp40
tmp45 = tmp44 * tmp44
tmp46 = tmp45 + tmp43
tmp48 = tmp47 * tmp47
tmp49 = tmp48 + tmp46
tmp51 = tmp50 * tmp50
tmp52 = tmp51 + tmp49
tmp54 = tmp53 * tmp53
tmp55 = tmp54 + tmp52
tmp57 = tmp56 * tmp56
tmp58 = tmp57 + tmp55
tmp60 = tmp59 * tmp59
tmp61 = tmp60 + tmp58
tmp63 = tmp62 * tmp62
tmp64 = tmp63 + tmp61
tmp66 = tmp65 * tmp65
tmp67 = tmp66 + tmp64
tmp69 = tmp68 * tmp68
tmp70 = tmp69 + tmp67
tmp72 = tmp71 * tmp71
tmp73 = tmp72 + tmp70
tmp74 = 0.04
tmp75 = tmp73 * tmp74
tmp76 = tmp2 + tmp0
tmp77 = tmp5 + tmp76
tmp78 = tmp8 + tmp77
tmp79 = tmp11 + tmp78
tmp80 = tmp14 + tmp79
tmp81 = tmp17 + tmp80
tmp82 = tmp20 + tmp81
tmp83 = tmp23 + tmp82
tmp84 = tmp26 + tmp83
tmp85 = tmp29 + tmp84
tmp86 = tmp32 + tmp85
tmp87 = tmp35 + tmp86
tmp88 = tmp38 + tmp87
tmp89 = tmp41 + tmp88
tmp90 = tmp44 + tmp89
tmp91 = tmp47 + tmp90
tmp92 = tmp50 + tmp91
tmp93 = tmp53 + tmp92
tmp94 = tmp56 + tmp93
tmp95 = tmp59 + tmp94
tmp96 = tmp62 + tmp95
tmp97 = tmp65 + tmp96
tmp98 = tmp68 + tmp97
tmp99 = tmp71 + tmp98
tmp100 = tmp99 * tmp74
tmp101 = tmp100 * tmp100
tmp102 = tmp75 - tmp101
tl.store(in_out_ptr0 + x3, tmp102, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_avg_pool2d_pow_reflection_pad2d_sub_0[grid(256)](buf2,
arg1_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
return buf2,
class LocalVariationNew(nn.Module):
"""Layer to compute the LocalVariation of an image
"""
def __init__(self, k_size=5):
super(LocalVariationNew, self).__init__()
self.mu_x_pool = nn.AvgPool2d(k_size, 1)
self.mu_y_pool = nn.AvgPool2d(k_size, 1)
self.sig_x_pool = nn.AvgPool2d(k_size, 1)
self.sig_y_pool = nn.AvgPool2d(k_size, 1)
self.sig_xy_pool = nn.AvgPool2d(k_size, 1)
self.refl = nn.ReflectionPad2d(k_size // 2)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
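
# Hypothetical usage sketch (not part of the original repo): the fused kernel
# above hard-codes 4x4 spatial inputs for the default k_size=5 (the 0.04
# factor is 1/25, i.e. the 5x5 averaging window), and like the eager forward
# it returns only sigma_y. Requires CUDA and triton.
if __name__ == '__main__':
    lv = LocalVariationNew().cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    sigma_y = lv(x, y)
    assert sigma_y.shape == (4, 4, 4, 4)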
 | repo_name: shlomi-amitai/myDIFFNet | module_name: LocalVariation | synthetic: false | uuid: 10,879 | licenses: ["MIT"] | stars: 0 | sha: 39dead457f10c82caae2a12ea152f2339188014c | repo_link: https://github.com/shlomi-amitai/myDIFFNet/tree/39dead457f10c82caae2a12ea152f2339188014c |
Project3D
|
import torch
import torch.nn as nn
class Project3D(nn.Module):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
def __init__(self, batch_size, height, width, eps=1e-07):
super(Project3D, self).__init__()
self.batch_size = batch_size
self.height = height
self.width = width
self.eps = eps
def forward(self, points, K, T):
P = torch.matmul(K, T)[:, :3, :]
cam_points = torch.matmul(P, points)
pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(
1) + self.eps)
pix_coords = pix_coords.view(self.batch_size, 2, self.height, self.
width)
pix_coords = pix_coords.permute(0, 2, 3, 1)
pix_coords[..., 0] /= self.width - 1
pix_coords[..., 1] /= self.height - 1
pix_coords = (pix_coords - 0.5) * 2
return pix_coords
def get_inputs():
return [torch.rand([4, 3, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'batch_size': 4, 'height': 4, 'width': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 48
x1 = xindex // 48
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex % 32
x4 = xindex
tmp7 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr0 + (x3 + 48 * x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tmp4 == tmp4
tmp9 = 1e-07
tmp10 = tmp8 + tmp9
tmp11 = tmp7 / tmp10
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = tl.where(tmp6, tmp13, tmp11)
tmp16 = tmp15 / tmp10
tmp17 = tl.where(tmp5, tmp13, tmp16)
tmp18 = tl.where(tmp5, tmp14, tmp17)
tmp19 = tmp18 * tmp12
tmp20 = tl.where(tmp3, tmp19, tmp18)
tmp21 = tmp0 == tmp4
tmp23 = tmp22 / tmp10
tmp24 = tl.where(tmp21, tmp13, tmp23)
tmp25 = tl.where(tmp21, tmp14, tmp24)
tmp26 = tl.where(tmp2, tmp19, tmp25)
tmp27 = tl.where(tmp2, tmp20, tmp26)
tmp28 = 0.5
tmp29 = tmp27 - tmp28
tmp30 = 2.0
tmp31 = tmp29 * tmp30
tl.store(out_ptr0 + x4, tmp31, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(192)](buf0, buf1, 192, XBLOCK=256,
num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((12, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (12, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (12, 4, 4), (16, 4, 1), 0), out=buf2
)
del arg2_1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 4, 1, 16), torch.float32)
triton_poi_fused_mul_sub_1[grid(128)](buf2, buf3, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
return buf3,
class Project3DNew(nn.Module):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
def __init__(self, batch_size, height, width, eps=1e-07):
super(Project3DNew, self).__init__()
self.batch_size = batch_size
self.height = height
self.width = width
self.eps = eps
def forward(self, input_0, input_1, input_2):
arg2_1 = input_0
arg0_1 = input_1
arg1_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
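# Illustrative usage sketch (an addition, not part of the original repo; requires a CUDA
# device and the inductor/triton runtime). The fused path above batch-multiplies the two
# (4, 4, 4, 4) inputs, keeps the first three rows along dim 1, batch-multiplies the result
# with the (4, 3, 4, 4) input, then divides the first two rows by the third row plus
# eps = 1e-07, rescales by 1/3, and maps to [-1, 1] via (x - 0.5) * 2, yielding
# (4, 4, 4, 2) pixel coordinates. The arguments below are only placeholders matching the
# shapes asserted in `call`.
if __name__ == '__main__' and torch.cuda.is_available():
    proj = Project3DNew(batch_size=4, height=4, width=4)
    pix_coords = proj(torch.rand(4, 3, 4, 4, device='cuda'),
                      torch.rand(4, 4, 4, 4, device='cuda'),
                      torch.rand(4, 4, 4, 4, device='cuda'))
    assert pix_coords.shape == (4, 4, 4, 2)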
|
shlomi-amitai/myDIFFNet
|
Project3D
| false
| 10,880
|
[
"MIT"
] | 0
|
39dead457f10c82caae2a12ea152f2339188014c
|
https://github.com/shlomi-amitai/myDIFFNet/tree/39dead457f10c82caae2a12ea152f2339188014c
|
SSIM
|
import torch
import torch.nn as nn
class SSIM(nn.Module):
"""Layer to compute the SSIM loss between a pair of images
"""
def __init__(self):
super(SSIM, self).__init__()
self.mu_x_pool = nn.AvgPool2d(3, 1)
self.mu_y_pool = nn.AvgPool2d(3, 1)
self.sig_x_pool = nn.AvgPool2d(3, 1)
self.sig_y_pool = nn.AvgPool2d(3, 1)
self.sig_xy_pool = nn.AvgPool2d(3, 1)
self.refl = nn.ReflectionPad2d(1)
self.C1 = 0.01 ** 2
self.C2 = 0.03 ** 2
def forward(self, x, y):
x = self.refl(x)
y = self.refl(y)
mu_x = self.mu_x_pool(x)
mu_y = self.mu_y_pool(y)
sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y +
self.C2)
return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
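# Illustrative usage sketch (an addition, not part of the original repo): SSIM takes two
# image batches of identical shape and returns a per-pixel dissimilarity map clamped to
# [0, 1]; the spatial size is preserved because the reflection padding compensates for
# the 3x3 average pooling.
if __name__ == '__main__':
    ssim = SSIM()
    img_a, img_b = torch.rand(4, 3, 64, 64), torch.rand(4, 3, 64, 64)
    dissim = ssim(img_a, img_b)
    assert dissim.shape == (4, 3, 64, 64)
    loss = dissim.mean()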
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask)
tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask)
tmp15 = tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask)
tmp19 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp22 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp24 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp26 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp28 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp30 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp32 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp34 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp55 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp58 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp60 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp62 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp64 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp66 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp68 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp70 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tmp21 = tmp20 + tmp19
tmp23 = tmp22 + tmp21
tmp25 = tmp24 + tmp23
tmp27 = tmp26 + tmp25
tmp29 = tmp28 + tmp27
tmp31 = tmp30 + tmp29
tmp33 = tmp32 + tmp31
tmp35 = tmp34 + tmp33
tmp36 = tmp35 * tmp17
tmp37 = tmp19 * tmp19
tmp38 = tmp20 * tmp20
tmp39 = tmp38 + tmp37
tmp40 = tmp22 * tmp22
tmp41 = tmp40 + tmp39
tmp42 = tmp24 * tmp24
tmp43 = tmp42 + tmp41
tmp44 = tmp26 * tmp26
tmp45 = tmp44 + tmp43
tmp46 = tmp28 * tmp28
tmp47 = tmp46 + tmp45
tmp48 = tmp30 * tmp30
tmp49 = tmp48 + tmp47
tmp50 = tmp32 * tmp32
tmp51 = tmp50 + tmp49
tmp52 = tmp34 * tmp34
tmp53 = tmp52 + tmp51
tmp54 = tmp53 * tmp17
tmp57 = tmp56 + tmp55
tmp59 = tmp58 + tmp57
tmp61 = tmp60 + tmp59
tmp63 = tmp62 + tmp61
tmp65 = tmp64 + tmp63
tmp67 = tmp66 + tmp65
tmp69 = tmp68 + tmp67
tmp71 = tmp70 + tmp69
tmp72 = tmp71 * tmp17
tmp73 = tmp55 * tmp55
tmp74 = tmp56 * tmp56
tmp75 = tmp74 + tmp73
tmp76 = tmp58 * tmp58
tmp77 = tmp76 + tmp75
tmp78 = tmp60 * tmp60
tmp79 = tmp78 + tmp77
tmp80 = tmp62 * tmp62
tmp81 = tmp80 + tmp79
tmp82 = tmp64 * tmp64
tmp83 = tmp82 + tmp81
tmp84 = tmp66 * tmp66
tmp85 = tmp84 + tmp83
tmp86 = tmp68 * tmp68
tmp87 = tmp86 + tmp85
tmp88 = tmp70 * tmp70
tmp89 = tmp88 + tmp87
tmp90 = tmp89 * tmp17
tmp91 = 2.0
tmp92 = tmp36 * tmp91
tmp93 = tmp92 * tmp72
tmp94 = 0.0001
tmp95 = tmp93 + tmp94
tmp96 = tmp36 * tmp72
tmp97 = tmp18 - tmp96
tmp98 = tmp97 * tmp91
tmp99 = 0.0009
tmp100 = tmp98 + tmp99
tmp101 = tmp95 * tmp100
tmp102 = tmp36 * tmp36
tmp103 = tmp72 * tmp72
tmp104 = tmp102 + tmp103
tmp105 = tmp104 + tmp94
tmp106 = tmp54 - tmp102
tmp107 = tmp90 - tmp103
tmp108 = tmp106 + tmp107
tmp109 = tmp108 + tmp99
tmp110 = tmp105 * tmp109
tmp111 = tmp101 / tmp110
tmp112 = 1.0
tmp113 = tmp112 - tmp111
tmp114 = 0.5
tmp115 = tmp113 * tmp114
tmp116 = 0.0
tmp117 = triton_helpers.maximum(tmp115, tmp116)
tmp118 = triton_helpers.minimum(tmp117, tmp112)
tl.store(in_out_ptr0 + x3, tmp118, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_reflection_pad2d_0[grid(576)](arg0_1, arg1_1,
buf2, 576, XBLOCK=128, num_warps=4, num_stages=1)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = buf0
del buf0
buf7 = buf6
del buf6
triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1[
grid(256)](buf7, buf2, arg0_1, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del buf2
return buf7,
class SSIMNew(nn.Module):
"""Layer to compute the SSIM loss between a pair of images
"""
def __init__(self):
super(SSIMNew, self).__init__()
self.mu_x_pool = nn.AvgPool2d(3, 1)
self.mu_y_pool = nn.AvgPool2d(3, 1)
self.sig_x_pool = nn.AvgPool2d(3, 1)
self.sig_y_pool = nn.AvgPool2d(3, 1)
self.sig_xy_pool = nn.AvgPool2d(3, 1)
self.refl = nn.ReflectionPad2d(1)
self.C1 = 0.01 ** 2
self.C2 = 0.03 ** 2
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
shlomi-amitai/myDIFFNet
|
SSIM
| false
| 10,881
|
[
"MIT"
] | 0
|
39dead457f10c82caae2a12ea152f2339188014c
|
https://github.com/shlomi-amitai/myDIFFNet/tree/39dead457f10c82caae2a12ea152f2339188014c
|
Net
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 16, 3, 1)
self.conv2 = nn.Conv2d(16, 40, 2, 1)
self.fc1 = nn.Linear(3 * 3 * 40, 400)
self.fc2 = nn.Linear(400, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 3 * 3 * 40)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
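# Shape note derived from the definitions above (an addition, not part of the original
# repo): a 1x64x64 input gives 62x62 after conv1, 31x31 after pooling, 30x30 after conv2
# and 15x15 after the second pooling, i.e. 40 * 15 * 15 = 9000 features per image. The
# hard-coded x.view(-1, 3 * 3 * 40) therefore reshapes a batch of 4 into 100 rows of 360
# features, so the network returns log-probabilities of shape (100, 10) rather than
# (4, 10), matching the (100, 10) buffer produced by the Triton graph for this module.
if __name__ == '__main__':
    net = Net()
    log_probs = net(torch.rand(4, 1, 64, 64))
    assert log_probs.shape == (100, 10)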
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 246016
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 16
x0 = xindex % 3844
x4 = xindex // 3844
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x1 = xindex // 31 % 31
x4 = xindex // 961
x3 = xindex // 15376
x5 = xindex % 15376
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + 15392 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x5 + 15488 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 900 % 40
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 36000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 15
x3 = xindex // 15
x2 = xindex // 9000
x4 = xindex % 9000
tmp0 = tl.load(in_ptr0 + (2 * x0 + 60 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 60 * x3), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (30 + 2 * x0 + 60 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (31 + 2 * x0 + 60 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x4 + 9088 * x2), tmp15, xmask)
tl.store(out_ptr1 + (x4 + 9024 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_view_4(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 36000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 360
x1 = xindex // 360
x2 = xindex
tmp0 = tl.load(in_ptr0 + (15 * ((x0 + 360 * x1) // 15 % 600) + 9024 * (
(x0 + 360 * x1) // 9000) + x0 % 15), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 40000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 100
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (40, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_5, (40,), (1,))
assert_size_stride(primals_6, (400, 360), (360, 1))
assert_size_stride(primals_7, (400,), (1,))
assert_size_stride(primals_8, (10, 400), (400, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 62, 62), (61504, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 16, 62, 62), (61952, 3872, 62, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(246016)](buf0, primals_2,
buf1, 246016, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 16, 31, 31), (15392, 961, 31, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 16, 31, 31), (15488, 961, 31, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(61504)](buf1, buf2,
buf3, 61504, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 40, 30, 30), (36000, 900, 30, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(144000)](buf5, primals_5,
144000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 40, 15, 15), (9088, 225, 15, 1),
torch.int8)
buf7 = empty_strided_cuda((4, 40, 15, 15), (9024, 225, 15, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(36000)](buf5, buf6,
buf7, 36000, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((100, 360), (360, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_view_4[grid(36000)](buf7,
buf8, 36000, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((100, 400), (400, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_6, (360, 400), (
1, 360), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_relu_5[grid(40000)](buf10, primals_7, 40000,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf11 = empty_strided_cuda((100, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf10, reinterpret_tensor(primals_8,
(400, 10), (1, 400), 0), alpha=1, beta=1, out=buf11)
del primals_9
buf14 = empty_strided_cuda((100, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_6[grid(100)](buf11, buf14, 100, 10,
XBLOCK=1, num_warps=2, num_stages=1)
del buf11
return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, buf8, buf10, buf14, primals_8, primals_6)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(1, 16, 3, 1)
self.conv2 = nn.Conv2d(16, 40, 2, 1)
self.fc1 = nn.Linear(3 * 3 * 40, 400)
self.fc2 = nn.Linear(400, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
tkhkaeio/PyTorch-GAN
|
Net
| false
| 10,882
|
[
"MIT"
] | 0
|
565c67cae168a42c6822c787562a1f7a5b35a2ab
|
https://github.com/tkhkaeio/PyTorch-GAN/tree/565c67cae168a42c6822c787562a1f7a5b35a2ab
|
CoAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CoAttention(nn.Module):
"""
CoAttention encoder
in Dynamic Coattention Networks For Question Answering (https://arxiv.org/abs/1611.01604)
check the Figure 2 in paper
* Args:
        embed_dim: the size of the input embedding dimension
"""
def __init__(self, embed_dim):
super(CoAttention, self).__init__()
self.W_0 = nn.Linear(embed_dim * 3, 1, bias=False)
def forward(self, context_embed, question_embed, context_mask=None,
question_mask=None):
C, Q = context_embed, question_embed
B, C_L, Q_L, D = C.size(0), C.size(1), Q.size(1), Q.size(2)
similarity_matrix_shape = torch.zeros(B, C_L, Q_L, D)
C_ = C.unsqueeze(2).expand_as(similarity_matrix_shape)
Q_ = Q.unsqueeze(1).expand_as(similarity_matrix_shape)
C_Q = torch.mul(C_, Q_)
S = self.W_0(torch.cat([C_, Q_, C_Q], 3)).squeeze(3)
S_question = S
if question_mask is not None:
S_question = f.add_masked_value(S_question, question_mask.
unsqueeze(1), value=-10000000.0)
S_q = F.softmax(S_question, 2)
S_context = S.transpose(1, 2)
if context_mask is not None:
S_context = f.add_masked_value(S_context, context_mask.
unsqueeze(1), value=-10000000.0)
S_c = F.softmax(S_context, 2)
A = torch.bmm(S_q, Q)
B = torch.bmm(S_q, S_c).bmm(C)
out = torch.cat([C, A, C * A, C * B], dim=-1)
return out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4}]
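# Notes and usage sketch (an addition, not part of the original repo). The masked branches
# call `f.add_masked_value`, which appears to be a helper from the claf codebase and is not
# imported in this snippet (an assumption); the compiled Triton version of this module
# corresponds to the mask-free path, where those branches are skipped. With context and
# question embeddings of shape (B, L, D), the output concatenates [C, A, C * A, C * B]
# along the last dimension and has shape (B, C_L, 4 * D).
if __name__ == '__main__':
    coattn = CoAttention(embed_dim=4)
    context, question = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
    out = coattn(context, question)
    assert out.shape == (4, 4, 16)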
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x4 = xindex // 48
x1 = xindex // 12 % 4
x3 = xindex // 192
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + 16 * x3 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr0 + (4 * x4 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (4 * x1 + 16 * x3 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 * tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp11, tmp16, tmp17)
tmp19 = tl.where(tmp9, tmp10, tmp18)
tmp20 = tl.where(tmp4, tmp5, tmp19)
tl.store(out_ptr0 + x5, tmp20, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 4
x0 = xindex % 4
x3 = xindex // 16
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (4 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (8 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (12 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp17 = tmp0 - tmp16
tmp18 = tl_math.exp(tmp17)
tl.store(out_ptr0 + x4, tmp9, xmask)
tl.store(out_ptr1 + x4, tmp18, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (y0 + 16 * y1), ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + y0 + 16 * y1), ymask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + y0 + 16 * y1), ymask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + y0 + 16 * y1), ymask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp14, tmp17, tmp18)
tmp20 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp23 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr2 + (4 * x1 + (-12 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp20, tmp25, tmp26)
tmp28 = tl.where(tmp14, tmp19, tmp27)
tmp29 = tl.where(tmp9, tmp10, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + x2, tmp30, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 12), (12, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(768)](primals_2, primals_1, buf0, 768,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 12), (12, 1), 0),
reinterpret_tensor(primals_3, (12, 1), (1, 12), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, buf4, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf2
del buf2
triton_poi_fused__softmax_3[grid(16, 4)](buf4, buf5, 16, 4, XBLOCK=
4, YBLOCK=16, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(buf3, primals_1, out=buf6)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, buf5, out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf7, primals_2, out=buf8)
del buf7
buf9 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_cat_4[grid(256)](primals_2, buf6, buf8, buf9, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf6
del buf8
return buf9, primals_2, reinterpret_tensor(buf0, (64, 12), (12, 1), 0
), buf3, buf5, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0)
class CoAttentionNew(nn.Module):
"""
CoAttention encoder
in Dynamic Coattention Networks For Question Answering (https://arxiv.org/abs/1611.01604)
check the Figure 2 in paper
* Args:
        embed_dim: the size of the input embedding dimension
"""
def __init__(self, embed_dim):
super(CoAttentionNew, self).__init__()
self.W_0 = nn.Linear(embed_dim * 3, 1, bias=False)
def forward(self, input_0, input_1):
primals_3 = self.W_0.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
srlee-ai/claf
|
CoAttention
| false
| 10,883
|
[
"MIT"
] | 0
|
89b3e5c5ec0486886876ea3bac381508c6a6bf58
|
https://github.com/srlee-ai/claf/tree/89b3e5c5ec0486886876ea3bac381508c6a6bf58
|
PositionwiseFeedForward
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class PointwiseConv(nn.Module):
"""
Pointwise Convolution (1x1 Conv)
    1-D convolution (faster version)
    (cf. https://github.com/huggingface/pytorch-openai-transformer-lm/blob/eafc28abdfadfa0732f03a0fc65805c5bfb2ffe7/model_pytorch.py#L45)
    * Args:
        input_size: the size of the input feature dimension
        num_filters: the number of convolution filters
"""
def __init__(self, input_size, num_filters):
super(PointwiseConv, self).__init__()
self.kernel_size = 1
self.num_filters = num_filters
weight = torch.empty(input_size, num_filters)
nn.init.normal_(weight, std=0.02)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(torch.zeros(num_filters))
def forward(self, x):
size_out = x.size()[:-1] + (self.num_filters,)
x = torch.addmm(self.bias, x.contiguous().view(-1, x.size(-1)),
self.weight)
x = x.view(*size_out)
return x
class PositionwiseFeedForward(nn.Module):
"""
Pointwise Feed-Forward Layer
* Args:
        input_size: the size of the input dimension
        hidden_size: the size of the hidden dimension
* Kwargs:
dropout: the probability of dropout
"""
def __init__(self, input_size, hidden_size, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.pointwise_conv1 = PointwiseConv(input_size=input_size,
num_filters=hidden_size)
self.pointwise_conv2 = PointwiseConv(input_size=hidden_size,
num_filters=input_size)
self.activation_fn = F.relu
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
x = self.pointwise_conv1(x)
x = self.activation_fn(x)
x = self.pointwise_conv2(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
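# Illustrative usage sketch (an addition, not part of the original repo): PointwiseConv is
# a 1x1 convolution written as a single addmm over the flattened leading dimensions, so the
# feed-forward block maps (..., input_size) to (..., input_size), expanding to hidden_size
# in between and applying ReLU plus dropout.
if __name__ == '__main__':
    ffn = PositionwiseFeedForward(input_size=4, hidden_size=4)
    x = torch.rand(4, 4, 4, 4)
    y = ffn(x)
    assert y.shape == x.shape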
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
primals_3, out=buf0)
del primals_3
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), primals_5, alpha=1, beta=1, out=buf2)
del primals_4
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
), reinterpret_tensor(buf1, (4, 64), (1, 4), 0
), buf3, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0)
class PointwiseConv(nn.Module):
"""
Pointwise Convolution (1x1 Conv)
    1-D convolution (faster version)
    (cf. https://github.com/huggingface/pytorch-openai-transformer-lm/blob/eafc28abdfadfa0732f03a0fc65805c5bfb2ffe7/model_pytorch.py#L45)
    * Args:
        input_size: the size of the input feature dimension
        num_filters: the number of convolution filters
"""
def __init__(self, input_size, num_filters):
super(PointwiseConv, self).__init__()
self.kernel_size = 1
self.num_filters = num_filters
weight = torch.empty(input_size, num_filters)
nn.init.normal_(weight, std=0.02)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(torch.zeros(num_filters))
def forward(self, x):
size_out = x.size()[:-1] + (self.num_filters,)
x = torch.addmm(self.bias, x.contiguous().view(-1, x.size(-1)),
self.weight)
x = x.view(*size_out)
return x
class PositionwiseFeedForwardNew(nn.Module):
"""
Pointwise Feed-Forward Layer
* Args:
        input_size: the size of the input dimension
        hidden_size: the size of the hidden dimension
* Kwargs:
dropout: the probability of dropout
"""
def __init__(self, input_size, hidden_size, dropout=0.1):
super(PositionwiseFeedForwardNew, self).__init__()
self.pointwise_conv1 = PointwiseConv(input_size=input_size,
num_filters=hidden_size)
self.pointwise_conv2 = PointwiseConv(input_size=hidden_size,
num_filters=input_size)
self.activation_fn = F.relu
self.dropout = nn.Dropout(p=dropout)
def forward(self, input_0):
primals_3 = self.pointwise_conv1.weight
primals_2 = self.pointwise_conv1.bias
primals_5 = self.pointwise_conv2.weight
primals_4 = self.pointwise_conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
srlee-ai/claf
|
PositionwiseFeedForward
| false
| 10,884
|
[
"MIT"
] | 0
|
89b3e5c5ec0486886876ea3bac381508c6a6bf58
|
https://github.com/srlee-ai/claf/tree/89b3e5c5ec0486886876ea3bac381508c6a6bf58
|
LayerNorm
|
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""
Layer Normalization
(https://arxiv.org/abs/1607.06450)
"""
def __init__(self, normalized_shape, eps=1e-05):
super(LayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(normalized_shape))
self.beta = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'normalized_shape': 4}]
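# Note and usage sketch (an addition, not part of the original repo): x.std(-1) uses the
# unbiased estimator by default (division by N - 1), which is why the generated Triton
# kernel for this module divides the summed squared deviations by 3.0 when
# normalized_shape=4; nn.LayerNorm instead uses the biased variance, so the two differ
# slightly for small feature dimensions.
if __name__ == '__main__':
    ln = LayerNorm(normalized_shape=4)
    x = torch.rand(4, 4, 4, 4)
    y = ln(x)
    assert y.shape == x.shape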
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-05
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class LayerNormNew(nn.Module):
"""
Layer Normalization
(https://arxiv.org/abs/1607.06450)
"""
def __init__(self, normalized_shape, eps=1e-05):
super(LayerNormNew, self).__init__()
self.gamma = nn.Parameter(torch.ones(normalized_shape))
self.beta = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
srlee-ai/claf
|
LayerNorm
| false
| 10,885
|
[
"MIT"
] | 0
|
89b3e5c5ec0486886876ea3bac381508c6a6bf58
|
https://github.com/srlee-ai/claf/tree/89b3e5c5ec0486886876ea3bac381508c6a6bf58
|
MultiHeadAttention
|
import torch
from abc import ABC
import torch.nn as nn
from torch import matmul
class ScaledDotProductAttention(nn.Module, ABC):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout, inplace=False)
self.softmax = nn.Softmax(dim=-1)
def forward(self, q, k, v, mask=None):
attn = matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(self.softmax(attn))
output = matmul(attn, v)
return output, attn
class MultiHeadAttention(nn.Module, ABC):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.d_v = d_v
self.d_k = d_k
self.n_head = n_head
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout, inplace=True)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'n_head': 4, 'd_model': 4, 'd_k': 4, 'd_v': 4}]
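# Illustrative usage sketch (an addition, not part of the original repo): q, k and v are
# (batch, seq_len, d_model) tensors; the module returns the attended, residual-added and
# layer-normalised sequence together with per-head attention weights of shape
# (batch, n_head, len_q, len_k).
if __name__ == '__main__':
    mha = MultiHeadAttention(n_head=4, d_model=4, d_k=4, d_v=4)
    q = k = v = torch.rand(4, 4, 4)
    out, attn = mha(q, k, v)
    assert out.shape == (4, 4, 4) and attn.shape == (4, 4, 4, 4)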
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from abc import ABC
import torch.nn as nn
from torch import matmul
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 16), (16, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_7, (16, 4), (1, 16), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_5[grid(16)](buf11, primals_1,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_6[grid(64)](buf11, primals_1,
buf12, buf13, primals_8, primals_9, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf12
del buf13
del primals_9
return buf14, buf7, primals_1, primals_8, reinterpret_tensor(primals_2,
(16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0
), buf11, primals_7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0)
class ScaledDotProductAttention(nn.Module, ABC):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout, inplace=False)
self.softmax = nn.Softmax(dim=-1)
def forward(self, q, k, v, mask=None):
attn = matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(self.softmax(attn))
output = matmul(attn, v)
return output, attn
class MultiHeadAttentionNew(nn.Module, ABC):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.d_v = d_v
self.d_k = d_k
self.n_head = n_head
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout, inplace=True)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, input_0, input_1, input_2):
primals_4 = self.w_qs.weight
primals_5 = self.w_ks.weight
primals_6 = self.w_vs.weight
primals_7 = self.fc.weight
primals_8 = self.layer_norm.weight
primals_9 = self.layer_norm.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
|
superMC5657/transformer
|
MultiHeadAttention
| false
| 10,886
|
[
"MIT"
] | 0
|
b9d9ca3a5f307f6587330a8235e8d5a2a3650510
|
https://github.com/superMC5657/transformer/tree/b9d9ca3a5f307f6587330a8235e8d5a2a3650510
|
Multi_Head_Attention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Scaled_Dot_Product_Attention(nn.Module):
"""Scaled Dot-Product Attention """
def __init__(self):
super(Scaled_Dot_Product_Attention, self).__init__()
def forward(self, Q, K, V, scale=None):
"""
Args:
Q: [batch_size, len_Q, dim_Q]
K: [batch_size, len_K, dim_K]
V: [batch_size, len_V, dim_V]
            scale: scaling factor; the paper uses sqrt(dim_K)
Return:
            the self-attended tensor, together with the attention tensor
"""
attention = torch.matmul(Q, K.permute(0, 2, 1))
if scale:
attention = attention * scale
attention = F.softmax(attention, dim=-1)
context = torch.matmul(attention, V)
return context
class Multi_Head_Attention(nn.Module):
def __init__(self, dim_model, num_head, dropout=0.0):
super(Multi_Head_Attention, self).__init__()
self.num_head = num_head
assert dim_model % num_head == 0
self.dim_head = dim_model // self.num_head
self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
self.attention = Scaled_Dot_Product_Attention()
self.fc = nn.Linear(num_head * self.dim_head, dim_model)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(dim_model)
def forward(self, x):
batch_size = x.size(0)
Q = self.fc_Q(x)
K = self.fc_K(x)
V = self.fc_V(x)
Q = Q.view(batch_size * self.num_head, -1, self.dim_head)
K = K.view(batch_size * self.num_head, -1, self.dim_head)
V = V.view(batch_size * self.num_head, -1, self.dim_head)
scale = K.size(-1) ** -0.5
context = self.attention(Q, K, V, scale)
context = context.view(batch_size, -1, self.dim_head * self.num_head)
out = self.fc(context)
out = self.dropout(out)
out = out + x
out = self.layer_norm(out)
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dim_model': 4, 'num_head': 4}]
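# Shape note derived from the code above (an addition, not part of the original repo):
# with the 2-D test input of shape (4, 4), dim_head = dim_model // num_head = 1 and
# Q.view(batch_size * num_head, -1, dim_head) yields (16, 1, 1), so the softmax runs over
# a single key and every attention weight equals 1 (the fused Triton softmax kernel for
# this module accordingly reduces to exp(x - x) / exp(x - x)). The residual `out + x` then
# broadcasts (4, 1, 4) against (4, 4), giving a (4, 4, 4) output.
if __name__ == '__main__':
    mha = Multi_Head_Attention(dim_model=4, num_head=4)
    y = mha(torch.rand(4, 4))
    assert y.shape == (4, 4, 4)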
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tmp2 - tmp2
tmp4 = tmp3 * tmp1
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 / tmp5
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x5, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, primals_1, reinterpret_tensor(
primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1),
0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0), out=buf3)
buf4 = buf3
del buf3
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(buf4, reinterpret_tensor(buf2, (16, 1, 1), (1, 1,
1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (4, 4), (4,
1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf6)
del primals_9
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(16)](buf6, primals_1,
buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(64)](buf6, primals_1,
buf7, buf8, primals_10, primals_11, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf7
del buf8
del primals_11
return buf9, primals_1, primals_10, buf4, reinterpret_tensor(buf5, (4,
4), (4, 1), 0), buf6, primals_8, reinterpret_tensor(buf2, (16, 1, 1
), (1, 1, 1), 0), reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1), 0
), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0)
class Scaled_Dot_Product_Attention(nn.Module):
"""Scaled Dot-Product Attention """
def __init__(self):
super(Scaled_Dot_Product_Attention, self).__init__()
def forward(self, Q, K, V, scale=None):
"""
Args:
Q: [batch_size, len_Q, dim_Q]
K: [batch_size, len_K, dim_K]
V: [batch_size, len_V, dim_V]
            scale: scaling factor; the paper uses 1 / sqrt(dim_K)
        Return:
            context: the tensor after self-attention, [batch_size, len_Q, dim_V]
"""
attention = torch.matmul(Q, K.permute(0, 2, 1))
if scale:
attention = attention * scale
attention = F.softmax(attention, dim=-1)
context = torch.matmul(attention, V)
return context
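# --- Illustrative toy check (editor's addition) ---
# The class above is the plain softmax(Q K^T * scale) V computation; a dropout-free
# reference built from torch.nn.functional should match it exactly:
def _toy_sdpa_check():
    Q, K, V = torch.rand(2, 3, 8), torch.rand(2, 3, 8), torch.rand(2, 3, 8)
    ctx = Scaled_Dot_Product_Attention()(Q, K, V, scale=8 ** -0.5)
    ref = F.softmax(Q @ K.transpose(1, 2) * 8 ** -0.5, dim=-1) @ V
    assert torch.allclose(ctx, ref)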
class Multi_Head_AttentionNew(nn.Module):
def __init__(self, dim_model, num_head, dropout=0.0):
super(Multi_Head_AttentionNew, self).__init__()
self.num_head = num_head
assert dim_model % num_head == 0
self.dim_head = dim_model // self.num_head
self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
self.attention = Scaled_Dot_Product_Attention()
self.fc = nn.Linear(num_head * self.dim_head, dim_model)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(dim_model)
def forward(self, input_0):
primals_1 = self.fc_Q.weight
primals_3 = self.fc_Q.bias
primals_2 = self.fc_K.weight
primals_5 = self.fc_K.bias
primals_4 = self.fc_V.weight
primals_7 = self.fc_V.bias
primals_6 = self.fc.weight
primals_9 = self.fc.bias
primals_10 = self.layer_norm.weight
primals_11 = self.layer_norm.bias
primals_8 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
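# --- Illustrative smoke test for the compiled wrapper (editor's addition; needs CUDA) ---
# call() above hard-codes 4x4 shapes and CUDA buffers, so the wrapper is only expected
# to run on a GPU with exactly the sizes produced by get_inputs()/get_init_inputs().
if __name__ == '__main__' and torch.cuda.is_available():
    mha_new = Multi_Head_AttentionNew(dim_model=4, num_head=4).cuda()
    out = mha_new(torch.rand(4, 4, device='cuda'))
    print(out.shape)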
|
tianjiansmile/Chinese-Text-Classification-Pytorch
|
Multi_Head_Attention
| false
| 10,887
|
[
"MIT"
] | 0
|
05cc211b161f61e6bb32ab185dadcffec2f5b5de
|
https://github.com/tianjiansmile/Chinese-Text-Classification-Pytorch/tree/05cc211b161f61e6bb32ab185dadcffec2f5b5de
|
Bilinear
|
import torch
import torch.nn as nn
class Bilinear(nn.Module):
def __init__(self, dim_left, dim_right, dim_out):
super().__init__()
self.dim_left = dim_left
self.dim_right = dim_right
self.dim_out = dim_out
self.bilinear = nn.Bilinear(dim_left, dim_right, dim_out)
self.linear_l = nn.Linear(dim_left, dim_out)
self.linear_r = nn.Linear(dim_right, dim_out)
def forward(self, x_l, x_r):
x = self.bilinear(x_l, x_r)
x += self.linear_l(x_l) + self.linear_r(x_r)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_left': 4, 'dim_right': 4, 'dim_out': 4}]
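# --- Illustrative reference implementation (editor's addition) ---
# nn.Bilinear stores a weight of shape (dim_out, dim_left, dim_right), so the module
# above computes, per output feature o:
#   out[..., o] = x_l . W[o] . x_r + b[o] + (linear_l(x_l) + linear_r(x_r))[..., o]
# The same thing written with einsum, handy for cross-checking the fused kernel:
def bilinear_reference(module, x_l, x_r):
    pair = torch.einsum('...i,oij,...j->...o', x_l, module.bilinear.weight, x_r)
    return pair + module.bilinear.bias + module.linear_l(x_l) + module.linear_r(x_r)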
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x2, xmask)
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp8 = tmp6 + tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp2 + tmp9
tl.store(in_out_ptr0 + x2, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(
primals_4, (64, 4), (4, 1), 0), primals_1, reinterpret_tensor(
primals_3, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_1
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3)
del primals_7
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf4, primals_2, buf2, primals_6,
buf3, primals_8, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
del buf3
del primals_2
del primals_6
del primals_8
return buf4, primals_3, primals_4
class BilinearNew(nn.Module):
def __init__(self, dim_left, dim_right, dim_out):
super().__init__()
self.dim_left = dim_left
self.dim_right = dim_right
self.dim_out = dim_out
self.bilinear = nn.Bilinear(dim_left, dim_right, dim_out)
self.linear_l = nn.Linear(dim_left, dim_out)
self.linear_r = nn.Linear(dim_right, dim_out)
def forward(self, input_0, input_1):
primals_1 = self.bilinear.weight
primals_2 = self.bilinear.bias
primals_5 = self.linear_l.weight
primals_6 = self.linear_l.bias
primals_7 = self.linear_r.weight
primals_8 = self.linear_r.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
tpimentelms/dep-parser
|
Bilinear
| false
| 10,888
|
[
"MIT"
] | 0
|
be622cdd9a8b0ba85a28c39129ae2cdbfef03901
|
https://github.com/tpimentelms/dep-parser/tree/be622cdd9a8b0ba85a28c39129ae2cdbfef03901
|
EncoderLayer
|
import torch
from abc import ABC
import torch.nn as nn
from torch import matmul
class ScaledDotProductAttention(nn.Module, ABC):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout, inplace=False)
self.softmax = nn.Softmax(dim=-1)
def forward(self, q, k, v, mask=None):
attn = matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(self.softmax(attn))
output = matmul(attn, v)
return output, attn
class MultiHeadAttention(nn.Module, ABC):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.d_v = d_v
self.d_k = d_k
self.n_head = n_head
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout, inplace=True)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module, ABC):
def __init__(self, d_in, d_hidden, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hidden)
self.w_2 = nn.Linear(d_hidden, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, x):
residual = x
x = self.w_2(self.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class EncoderLayer(nn.Module, ABC):
""" Compose with two layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, enc_input, slf_attn_mask=None):
enc_output, enc_slf_attn = self.slf_attn(enc_input, enc_input,
enc_input, mask=slf_attn_mask)
enc_output = self.pos_ffn(enc_output)
return enc_output, enc_slf_attn
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_inner': 4, 'n_head': 4, 'd_k': 4, 'd_v': 4}]
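# --- Illustrative usage sketch (editor's addition) ---
# The encoder layer returns both the transformed sequence and the attention map.
if __name__ == '__main__':
    layer = EncoderLayer(**get_init_inputs()[1]).eval()   # eval() disables the dropouts
    enc_out, attn = layer(*get_inputs())
    print(enc_out.shape, attn.shape)   # [4, 4, 4] and [4, n_head=4, 4, 4]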
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from abc import ABC
import torch.nn as nn
from torch import matmul
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (4, 16), (16, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_5[grid(16)](buf11, primals_1,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_6[grid(64)](buf11, primals_1,
buf12, buf13, primals_6, primals_7, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf15)
buf16 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0)
del buf15
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_7[grid(64)](buf16,
primals_9, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_view_8[grid(64)](buf16, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf18 = reinterpret_tensor(buf16, (16, 4), (4, 1), 0)
del buf16
extern_kernels.mm(buf17, reinterpret_tensor(primals_10, (4, 4), (1,
4), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_add_9[grid(64)](buf19, primals_11, buf14, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
buf20 = buf13
del buf13
buf21 = buf12
del buf12
triton_poi_fused_native_layer_norm_10[grid(16)](buf19, buf20, buf21,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_11[grid(64)](buf19, buf20, buf21,
primals_12, primals_13, buf22, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf20
del buf21
del primals_13
return (buf22, buf7, primals_1, primals_6, primals_12, buf7,
reinterpret_tensor(buf10, (16, 16), (16, 1), 0), buf11,
reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf17, buf19,
primals_10, buf23, primals_8, primals_5, reinterpret_tensor(buf8, (
16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16,
1, 4), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0))
class ScaledDotProductAttention(nn.Module, ABC):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout, inplace=False)
self.softmax = nn.Softmax(dim=-1)
def forward(self, q, k, v, mask=None):
attn = matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(self.softmax(attn))
output = matmul(attn, v)
return output, attn
class MultiHeadAttention(nn.Module, ABC):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.d_v = d_v
self.d_k = d_k
self.n_head = n_head
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout, inplace=True)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module, ABC):
def __init__(self, d_in, d_hidden, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hidden)
self.w_2 = nn.Linear(d_hidden, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, x):
residual = x
x = self.w_2(self.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class EncoderLayerNew(nn.Module, ABC):
""" Compose with two layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayerNew, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, input_0):
primals_2 = self.slf_attn.w_qs.weight
primals_3 = self.slf_attn.w_ks.weight
primals_4 = self.slf_attn.w_vs.weight
primals_5 = self.slf_attn.fc.weight
primals_6 = self.slf_attn.layer_norm.weight
primals_7 = self.slf_attn.layer_norm.bias
primals_8 = self.pos_ffn.w_1.weight
primals_9 = self.pos_ffn.w_1.bias
primals_10 = self.pos_ffn.w_2.weight
primals_11 = self.pos_ffn.w_2.bias
primals_12 = self.pos_ffn.layer_norm.weight
primals_13 = self.pos_ffn.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
|
superMC5657/transformer
|
EncoderLayer
| false
| 10,889
|
[
"MIT"
] | 0
|
b9d9ca3a5f307f6587330a8235e8d5a2a3650510
|
https://github.com/superMC5657/transformer/tree/b9d9ca3a5f307f6587330a8235e8d5a2a3650510
|
Conv
|
import torch
import torch.nn as nn
class Conv(nn.Module):
def __init__(self, chn_in, chn_out, ker_sz=3):
super().__init__()
self.c = nn.Conv2d(chn_in, chn_out, ker_sz, padding=ker_sz // 2,
padding_mode='circular', bias=False)
self.a = nn.ReLU()
def forward(self, x):
x = self.c(x)
x = self.a(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'chn_in': 4, 'chn_out': 4}]
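# --- Illustrative note on circular padding (editor's addition) ---
# padding_mode='circular' with a 3x3 kernel and padding=1 wraps the borders around;
# the compiled graph below materialises the wrapped 6x6 buffer first and then calls
# the convolution with padding=(0, 0). The eager equivalent of that padded
# convolution (before the ReLU) is:
import torch.nn.functional as F
def circular_conv_reference(conv, x):
    x = F.pad(x, (1, 1, 1, 1), mode='circular')   # wrap width and height by one pixel
    return F.conv2d(x, conv.c.weight, bias=None, padding=0)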
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_copy_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 5, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = -4 + x0
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tmp5 & tmp2
tmp7 = tmp0 >= tmp4
tmp8 = tmp0 < tmp1
tmp9 = tmp7 & tmp8
tmp10 = tmp9 & tmp6
tmp11 = x1
tmp12 = tmp11 >= tmp4
tmp13 = tmp11 < tmp1
tmp14 = tmp12 & tmp13
tmp15 = tmp14 & tmp10
tmp16 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp15 & xmask,
other=0.0)
tmp17 = tl.load(in_ptr1 + x4, tmp10 & xmask, other=0.0)
tmp18 = tl.where(tmp14, tmp16, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp10, tmp18, tmp19)
tmp21 = float('nan')
tmp22 = tl.where(tmp9, tmp20, tmp21)
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp6, tmp22, tmp23)
tmp25 = tmp3 >= tmp4
tmp26 = tmp3 < tmp1
tmp27 = tmp25 & tmp26
tmp28 = tmp27 & tmp2
tmp29 = tmp14 & tmp28
tmp30 = tl.load(in_ptr0 + (-9 + x0 + 4 * x1 + 16 * x2), tmp29 & xmask,
other=0.0)
tmp31 = tl.load(in_ptr1 + (-4 + x4), tmp28 & xmask, other=0.0)
tmp32 = tl.where(tmp14, tmp30, tmp31)
tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
tmp34 = tl.where(tmp28, tmp32, tmp33)
tmp35 = tl.where(tmp27, tmp34, tmp21)
tmp36 = tl.where(tmp5, tmp24, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp2, tmp36, tmp37)
tmp39 = tmp0 < tmp4
tmp40 = 4 + x0
tmp41 = tmp40 >= tmp4
tmp42 = tmp40 < tmp1
tmp43 = tmp41 & tmp42
tmp44 = tmp43 & tmp39
tmp45 = tmp14 & tmp44
tmp46 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp45 & xmask,
other=0.0)
tmp47 = tl.load(in_ptr1 + (4 + x4), tmp44 & xmask, other=0.0)
tmp48 = tl.where(tmp14, tmp46, tmp47)
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp44, tmp48, tmp49)
tmp51 = tl.where(tmp43, tmp50, tmp21)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp39, tmp51, tmp52)
tmp54 = tmp14 & tmp9
tmp55 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp54 & xmask,
other=0.0)
tmp56 = tl.load(in_ptr1 + x4, tmp9 & xmask, other=0.0)
tmp57 = tl.where(tmp14, tmp55, tmp56)
tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype)
tmp59 = tl.where(tmp9, tmp57, tmp58)
tmp60 = tl.where(tmp9, tmp59, tmp21)
tmp61 = tl.where(tmp39, tmp53, tmp60)
tmp62 = tl.where(tmp2, tmp38, tmp61)
tl.store(out_ptr0 + x4, tmp62, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 6
x0 = xindex % 6
x2 = xindex // 36
x3 = xindex
tmp14 = tl.load(in_ptr0 + x3, xmask)
tmp0 = x1
tmp1 = tl.full([1], 5, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = -4 + x1
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tmp5 & tmp2
tmp7 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp8 = tl.load(in_ptr0 + (-24 + x3), tmp2 & xmask, other=0.0)
tmp9 = tl.where(tmp5, tmp7, tmp8)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp2, tmp9, tmp10)
tmp12 = tmp0 < tmp4
tmp13 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp12, tmp13, tmp14)
tmp16 = tl.where(tmp2, tmp11, tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_copy_0[grid(576)](primals_2, buf0, buf1, 576,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = buf0
del buf0
triton_poi_fused_1[grid(576)](buf1, buf2, 576, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
buf3 = extern_kernels.convolution(buf2, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(256)](buf4, buf5,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf4, primals_1, buf2, buf5
class ConvNew(nn.Module):
def __init__(self, chn_in, chn_out, ker_sz=3):
super().__init__()
self.c = nn.Conv2d(chn_in, chn_out, ker_sz, padding=ker_sz // 2,
padding_mode='circular', bias=False)
self.a = nn.ReLU()
def forward(self, input_0):
primals_1 = self.c.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
tuxedcat/A2C
|
Conv
| false
| 10,890
|
[
"Apache-2.0"
] | 0
|
4a6686af05667f8760f2731f184e1845a2d11c6f
|
https://github.com/tuxedcat/A2C/tree/4a6686af05667f8760f2731f184e1845a2d11c6f
|
BiAvg
|
import torch
from torch import nn
class BiAvg(nn.AvgPool1d):
def forward(self, x):
x = x.transpose(1, 2)
x = super().forward(x)
return x.transpose(1, 2)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4}]
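# --- Illustrative note (editor's addition) ---
# BiAvg pools over the sequence dimension: (B, L, C) -> transpose -> AvgPool1d over L
# -> transpose back. With kernel_size equal to the sequence length (4 here), this is
# just the mean over dim=1, which is what the single fused kernel below computes.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4)
    pooled = BiAvg(kernel_size=4)(x)              # shape (4, 1, 4)
    assert torch.allclose(pooled, x.mean(dim=1, keepdim=True))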
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0),
class BiAvgNew(nn.AvgPool1d):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
urchade/urchade-byte_search
|
BiAvg
| false
| 10,891
|
[
"MIT"
] | 0
|
5155adb1550dcab873db4e9b124c42da24c99b8e
|
https://github.com/urchade/urchade-byte_search/tree/5155adb1550dcab873db4e9b124c42da24c99b8e
|
DecoderLayer
|
import torch
from abc import ABC
import torch.nn as nn
from torch import matmul
class ScaledDotProductAttention(nn.Module, ABC):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout, inplace=False)
self.softmax = nn.Softmax(dim=-1)
def forward(self, q, k, v, mask=None):
attn = matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(self.softmax(attn))
output = matmul(attn, v)
return output, attn
class MultiHeadAttention(nn.Module, ABC):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.d_v = d_v
self.d_k = d_k
self.n_head = n_head
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout, inplace=True)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module, ABC):
def __init__(self, d_in, d_hidden, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hidden)
self.w_2 = nn.Linear(d_hidden, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, x):
residual = x
x = self.w_2(self.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class DecoderLayer(nn.Module, ABC):
""" Compose with three layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(DecoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, dec_input, enc_output, slf_attn_mask=None,
dec_enc_attn_mask=None):
dec_output, dec_slf_attn = self.slf_attn(dec_input, dec_input,
dec_input, mask=slf_attn_mask)
dec_output, dec_enc_attn = self.enc_attn(dec_output, enc_output,
enc_output, mask=dec_enc_attn_mask)
dec_output = self.pos_ffn(dec_output)
return dec_output, dec_slf_attn, dec_enc_attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_inner': 4, 'n_head': 4, 'd_k': 4, 'd_v': 4}]
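# --- Illustrative usage sketch (editor's addition) ---
# The decoder layer consumes the decoder input plus the encoder output and returns the
# transformed sequence together with both attention maps (self- and encoder-attention).
if __name__ == '__main__':
    layer = DecoderLayer(**get_init_inputs()[1]).eval()
    dec_out, slf_attn, dec_enc_attn = layer(*get_inputs())
    print(dec_out.shape, slf_attn.shape, dec_enc_attn.shape)   # [4,4,4], [4,4,4,4], [4,4,4,4]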
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from abc import ABC
import torch.nn as nn
from torch import matmul
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_10(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (4, 16), (16, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (16, 4), (4, 1))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4,), (1,))
assert_size_stride(primals_20, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_5[grid(16)](buf11, primals_1,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_6[grid(64)](buf11, primals_1,
buf12, buf13, primals_6, primals_7, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
buf15 = reinterpret_tensor(buf9, (16, 16), (16, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), out=buf15)
buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_8, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), out=buf16)
del primals_10
buf17 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_8, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 16), (1, 4), 0), out=buf17)
del primals_11
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_div_0[grid(256)](buf15, buf18, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf19 = reinterpret_tensor(buf15, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf15
triton_poi_fused_clone_1[grid(64, 4)](buf16, buf19, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf20 = reinterpret_tensor(buf16, (16, 4, 4), (16, 4, 1), 0)
del buf16
extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0), out=buf20
)
buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf20, buf21, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf22 = reinterpret_tensor(buf20, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf20
triton_poi_fused__softmax_3[grid(256)](buf21, buf22, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf23 = buf21
del buf21
triton_poi_fused_clone_4[grid(256)](buf17, buf23, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf24 = reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0)
del buf17
extern_kernels.bmm(reinterpret_tensor(buf22, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf23, (16, 4, 4), (16, 4, 1), 0), out=buf24
)
buf25 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf24, buf25, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf24
buf26 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf25, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf26)
buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0)
del buf26
triton_poi_fused_add_7[grid(64)](buf27, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf28 = buf13
del buf13
buf29 = buf12
del buf12
triton_poi_fused_native_layer_norm_8[grid(16)](buf27, buf28, buf29,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(64)](buf27, buf28, buf29,
primals_13, primals_14, buf30, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_14
buf31 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf30, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf31)
buf32 = reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0)
del buf31
buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_10[grid(64)](buf32,
primals_16, buf39, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_16
buf33 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_view_11[grid(64)](buf32, buf33, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf34 = reinterpret_tensor(buf32, (16, 4), (4, 1), 0)
del buf32
extern_kernels.mm(buf33, reinterpret_tensor(primals_17, (4, 4), (1,
4), 0), out=buf34)
buf35 = reinterpret_tensor(buf34, (4, 4, 4), (16, 4, 1), 0)
del buf34
triton_poi_fused_add_12[grid(64)](buf35, primals_18, buf30, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_18
buf36 = buf29
del buf29
buf37 = buf28
del buf28
triton_poi_fused_native_layer_norm_8[grid(16)](buf35, buf36, buf37,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf38 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(64)](buf35, buf36, buf37,
primals_19, primals_20, buf38, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf36
del buf37
del primals_20
return (buf38, buf7, buf22, primals_1, primals_6, primals_13,
primals_19, buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0),
buf11, reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (16, 4), (4, 1), 0), buf22,
reinterpret_tensor(buf25, (16, 16), (16, 1), 0), buf27,
reinterpret_tensor(buf30, (16, 4), (4, 1), 0), buf33, buf35,
primals_17, buf39, primals_15, primals_12, reinterpret_tensor(buf23,
(16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf18, (16, 4, 4), (
16, 1, 4), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 1, 4), 0),
primals_9, primals_5, reinterpret_tensor(buf8, (16, 4, 4), (16, 1,
4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0))
class ScaledDotProductAttention(nn.Module, ABC):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout, inplace=False)
self.softmax = nn.Softmax(dim=-1)
def forward(self, q, k, v, mask=None):
attn = matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(self.softmax(attn))
output = matmul(attn, v)
return output, attn
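# Annotation added for clarity (not in the original source): the forward above
# computes Attention(q, k, v) = dropout(softmax(q @ k.transpose(2, 3) / temperature)) @ v,
# and MultiHeadAttention below instantiates it with temperature = d_k ** 0.5.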
class MultiHeadAttention(nn.Module, ABC):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.d_v = d_v
self.d_k = d_k
self.n_head = n_head
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout, inplace=True)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module, ABC):
def __init__(self, d_in, d_hidden, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hidden)
self.w_2 = nn.Linear(d_hidden, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, x):
residual = x
x = self.w_2(self.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class DecoderLayerNew(nn.Module, ABC):
""" Compose with three layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(DecoderLayerNew, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, input_0, input_1):
primals_2 = self.slf_attn.w_qs.weight
primals_3 = self.slf_attn.w_ks.weight
primals_4 = self.slf_attn.w_vs.weight
primals_5 = self.slf_attn.fc.weight
primals_6 = self.slf_attn.layer_norm.weight
primals_7 = self.slf_attn.layer_norm.bias
primals_9 = self.enc_attn.w_qs.weight
primals_10 = self.enc_attn.w_ks.weight
primals_11 = self.enc_attn.w_vs.weight
primals_12 = self.enc_attn.fc.weight
primals_13 = self.enc_attn.layer_norm.weight
primals_14 = self.enc_attn.layer_norm.bias
primals_15 = self.pos_ffn.w_1.weight
primals_16 = self.pos_ffn.w_1.bias
primals_17 = self.pos_ffn.w_2.weight
primals_18 = self.pos_ffn.w_2.bias
primals_19 = self.pos_ffn.layer_norm.weight
primals_20 = self.pos_ffn.layer_norm.bias
primals_1 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20])
return output[0], output[1], output[2]
|
superMC5657/transformer
|
DecoderLayer
| false
| 10,892
|
[
"MIT"
] | 0
|
b9d9ca3a5f307f6587330a8235e8d5a2a3650510
|
https://github.com/superMC5657/transformer/tree/b9d9ca3a5f307f6587330a8235e8d5a2a3650510
|
Biaffine
|
import torch
import torch.nn as nn
class Biaffine(nn.Module):
def __init__(self, dim_left, dim_right):
super().__init__()
self.dim_left = dim_left
self.dim_right = dim_right
self.matrix = nn.Parameter(torch.Tensor(dim_left, dim_right))
self.bias = nn.Parameter(torch.Tensor(1))
self.linear_l = nn.Linear(dim_left, 1)
self.linear_r = nn.Linear(dim_right, 1)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.bias, 0.0)
nn.init.xavier_uniform_(self.matrix)
def forward(self, x_l, x_r):
x = torch.matmul(x_l, self.matrix)
x = torch.bmm(x, x_r.transpose(1, 2)) + self.bias
x += self.linear_l(x_l) + self.linear_r(x_r).transpose(1, 2)
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim_left': 4, 'dim_right': 4}]
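# Hedged usage sketch, added for illustration and not part of the original repo:
# the biaffine score combines a bilinear term x_l @ W @ x_r^T with per-side
# linear terms and a scalar bias, giving one score per (left, right) position pair.
def _example_biaffine_usage():
    init_args, init_kwargs = get_init_inputs()
    module = Biaffine(*init_args, **init_kwargs)
    x_l, x_r = get_inputs()
    scores = module(x_l, x_r)
    assert scores.shape == (4, 4, 4)  # (batch, len_left, len_right)
    return scores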
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 4
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr3 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr4 + 0)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp11 = tmp8 + tmp10
tmp12 = tmp7 + tmp11
tmp13 = tmp3 + tmp12
tl.store(in_out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0),
out=buf1)
del buf0
buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), out=buf3)
del primals_7
buf4 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](buf4, primals_4, buf2, primals_6,
buf3, primals_8, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf2
del buf3
del primals_4
del primals_6
del primals_8
return buf4, primals_3, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0)
class BiaffineNew(nn.Module):
def __init__(self, dim_left, dim_right):
super().__init__()
self.dim_left = dim_left
self.dim_right = dim_right
self.matrix = nn.Parameter(torch.Tensor(dim_left, dim_right))
self.bias = nn.Parameter(torch.Tensor(1))
self.linear_l = nn.Linear(dim_left, 1)
self.linear_r = nn.Linear(dim_right, 1)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.bias, 0.0)
nn.init.xavier_uniform_(self.matrix)
def forward(self, input_0, input_1):
primals_1 = self.matrix
primals_4 = self.bias
primals_5 = self.linear_l.weight
primals_6 = self.linear_l.bias
primals_7 = self.linear_r.weight
primals_8 = self.linear_r.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
tpimentelms/dep-parser
|
Biaffine
| false
| 10,893
|
[
"MIT"
] | 0
|
be622cdd9a8b0ba85a28c39129ae2cdbfef03901
|
https://github.com/tpimentelms/dep-parser/tree/be622cdd9a8b0ba85a28c39129ae2cdbfef03901
|
SelfAttn
|
import torch
import torch.nn.functional as F
from torch import nn
class SelfAttn(nn.Module):
"""
self-attention with learnable parameters
"""
def __init__(self, dhid):
super().__init__()
self.scorer = nn.Linear(dhid, 1)
def forward(self, inp):
scores = F.softmax(self.scorer(inp), dim=1)
cont = scores.transpose(1, 2).bmm(inp).squeeze(1)
return cont
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dhid': 4}]
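# Hedged usage sketch, added for illustration and not part of the original repo:
# the scorer produces one logit per time step, softmax over dim=1 turns the
# logits into attention weights, and the weighted sum pools (batch, seq, dhid)
# down to a (batch, dhid) context vector.
def _example_selfattn_usage():
    init_args, init_kwargs = get_init_inputs()
    module = SelfAttn(*init_args, **init_kwargs)
    inp, = get_inputs()
    cont = module(inp)
    assert cont.shape == (4, 4)  # (batch, dhid)
    return cont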
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0
), primals_3, out=buf4)
del buf3
return reinterpret_tensor(buf4, (4, 4), (4, 1), 0), primals_3, buf1
class SelfAttnNew(nn.Module):
"""
self-attention with learnable parameters
"""
def __init__(self, dhid):
super().__init__()
self.scorer = nn.Linear(dhid, 1)
def forward(self, input_0):
primals_1 = self.scorer.weight
primals_2 = self.scorer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
uyeongkim/moca
|
SelfAttn
| false
| 10,894
|
[
"MIT"
] | 0
|
8a5870898b6d59258ce1064bab440b7e8107e9b4
|
https://github.com/uyeongkim/moca/tree/8a5870898b6d59258ce1064bab440b7e8107e9b4
|
SeqAttnMatch
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SeqAttnMatch(nn.Module):
"""
Given sequences X and Y, match sequence Y to each element in X.
    * o_i = sum_j(alpha_ij * y_j) for each x_i in X
    * alpha_ij = softmax_j(x_i * y_j)
"""
def __init__(self, embed_dim, identity=False):
super(SeqAttnMatch, self).__init__()
if not identity:
self.linear = nn.Linear(embed_dim, embed_dim)
else:
self.linear = None
def forward(self, x, y, y_mask):
if self.linear:
x_proj = self.linear(x.view(-1, x.size(2))).view(x.size())
x_proj = F.relu(x_proj)
y_proj = self.linear(y.view(-1, y.size(2))).view(y.size())
y_proj = F.relu(y_proj)
else:
x_proj = x
y_proj = y
scores = x_proj.bmm(y_proj.transpose(2, 1))
y_mask = y_mask.unsqueeze(1).expand(scores.size())
scores = scores.masked_fill(y_mask == 0, -1e+30)
alpha_flat = F.softmax(scores.view(-1, y.size(1)), -1)
alpha = alpha_flat.view(-1, x.size(1), y.size(1))
matched_seq = alpha.bmm(y)
return matched_seq
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4}]
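# Hedged usage sketch, added for illustration and not part of the original repo:
# attention weights over y are computed from ReLU-projected dot products, masked
# positions (y_mask == 0) are filled with -1e30 before the softmax, and the
# weighted sum of y yields one matched vector per position of x.
def _example_seqattnmatch_usage():
    init_args, init_kwargs = get_init_inputs()
    module = SeqAttnMatch(*init_args, **init_kwargs)
    x, y, y_mask = get_inputs()
    matched_seq = module(x, y, y_mask)
    assert matched_seq.shape == (4, 4, 4)  # same layout as x: (batch, len_x, embed_dim)
    return matched_seq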
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_out_ptr1,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = 0.0
tmp9 = tmp7 <= tmp8
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(in_out_ptr1 + x2, tmp7, xmask)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * (x0 // 4), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = -1.0000000150474662e+30
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 == tmp1
tmp9 = tl.where(tmp7, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp12 = tmp11 == tmp1
tmp14 = tl.where(tmp12, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp17 = tmp16 == tmp1
tmp19 = tl.where(tmp17, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x0, tmp20, xmask)
tl.store(out_ptr1 + x0, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 // 4)), xmask)
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = -1.0000000150474662e+30
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf2)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
del buf2
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, buf3,
primals_3, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf1, reinterpret_tensor(buf3, (4, 4, 4), (16, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
buf6 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
triton_poi_fused__softmax_1[grid(16)](primals_5, buf4, buf5, buf6,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](primals_5, buf4, buf5, buf6,
buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf5
del buf6
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1),
0), primals_4, out=buf8)
del buf7
return buf8, primals_4, primals_5, reinterpret_tensor(primals_1, (16, 4
), (4, 1), 0), buf1, buf4, buf3, buf9
class SeqAttnMatchNew(nn.Module):
"""
Given sequences X and Y, match sequence Y to each element in X.
    * o_i = sum_j(alpha_ij * y_j) for each x_i in X
    * alpha_ij = softmax_j(x_i * y_j)
"""
def __init__(self, embed_dim, identity=False):
super(SeqAttnMatchNew, self).__init__()
if not identity:
self.linear = nn.Linear(embed_dim, embed_dim)
else:
self.linear = None
def forward(self, input_0, input_1, input_2):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
primals_4 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
srlee-ai/claf
|
SeqAttnMatch
| false
| 10,895
|
[
"MIT"
] | 0
|
89b3e5c5ec0486886876ea3bac381508c6a6bf58
|
https://github.com/srlee-ai/claf/tree/89b3e5c5ec0486886876ea3bac381508c6a6bf58
|
Critic
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd
class Critic(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Critic, self).__init__()
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, output_size)
def forward(self, state, action):
"""
        Params state and action are torch tensors
"""
x = torch.cat([state, action], 1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
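# Hedged usage sketch, added for illustration and not part of the original repo:
# state and action are concatenated along dim=1, so the (4, 4, 4, 4) test inputs
# become a (4, 8, 4, 4) tensor, and every Linear acts on the last dimension, so
# the output keeps that shape.
def _example_critic_usage():
    init_args, init_kwargs = get_init_inputs()
    module = Critic(*init_args, **init_kwargs)
    state, action = get_inputs()
    q_value = module(state, action)
    assert q_value.shape == (4, 8, 4, 4)
    return q_value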
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((128, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (128, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 8, 4, 4), (128, 16, 4, 1), 0)
del buf1
buf7 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(512)](buf2,
primals_4, buf7, 512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (128, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 8, 4, 4), (128, 16, 4, 1), 0)
del buf3
buf6 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(512)](buf4,
primals_6, buf6, 512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((128, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf4, (128, 4),
(4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_8
return reinterpret_tensor(buf5, (4, 8, 4, 4), (128, 16, 4, 1), 0
), reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(
buf2, (128, 4), (4, 1), 0), reinterpret_tensor(buf4, (128, 4), (4,
1), 0), primals_7, buf6, primals_5, buf7
class CriticNew(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(CriticNew, self).__init__()
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, output_size)
def forward(self, input_0, input_1):
primals_3 = self.linear1.weight
primals_4 = self.linear1.bias
primals_5 = self.linear2.weight
primals_6 = self.linear2.bias
primals_7 = self.linear3.weight
primals_8 = self.linear3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
vivekagra/Biplane-Quadrotor
|
Critic
| false
| 10,896
|
[
"BSD-3-Clause"
] | 0
|
afe69216494842f5bfe16cbcc0cdcc6ef0de7769
|
https://github.com/vivekagra/Biplane-Quadrotor/tree/afe69216494842f5bfe16cbcc0cdcc6ef0de7769
|
Simple_nn
|
import torch
class Simple_nn(torch.nn.Module):
def __init__(self, dims_in, hidden):
super(Simple_nn, self).__init__()
self.linear1 = torch.nn.Linear(dims_in, hidden)
self.linear2 = torch.nn.Linear(hidden, 2)
self.output = torch.nn.LogSoftmax()
def forward(self, x):
hidden_activation = self.linear1(x).clamp(min=0)
y_pred = self.linear2(hidden_activation).clamp(min=0)
return self.output(y_pred)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dims_in': 4, 'hidden': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp2 >= tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp5, xmask)
@triton.jit
def triton_poi_fused__log_softmax_clamp_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 8
x3 = xindex % 8
x0 = xindex % 2
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 32 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (8 + x3 + 32 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (16 + x3 + 32 * x2), xmask)
tmp13 = tl.load(in_ptr0 + (24 + x3 + 32 * x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp6, tmp3)
tmp8 = triton_helpers.maximum(tmp4, tmp7)
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp10, tmp3)
tmp12 = triton_helpers.maximum(tmp8, tmp11)
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp14, tmp3)
tmp16 = triton_helpers.maximum(tmp12, tmp15)
tmp17 = tmp4 - tmp16
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp7 - tmp16
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tmp11 - tmp16
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp15 - tmp16
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tl_math.log(tmp27)
tl.store(out_ptr0 + x4, tmp16, xmask)
tl.store(out_ptr1 + x4, tmp28, xmask)
@triton.jit
def triton_poi_fused__log_softmax_clamp_ge_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x0 = xindex % 2
x3 = xindex // 32
x6 = xindex % 8
tmp0 = tl.load(in_ptr0 + x5, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x6 + 8 * x3), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr3 + (x6 + 8 * x3), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = tmp4 - tmp5
tmp8 = tmp6 - tmp7
tmp9 = tmp2 >= tmp3
tl.store(out_ptr0 + x5, tmp8, xmask)
tl.store(out_ptr1 + x5, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 4), (4, 1))
assert_size_stride(primals_5, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_clamp_ge_0[grid(256)](buf0, primals_2, buf1, buf7,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 1, 4, 2), (8, 32, 2, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 4, 2), (8, 32, 2, 1), torch.float32)
triton_poi_fused__log_softmax_clamp_1[grid(32)](buf2, primals_5,
buf3, buf4, 32, XBLOCK=32, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
triton_poi_fused__log_softmax_clamp_ge_2[grid(128)](buf2, primals_5,
buf3, buf4, buf5, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1)
del buf2
del buf3
del buf4
del primals_5
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf5, buf6, primals_4, buf7
class Simple_nnNew(torch.nn.Module):
def __init__(self, dims_in, hidden):
super(Simple_nnNew, self).__init__()
self.linear1 = torch.nn.Linear(dims_in, hidden)
self.linear2 = torch.nn.Linear(hidden, 2)
self.output = torch.nn.LogSoftmax()
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
urbanriskmap/timeseries-analysis
|
Simple_nn
| false
| 10,897
|
[
"MIT"
] | 0
|
6b9a8d1a916ff784cb0de93d6997cd072d1ca6ae
|
https://github.com/urbanriskmap/timeseries-analysis/tree/6b9a8d1a916ff784cb0de93d6997cd072d1ca6ae
|
model
|
import torch
import torch.nn as nn
class model(nn.Module):
def __init__(self, input_shape=28 * 28, nr_classes=10):
super(model, self).__init__()
self.input_shape = input_shape
self.fc1 = nn.Linear(input_shape, 200)
self.fc2 = nn.Linear(200, nr_classes)
self.relu = nn.ReLU()
def forward(self, x):
x = x.view(-1, self.input_shape)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
log_softmax_out = nn.LogSoftmax()(x)
return log_softmax_out
def forward_inference(self, x):
raise NotImplementedError
def get_name(self):
return 'default_model'
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (200, 784), (784, 1))
assert_size_stride(primals_3, (200,), (1,))
assert_size_stride(primals_4, (10, 200), (200, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 200), (200, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
200), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(800)](buf1, primals_3, 800, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(200, 10), (1, 200), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_1[grid(4)](buf2, buf5, 4, 10, XBLOCK=
1, num_warps=2, num_stages=1)
del buf2
return buf5, primals_1, buf1, buf5, primals_4
class modelNew(nn.Module):
def __init__(self, input_shape=28 * 28, nr_classes=10):
super(modelNew, self).__init__()
self.input_shape = input_shape
self.fc1 = nn.Linear(input_shape, 200)
self.fc2 = nn.Linear(200, nr_classes)
self.relu = nn.ReLU()
def forward_inference(self, x):
raise NotImplementedError
def get_name(self):
return 'default_model'
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
vishal-keshav/pytorch-project-template
|
model
| false
| 10,898
|
[
"MIT"
] | 0
|
526dd5b1036ed9cf592172301a2c85e8425cd154
|
https://github.com/vishal-keshav/pytorch-project-template/tree/526dd5b1036ed9cf592172301a2c85e8425cd154
|
ThreeNet
|
import torch
import torch.nn as nn
class ThreeNet(nn.Module):
"""
A network with three layers. This is used for testing a network with more
than one operation. The network has a convolution layer followed by two
fully connected layers.
"""
def __init__(self, input_dim: 'int', conv_dim: 'int', linear_dim: 'int'
) ->None:
super(ThreeNet, self).__init__()
self.conv = nn.Conv2d(input_dim, conv_dim, 1, 1)
out_dim = 1
self.pool = nn.AdaptiveAvgPool2d((out_dim, out_dim))
self.linear1 = nn.Linear(conv_dim, linear_dim)
self.linear2 = nn.Linear(linear_dim, 1)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x = self.conv(x)
x = self.pool(x)
x = torch.flatten(x, 1)
x = self.linear1(x)
x = self.linear2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'conv_dim': 4, 'linear_dim': 4}]
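# Hedged usage sketch, added for illustration and not part of the original repo:
# the 1x1 convolution preserves the spatial size, the adaptive pool collapses it
# to 1x1, and the two Linear layers map the flattened features to one scalar per
# example.
def _example_threenet_usage():
    init_args, init_kwargs = get_init_inputs()
    module = ThreeNet(*init_args, **init_kwargs)
    x, = get_inputs()
    out = module(x)
    assert out.shape == (4, 1)  # (batch, 1)
    return out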
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_convolution_mean_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = 16.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_convolution_mean_0[grid(16)](buf2, buf0, primals_2,
16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del primals_2
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (4, 4), (4,
1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf3)
del primals_5
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
return buf5, primals_1, primals_3, reinterpret_tensor(buf2, (4, 4), (4,
1), 0), buf3, primals_6, primals_4
class ThreeNetNew(nn.Module):
"""
A network with three layers. This is used for testing a network with more
than one operation. The network has a convolution layer followed by two
fully connected layers.
"""
def __init__(self, input_dim: 'int', conv_dim: 'int', linear_dim: 'int'
) ->None:
super(ThreeNetNew, self).__init__()
self.conv = nn.Conv2d(input_dim, conv_dim, 1, 1)
out_dim = 1
self.pool = nn.AdaptiveAvgPool2d((out_dim, out_dim))
self.linear1 = nn.Linear(conv_dim, linear_dim)
self.linear2 = nn.Linear(linear_dim, 1)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.linear1.weight
primals_5 = self.linear1.bias
primals_6 = self.linear2.weight
primals_7 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
synthara/M-SFV-SyntharaFVcore
|
ThreeNet
| false
| 10,899
|
[
"Apache-2.0"
] | 0
|
b4d2167a110aaecf3df442f58793ca2cb7b028ba
|
https://github.com/synthara/M-SFV-SyntharaFVcore/tree/b4d2167a110aaecf3df442f58793ca2cb7b028ba
|
SmallConvNet
|
import torch
from typing import Tuple
import torch.nn as nn
from numpy import prod
class SmallConvNet(nn.Module):
"""
A network with three conv layers. This is used for testing convolution
layers for activation count.
"""
def __init__(self, input_dim: 'int') ->None:
super(SmallConvNet, self).__init__()
conv_dim1 = 8
conv_dim2 = 4
conv_dim3 = 2
self.conv1 = nn.Conv2d(input_dim, conv_dim1, 1, 1)
self.conv2 = nn.Conv2d(conv_dim1, conv_dim2, 1, 2)
self.conv3 = nn.Conv2d(conv_dim2, conv_dim3, 1, 2)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
def get_gt_activation(self, x: 'torch.Tensor') ->Tuple[int, int, int]:
x = self.conv1(x)
count1 = prod(list(x.size()))
x = self.conv2(x)
count2 = prod(list(x.size()))
x = self.conv3(x)
count3 = prod(list(x.size()))
return count1, count2, count3
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
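# Hedged usage sketch, added for illustration and not part of the original repo:
# with a (4, 4, 4, 4) input the three convolutions produce activations of shape
# (4, 8, 4, 4), (4, 4, 2, 2) and (4, 2, 1, 1), so get_gt_activation returns the
# element counts (512, 64, 8) used as ground truth for activation counting.
def _example_smallconvnet_usage():
    init_args, init_kwargs = get_init_inputs()
    module = SmallConvNet(*init_args, **init_kwargs)
    x, = get_inputs()
    out = module(x)
    counts = module.get_gt_activation(x)
    assert out.shape == (4, 2, 1, 1)
    assert tuple(counts) == (512, 64, 8)
    return out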
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from typing import Tuple
import torch.nn as nn
from numpy import prod
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (8, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(512)](buf1, primals_2, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 2, 2), (16, 4, 2, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(64)](buf3, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 2, 1, 1), (2, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(8)](buf5, primals_7, 8, XBLOCK=
8, num_warps=1, num_stages=1)
del primals_7
return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3
class SmallConvNetNew(nn.Module):
"""
A network with three conv layers. This is used for testing convolution
layers for activation count.
"""
def __init__(self, input_dim: 'int') ->None:
super(SmallConvNetNew, self).__init__()
conv_dim1 = 8
conv_dim2 = 4
conv_dim3 = 2
self.conv1 = nn.Conv2d(input_dim, conv_dim1, 1, 1)
self.conv2 = nn.Conv2d(conv_dim1, conv_dim2, 1, 2)
self.conv3 = nn.Conv2d(conv_dim2, conv_dim3, 1, 2)
def get_gt_activation(self, x: 'torch.Tensor') ->Tuple[int, int, int]:
x = self.conv1(x)
count1 = prod(list(x.size()))
x = self.conv2(x)
count2 = prod(list(x.size()))
x = self.conv3(x)
count3 = prod(list(x.size()))
return count1, count2, count3
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
synthara/M-SFV-SyntharaFVcore
|
SmallConvNet
| false
| 10,900
|
[
"Apache-2.0"
] | 0
|
b4d2167a110aaecf3df442f58793ca2cb7b028ba
|
https://github.com/synthara/M-SFV-SyntharaFVcore/tree/b4d2167a110aaecf3df442f58793ca2cb7b028ba
|
Discrete
|
import torch
import torch.nn as nn
class Discrete(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return nn.functional.softmax(x, dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf0
return buf1,
class DiscreteNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
wandb/cli
|
Discrete
| false
| 10,901
|
[
"MIT"
] | 0
|
4a21c2c0c9944734f4c30a8e1453aaf45609e415
|
https://github.com/wandb/cli/tree/4a21c2c0c9944734f4c30a8e1453aaf45609e415
|
NestedNetInnerModule
|
import torch
import torch.nn as nn
from typing import Counter
from collections import Counter
class NestedNetInnerModule(nn.Module):
"""
A submodule for the nested net test module below.
"""
def __init__(self, lin_op: 'str'='addmm') ->None:
super().__init__()
conv_input_size = 2, 5
conv_in = 2
conv_out = 2
kernel_size = 1
padding = 0
fc_in = 10
fc_out = 10
self.conv = nn.Conv1d(in_channels=conv_in, out_channels=conv_out,
kernel_size=kernel_size, padding=padding)
self.fc = nn.Linear(in_features=fc_in, out_features=fc_out)
fc_flops = fc_in * fc_out
fc_flops = Counter({lin_op: fc_flops})
spatial_pos = conv_input_size[1] + 2 * padding - 2 * (kernel_size // 2)
conv_flops = spatial_pos * kernel_size * conv_in * conv_out
conv_flops = Counter({'conv': conv_flops})
model_flops = conv_flops + fc_flops
self.flops = {'': model_flops, 'fc': fc_flops, 'conv': conv_flops}
self.name_to_module = {'': self, 'fc': self.fc, 'conv': self.conv}
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x = x.reshape(-1, 2, 5)
x = self.conv(x)
x = torch.flatten(x, 1)
x = 3 * self.fc(x) + 1
return x
def get_inputs():
return [torch.rand([4, 2, 5])]
def get_init_inputs():
return [[], {}]
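# Hedged usage sketch, added for illustration and not part of the original repo:
# the analytic counts built in __init__ are Counter({'conv': 20}) for the
# kernel-size-1 Conv1d over 5 positions (5 * 1 * 2 * 2) and Counter({'addmm': 100})
# for the 10x10 Linear, and forward maps a (4, 2, 5) input to a (4, 10) output
# via 3 * fc(flatten(conv(x))) + 1.
def _example_nested_inner_usage():
    module = NestedNetInnerModule()
    x, = get_inputs()
    out = module(x)
    assert out.shape == (4, 10)
    assert module.flops['conv']['conv'] == 20
    assert module.flops['fc']['addmm'] == 100
    return out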
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from typing import Counter
from collections import Counter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 5 % 2
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 5), (10, 5, 1))
assert_size_stride(primals_2, (2, 2, 1), (2, 1, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (10, 10), (10, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 5), (10, 5, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(40)](buf1, primals_3, 40,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (4, 10), (10, 1), 0),
reinterpret_tensor(primals_4, (10, 10), (1, 10), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_add_mul_1[grid(40)](buf3, primals_5, 40, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
return buf3, primals_2, primals_1, reinterpret_tensor(buf1, (4, 10), (
10, 1), 0), primals_4
class NestedNetInnerModuleNew(nn.Module):
"""
A submodule for the nested net test module below.
"""
def __init__(self, lin_op: 'str'='addmm') ->None:
super().__init__()
conv_input_size = 2, 5
conv_in = 2
conv_out = 2
kernel_size = 1
padding = 0
fc_in = 10
fc_out = 10
self.conv = nn.Conv1d(in_channels=conv_in, out_channels=conv_out,
kernel_size=kernel_size, padding=padding)
self.fc = nn.Linear(in_features=fc_in, out_features=fc_out)
fc_flops = fc_in * fc_out
fc_flops = Counter({lin_op: fc_flops})
spatial_pos = conv_input_size[1] + 2 * padding - 2 * (kernel_size // 2)
conv_flops = spatial_pos * kernel_size * conv_in * conv_out
conv_flops = Counter({'conv': conv_flops})
model_flops = conv_flops + fc_flops
self.flops = {'': model_flops, 'fc': fc_flops, 'conv': conv_flops}
self.name_to_module = {'': self, 'fc': self.fc, 'conv': self.conv}
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_4 = self.fc.weight
primals_5 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
synthara/M-SFV-SyntharaFVcore
|
NestedNetInnerModule
| false
| 10,902
|
[
"Apache-2.0"
] | 0
|
b4d2167a110aaecf3df442f58793ca2cb7b028ba
|
https://github.com/synthara/M-SFV-SyntharaFVcore/tree/b4d2167a110aaecf3df442f58793ca2cb7b028ba
|
Complex_nn
|
import torch
import torch.nn.functional as F
class Complex_nn(torch.nn.Module):
def __init__(self, dims_in, hidden):
super(Complex_nn, self).__init__()
self.fc1 = torch.nn.Linear(dims_in, hidden)
self.fc2 = torch.nn.Linear(hidden, hidden)
self.fc3 = torch.nn.Linear(hidden, 2)
self.fc4 = torch.nn.LogSoftmax()
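        # Note: LogSoftmax() is built without an explicit `dim`; for the 4-D
        # activations used here PyTorch falls back to dim=1 (with a deprecation warning).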
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dims_in': 4, 'hidden': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__log_softmax_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 8
x3 = xindex % 8
x0 = xindex % 2
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 32 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (8 + x3 + 32 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (16 + x3 + 32 * x2), xmask)
tmp13 = tl.load(in_ptr0 + (24 + x3 + 32 * x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = triton_helpers.maximum(tmp4, tmp7)
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp3, tmp10)
tmp12 = triton_helpers.maximum(tmp8, tmp11)
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp16 = triton_helpers.maximum(tmp12, tmp15)
tmp17 = tmp4 - tmp16
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp7 - tmp16
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tmp11 - tmp16
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp15 - tmp16
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tl_math.log(tmp27)
tl.store(out_ptr0 + x4, tmp16, xmask)
tl.store(out_ptr1 + x4, tmp28, xmask)
@triton.jit
def triton_poi_fused__log_softmax_relu_threshold_backward_2(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x0 = xindex % 2
x3 = xindex // 32
x6 = xindex % 8
tmp0 = tl.load(in_ptr0 + x5, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x6 + 8 * x3), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr3 + (x6 + 8 * x3), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 - tmp5
tmp8 = tmp6 - tmp7
tmp9 = 0.0
tmp10 = tmp4 <= tmp9
tl.store(out_ptr0 + x5, tmp8, xmask)
tl.store(out_ptr1 + x5, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (2, 4), (4, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 2), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 1, 4, 2), (8, 32, 2, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 4, 2), (8, 32, 2, 1), torch.float32)
triton_poi_fused__log_softmax_relu_1[grid(32)](buf4, primals_7,
buf5, buf6, 32, XBLOCK=32, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
triton_poi_fused__log_softmax_relu_threshold_backward_2[grid(128)](buf4
, primals_7, buf5, buf6, buf7, buf8, 128, XBLOCK=128, num_warps
=4, num_stages=1)
del buf4
del buf5
del buf6
del primals_7
return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0
), buf7, buf8, primals_6, buf9, primals_4, buf10
class Complex_nnNew(torch.nn.Module):
def __init__(self, dims_in, hidden):
super(Complex_nnNew, self).__init__()
self.fc1 = torch.nn.Linear(dims_in, hidden)
self.fc2 = torch.nn.Linear(hidden, hidden)
self.fc3 = torch.nn.Linear(hidden, 2)
self.fc4 = torch.nn.LogSoftmax()
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
urbanriskmap/timeseries-analysis
|
Complex_nn
| false
| 10,903
|
[
"MIT"
] | 0
|
6b9a8d1a916ff784cb0de93d6997cd072d1ca6ae
|
https://github.com/urbanriskmap/timeseries-analysis/tree/6b9a8d1a916ff784cb0de93d6997cd072d1ca6ae
|
DilatedResidualLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DilatedResidualLayer(nn.Module):
def __init__(self, dilation, in_channels, out_channels):
super(DilatedResidualLayer, self).__init__()
self.conv_dilated = nn.Conv1d(in_channels, out_channels, 3, padding
=dilation, dilation=dilation)
self.conv_1x1 = nn.Conv1d(out_channels, out_channels, 1)
self.dropout = nn.Dropout()
def forward(self, x, mask):
out = F.relu(self.conv_dilated(x))
out = self.conv_1x1(out)
out = self.dropout(out)
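        # mask[:, 0:1, :] keeps a single mask channel and broadcasts it over all
        # channels, zeroing padded time steps of the residual output below.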
return (x + out) * mask[:, 0:1, :]
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dilation': 1, 'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 * tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(64)](buf1, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_add_convolution_mul_1[grid(64)](buf3, primals_3,
primals_5, primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(
primals_6, (4, 1, 4), (16, 4, 1), 0)
class DilatedResidualLayerNew(nn.Module):
def __init__(self, dilation, in_channels, out_channels):
super(DilatedResidualLayerNew, self).__init__()
self.conv_dilated = nn.Conv1d(in_channels, out_channels, 3, padding
=dilation, dilation=dilation)
self.conv_1x1 = nn.Conv1d(out_channels, out_channels, 1)
self.dropout = nn.Dropout()
def forward(self, input_0, input_1):
primals_1 = self.conv_dilated.weight
primals_2 = self.conv_dilated.bias
primals_4 = self.conv_1x1.weight
primals_5 = self.conv_1x1.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
tonnidas/sign-segmentation
|
DilatedResidualLayer
| false
| 10,904
|
[
"MIT"
] | 0
|
5332ccd1dbef311daa594ed6faa45cbd618a76a0
|
https://github.com/tonnidas/sign-segmentation/tree/5332ccd1dbef311daa594ed6faa45cbd618a76a0
|
Upconv
|
import math
import torch
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import Upsample
class PadSameConv2d(torch.nn.Module):
def __init__(self, kernel_size, stride=1):
"""
Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernel size of the convolution, int or tuple/list
:param stride: Stride of the convolution, int or tuple/list
"""
super().__init__()
if isinstance(kernel_size, (tuple, list)):
self.kernel_size_y = kernel_size[0]
self.kernel_size_x = kernel_size[1]
else:
self.kernel_size_y = kernel_size
self.kernel_size_x = kernel_size
if isinstance(stride, (tuple, list)):
self.stride_y = stride[0]
self.stride_x = stride[1]
else:
self.stride_y = stride
self.stride_x = stride
def forward(self, x: 'torch.Tensor'):
_, _, height, width = x.shape
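        # "Same" padding: for stride s and kernel k, the total padding that keeps
        # ceil(size / s) output positions is s * (ceil(size / s) - 1) + k - size;
        # it is split into floor/ceil halves for the two sides of each dimension.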
padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1
) + self.kernel_size_y - height) / 2
padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) +
self.kernel_size_x - width) / 2
padding = [math.floor(padding_x), math.ceil(padding_x), math.floor(
padding_y), math.ceil(padding_y)]
return F.pad(input=x, pad=padding)
class Upconv(torch.nn.Module):
def __init__(self, in_channels, out_channels):
"""
        Upsamples the input by a factor of 2, then applies "same" padding and a 2x2
        convolution, so the output keeps the upsampled spatial size.
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
"""
super().__init__()
self.upsample = Upsample(scale_factor=2)
self.pad = PadSameConv2d(kernel_size=2)
self.conv = Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=2, stride=1)
def forward(self, x: 'torch.Tensor'):
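        # Upsample by 2, then "same"-pad so the kernel-2 convolution preserves the
        # upsampled size (e.g. 4x4 input -> 8x8 -> padded 9x9 -> 8x8 after the conv).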
t = self.upsample(x)
t = self.pad(t)
return self.conv(t)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import Upsample
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__unsafe_index_constant_pad_nd_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 9 % 9
x0 = xindex % 9
x2 = xindex // 81
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tmp0.to(tl.float32)
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp3.to(tl.float32)
tmp11 = tmp10 * tmp7
tmp12 = tmp11.to(tl.int32)
tmp13 = tl.load(in_ptr0 + (tmp12 + 4 * tmp9 + 16 * x2), tmp5 & xmask,
eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + x4, tmp13, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_constant_pad_nd_0[grid(1296)](primals_1,
buf0, 1296, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(1024)](buf2, primals_3, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class PadSameConv2d(torch.nn.Module):
def __init__(self, kernel_size, stride=1):
"""
Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernel size of the convolution, int or tuple/list
:param stride: Stride of the convolution, int or tuple/list
"""
super().__init__()
if isinstance(kernel_size, (tuple, list)):
self.kernel_size_y = kernel_size[0]
self.kernel_size_x = kernel_size[1]
else:
self.kernel_size_y = kernel_size
self.kernel_size_x = kernel_size
if isinstance(stride, (tuple, list)):
self.stride_y = stride[0]
self.stride_x = stride[1]
else:
self.stride_y = stride
self.stride_x = stride
def forward(self, x: 'torch.Tensor'):
_, _, height, width = x.shape
padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1
) + self.kernel_size_y - height) / 2
padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) +
self.kernel_size_x - width) / 2
padding = [math.floor(padding_x), math.ceil(padding_x), math.floor(
padding_y), math.ceil(padding_y)]
return F.pad(input=x, pad=padding)
class UpconvNew(torch.nn.Module):
def __init__(self, in_channels, out_channels):
"""
        Upsamples the input by a factor of 2, then applies "same" padding and a 2x2
        convolution, so the output keeps the upsampled spatial size.
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
"""
super().__init__()
self.upsample = Upsample(scale_factor=2)
self.pad = PadSameConv2d(kernel_size=2)
self.conv = Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=2, stride=1)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
shlomi-amitai/monorec
|
Upconv
| false
| 10,905
|
[
"MIT"
] | 0
|
74571c6cd8d06ae4fb15cbee5a41147c54c78556
|
https://github.com/shlomi-amitai/monorec/tree/74571c6cd8d06ae4fb15cbee5a41147c54c78556
|
ConvReLU
|
import math
import torch
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import LeakyReLU
class PadSameConv2d(torch.nn.Module):
def __init__(self, kernel_size, stride=1):
"""
Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernel size of the convolution, int or tuple/list
:param stride: Stride of the convolution, int or tuple/list
"""
super().__init__()
if isinstance(kernel_size, (tuple, list)):
self.kernel_size_y = kernel_size[0]
self.kernel_size_x = kernel_size[1]
else:
self.kernel_size_y = kernel_size
self.kernel_size_x = kernel_size
if isinstance(stride, (tuple, list)):
self.stride_y = stride[0]
self.stride_x = stride[1]
else:
self.stride_y = stride
self.stride_x = stride
def forward(self, x: 'torch.Tensor'):
_, _, height, width = x.shape
padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1
) + self.kernel_size_y - height) / 2
padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) +
self.kernel_size_x - width) / 2
padding = [math.floor(padding_x), math.ceil(padding_x), math.floor(
padding_y), math.ceil(padding_y)]
return F.pad(input=x, pad=padding)
class ConvReLU(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
leaky_relu_neg_slope=0.1):
"""
        Applies a "same"-padded convolution followed by a leaky ReLU.
:param in_channels: Number of input channels
:param out_channels: Number of output channels
:param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction
:param stride: Stride for the convolutions, first in y direction, then in x direction
"""
super().__init__()
self.pad = PadSameConv2d(kernel_size=kernel_size, stride=stride)
self.conv = Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride)
self.leaky_relu = LeakyReLU(negative_slope=leaky_relu_neg_slope)
def forward(self, x: 'torch.Tensor'):
t = self.pad(x)
t = self.conv(t)
return self.leaky_relu(t)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import LeakyReLU
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 7 % 7
x0 = xindex % 7
x2 = xindex // 49
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
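# Added note: the kernel above writes both the boolean "pre-activation > 0" mask
# (out_ptr0) and the leaky-ReLU output (out_ptr1); the mask is returned from call(),
# presumably so the generated backward can reuse it instead of recomputing it.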
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1,
primals_3, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_3
return buf3, primals_2, buf0, buf2
class PadSameConv2d(torch.nn.Module):
def __init__(self, kernel_size, stride=1):
"""
Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernel size of the convolution, int or tuple/list
:param stride: Stride of the convolution, int or tuple/list
"""
super().__init__()
if isinstance(kernel_size, (tuple, list)):
self.kernel_size_y = kernel_size[0]
self.kernel_size_x = kernel_size[1]
else:
self.kernel_size_y = kernel_size
self.kernel_size_x = kernel_size
if isinstance(stride, (tuple, list)):
self.stride_y = stride[0]
self.stride_x = stride[1]
else:
self.stride_y = stride
self.stride_x = stride
def forward(self, x: 'torch.Tensor'):
_, _, height, width = x.shape
padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1
) + self.kernel_size_y - height) / 2
padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) +
self.kernel_size_x - width) / 2
padding = [math.floor(padding_x), math.ceil(padding_x), math.floor(
padding_y), math.ceil(padding_y)]
return F.pad(input=x, pad=padding)
class ConvReLUNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
leaky_relu_neg_slope=0.1):
"""
        Applies a "same"-padded convolution followed by a leaky ReLU.
:param in_channels: Number of input channels
:param out_channels: Number of output channels
:param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction
:param stride: Stride for the convolutions, first in y direction, then in x direction
"""
super().__init__()
self.pad = PadSameConv2d(kernel_size=kernel_size, stride=stride)
self.conv = Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride)
self.leaky_relu = LeakyReLU(negative_slope=leaky_relu_neg_slope)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_3 = self.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
shlomi-amitai/monorec
|
ConvReLU
| false
| 10,906
|
[
"MIT"
] | 0
|
74571c6cd8d06ae4fb15cbee5a41147c54c78556
|
https://github.com/shlomi-amitai/monorec/tree/74571c6cd8d06ae4fb15cbee5a41147c54c78556
|
VAE
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
""" VAE decoder """
def __init__(self, img_channels, latent_size):
super(Decoder, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.fc1 = nn.Linear(latent_size, 1024)
self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2)
def forward(self, x):
x = F.relu(self.fc1(x))
x = x.unsqueeze(-1).unsqueeze(-1)
x = F.relu(self.deconv1(x))
x = F.relu(self.deconv2(x))
x = F.relu(self.deconv3(x))
reconstruction = torch.sigmoid(self.deconv4(x))
return reconstruction
class Encoder(nn.Module):
""" VAE encoder """
def __init__(self, img_channels, latent_size):
super(Encoder, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
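        # For 64x64 inputs the four stride-2, kernel-4 convolutions give spatial
        # sizes 64 -> 31 -> 14 -> 6 -> 2, hence the 2 * 2 * 256 flattened features below.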
self.fc_mu = nn.Linear(2 * 2 * 256, latent_size)
self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(x.size(0), -1)
mu = self.fc_mu(x)
logsigma = self.fc_logsigma(x)
return mu, logsigma
class VAE(nn.Module):
""" Variational Autoencoder """
def __init__(self, img_channels, latent_size):
super(VAE, self).__init__()
self.encoder = Encoder(img_channels, latent_size)
self.decoder = Decoder(img_channels, latent_size)
def forward(self, x):
mu, logsigma = self.encoder(x)
sigma = logsigma.exp()
eps = torch.randn_like(sigma)
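        # Reparameterisation trick: z = mu + sigma * eps with eps ~ N(0, I), so the
        # sample stays stochastic while gradients flow through mu and logsigma.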
z = eps.mul(sigma).add_(mu)
recon_x = self.decoder(z)
return recon_x, mu, logsigma
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'img_channels': 4, 'latent_size': 4}]
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 16384 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 144 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1024 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 256 * x2 + 1024 * y1), tmp6, xmask)
@triton.jit
def triton_poi_fused_add_exp_mul_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_18(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22, primals_23
) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (4, 1024), (1024, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 1024), (1024, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (1024, 4), (4, 1))
assert_size_stride(primals_15, (1024,), (1,))
assert_size_stride(primals_16, (1024, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (64, 32, 6, 6), (1152, 36, 6, 1))
assert_size_stride(primals_21, (32,), (1,))
assert_size_stride(primals_22, (32, 4, 6, 6), (144, 36, 6, 1))
assert_size_stride(primals_23, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(128, 16)](primals_1, buf0, 128, 16, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch
.float32)
triton_poi_fused_1[grid(16, 4096)](primals_3, buf1, 16, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch.
float32)
triton_poi_fused_2[grid(2048, 16)](primals_4, buf2, 2048, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
torch.float32)
triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
triton_poi_fused_4[grid(32768, 16)](primals_8, buf4, 32768, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128),
torch.float32)
triton_poi_fused_5[grid(131072, 25)](primals_16, buf5, 131072, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_16
buf6 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused_6[grid(8192, 25)](primals_18, buf6, 8192, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_18
buf7 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32), torch
.float32)
triton_poi_fused_7[grid(2048, 36)](primals_20, buf7, 2048, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_20
buf8 = empty_strided_cuda((32, 4, 6, 6), (144, 1, 24, 4), torch.float32
)
triton_poi_fused_8[grid(128, 36)](primals_22, buf8, 128, 36, XBLOCK
=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_22
buf9 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 31, 31), (30752, 1, 992, 32))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_9[grid(123008)](buf10, primals_2,
123008, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf11 = extern_kernels.convolution(buf10, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 14, 14), (12544, 1, 896, 64))
buf12 = buf11
del buf11
triton_poi_fused_convolution_relu_10[grid(50176)](buf12, primals_5,
50176, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf13 = extern_kernels.convolution(buf12, buf3, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 6, 6), (4608, 1, 768, 128))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_11[grid(18432)](buf14, primals_7,
18432, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf15 = extern_kernels.convolution(buf14, buf4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 256, 2, 2), (1024, 1, 512, 256))
buf16 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.
float32)
buf33 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_12[grid(1024, 4)](
buf15, primals_9, buf16, buf33, 1024, 4, XBLOCK=4, YBLOCK=64,
num_warps=4, num_stages=1)
del primals_9
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf16, (4, 1024
), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf17)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf16, (4, 1024
), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf18)
del primals_13
buf19 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf20 = buf19
del buf19
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_exp_mul_13[grid(16)](buf20, buf18, buf17,
buf21, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf22 = reinterpret_tensor(buf15, (4, 1024), (1024, 1), 0)
del buf15
extern_kernels.mm(buf21, reinterpret_tensor(primals_14, (4, 1024),
(1, 4), 0), out=buf22)
buf23 = buf22
del buf22
buf32 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_14[grid(4096)](buf23,
primals_15, buf32, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_15
buf24 = extern_kernels.convolution(reinterpret_tensor(buf23, (4,
1024, 1, 1), (1024, 1, 0, 0), 0), buf5, stride=(2, 2), padding=
(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 5, 5), (3200, 1, 640, 128))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_15[grid(12800)](buf25, primals_17,
12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf26 = extern_kernels.convolution(buf25, buf6, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 64, 13, 13), (10816, 1, 832, 64))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_16[grid(43264)](buf27, primals_19,
43264, XBLOCK=512, num_warps=4, num_stages=1)
del primals_19
buf28 = extern_kernels.convolution(buf27, buf7, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 32, 30, 30), (28800, 1, 960, 32))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_17[grid(115200)](buf29,
primals_21, 115200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_21
buf30 = extern_kernels.convolution(buf29, buf8, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 4, 64, 64), (16384, 1, 256, 4))
buf31 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_sigmoid_18[grid(16, 4096)](buf30,
primals_23, buf31, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4,
num_stages=1)
del buf30
del primals_23
return (buf31, buf17, buf18, buf0, buf1, buf2, buf3, buf4, buf5, buf6,
buf7, buf8, buf10, buf12, buf14, reinterpret_tensor(buf16, (4, 1024
), (1024, 1), 0), buf18, buf20, buf21, reinterpret_tensor(buf23, (4,
1024, 1, 1), (1024, 1, 1, 1), 0), buf25, buf27, buf29, buf31, buf32,
primals_14, primals_12, primals_10, buf33)
class Decoder(nn.Module):
""" VAE decoder """
def __init__(self, img_channels, latent_size):
super(Decoder, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.fc1 = nn.Linear(latent_size, 1024)
self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2)
def forward(self, x):
x = F.relu(self.fc1(x))
x = x.unsqueeze(-1).unsqueeze(-1)
x = F.relu(self.deconv1(x))
x = F.relu(self.deconv2(x))
x = F.relu(self.deconv3(x))
reconstruction = torch.sigmoid(self.deconv4(x))
return reconstruction
class Encoder(nn.Module):
""" VAE encoder """
def __init__(self, img_channels, latent_size):
super(Encoder, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
self.fc_mu = nn.Linear(2 * 2 * 256, latent_size)
self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(x.size(0), -1)
mu = self.fc_mu(x)
logsigma = self.fc_logsigma(x)
return mu, logsigma
class VAENew(nn.Module):
""" Variational Autoencoder """
def __init__(self, img_channels, latent_size):
super(VAENew, self).__init__()
self.encoder = Encoder(img_channels, latent_size)
self.decoder = Decoder(img_channels, latent_size)
def forward(self, input_0):
primals_1 = self.encoder.conv1.weight
primals_2 = self.encoder.conv1.bias
primals_4 = self.encoder.conv2.weight
primals_5 = self.encoder.conv2.bias
primals_6 = self.encoder.conv3.weight
primals_7 = self.encoder.conv3.bias
primals_8 = self.encoder.conv4.weight
primals_9 = self.encoder.conv4.bias
primals_10 = self.encoder.fc_mu.weight
primals_11 = self.encoder.fc_mu.bias
primals_12 = self.encoder.fc_logsigma.weight
primals_13 = self.encoder.fc_logsigma.bias
primals_14 = self.decoder.fc1.weight
primals_15 = self.decoder.fc1.bias
primals_16 = self.decoder.deconv1.weight
primals_17 = self.decoder.deconv1.bias
primals_18 = self.decoder.deconv2.weight
primals_19 = self.decoder.deconv2.bias
primals_20 = self.decoder.deconv3.weight
primals_21 = self.decoder.deconv3.bias
primals_22 = self.decoder.deconv4.weight
primals_23 = self.decoder.deconv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23])
return output[0], output[1], output[2]
|
susanwe/world-models
|
VAE
| false
| 10,907
|
[
"MIT"
] | 0
|
0f246a430683e6ab741726df0a97f35830044356
|
https://github.com/susanwe/world-models/tree/0f246a430683e6ab741726df0a97f35830044356
|
ConvSig
|
import math
import torch
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import Sigmoid
class PadSameConv2d(torch.nn.Module):
def __init__(self, kernel_size, stride=1):
"""
Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernel size of the convolution, int or tuple/list
:param stride: Stride of the convolution, int or tuple/list
"""
super().__init__()
if isinstance(kernel_size, (tuple, list)):
self.kernel_size_y = kernel_size[0]
self.kernel_size_x = kernel_size[1]
else:
self.kernel_size_y = kernel_size
self.kernel_size_x = kernel_size
if isinstance(stride, (tuple, list)):
self.stride_y = stride[0]
self.stride_x = stride[1]
else:
self.stride_y = stride
self.stride_x = stride
def forward(self, x: 'torch.Tensor'):
_, _, height, width = x.shape
padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1
) + self.kernel_size_y - height) / 2
padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) +
self.kernel_size_x - width) / 2
padding = [math.floor(padding_x), math.ceil(padding_x), math.floor(
padding_y), math.ceil(padding_y)]
return F.pad(input=x, pad=padding)
class ConvSig(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1):
"""
        Applies a "same"-padded convolution followed by a sigmoid.
:param in_channels: Number of input channels
:param out_channels: Number of output channels
:param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction
:param stride: Stride for the convolutions, first in y direction, then in x direction
"""
super().__init__()
self.pad = PadSameConv2d(kernel_size=kernel_size, stride=stride)
self.conv = Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride)
self.sig = Sigmoid()
def forward(self, x: 'torch.Tensor'):
t = self.pad(x)
t = self.conv(t)
return self.sig(t)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import Sigmoid
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 7 % 7
x0 = xindex % 7
x2 = xindex // 49
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_sigmoid_1[grid(256)](buf2, primals_3,
256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0, buf2
class PadSameConv2d(torch.nn.Module):
def __init__(self, kernel_size, stride=1):
"""
Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernel size of the convolution, int or tuple/list
:param stride: Stride of the convolution, int or tuple/list
"""
super().__init__()
if isinstance(kernel_size, (tuple, list)):
self.kernel_size_y = kernel_size[0]
self.kernel_size_x = kernel_size[1]
else:
self.kernel_size_y = kernel_size
self.kernel_size_x = kernel_size
if isinstance(stride, (tuple, list)):
self.stride_y = stride[0]
self.stride_x = stride[1]
else:
self.stride_y = stride
self.stride_x = stride
def forward(self, x: 'torch.Tensor'):
_, _, height, width = x.shape
padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1
) + self.kernel_size_y - height) / 2
padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) +
self.kernel_size_x - width) / 2
padding = [math.floor(padding_x), math.ceil(padding_x), math.floor(
padding_y), math.ceil(padding_y)]
return F.pad(input=x, pad=padding)
class ConvSigNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1):
"""
        Applies a "same"-padded convolution followed by a sigmoid.
:param in_channels: Number of input channels
:param out_channels: Number of output channels
:param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction
:param stride: Stride for the convolutions, first in y direction, then in x direction
"""
super().__init__()
self.pad = PadSameConv2d(kernel_size=kernel_size, stride=stride)
self.conv = Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride)
self.sig = Sigmoid()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_3 = self.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
shlomi-amitai/monorec
|
ConvSig
| false
| 10,908
|
[
"MIT"
] | 0
|
74571c6cd8d06ae4fb15cbee5a41147c54c78556
|
https://github.com/shlomi-amitai/monorec/tree/74571c6cd8d06ae4fb15cbee5a41147c54c78556
|
GlobalAttention_text
|
import torch
import torch.nn as nn
import torch.nn.parallel
class GlobalAttention_text(nn.Module):
def __init__(self, idf, cdf):
super(GlobalAttention_text, self).__init__()
self.conv_context = nn.Conv1d(cdf, idf, kernel_size=1, stride=1,
padding=0)
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input, context):
"""
input: batch x idf x ih x iw (queryL=ihxiw)
context: batch x cdf x sourceL
"""
ih, iw = input.size(2), input.size(3)
queryL = ih * iw
batch_size, sourceL = context.size(0), context.size(2)
target = input.view(batch_size, -1, queryL)
targetT = torch.transpose(target, 1, 2).contiguous()
sourceT = self.conv_context(context)
attn = torch.bmm(targetT, sourceT)
attn = attn.view(batch_size * queryL, sourceL)
if self.mask is not None:
mask = self.mask.repeat(queryL, 1)
attn.data.masked_fill_(mask.data, -float('inf'))
attn = attn.view(batch_size, queryL, sourceL)
attn = torch.nn.Softmax(dim=1)(attn)
text_weighted = torch.bmm(target, attn)
return text_weighted
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'idf': 4, 'cdf': 4}]
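# Hypothetical shape-check sketch (added for illustration, not part of the original repo):
# it wires GlobalAttention_text to the test inputs above and confirms the layout the
# forward() docstring describes -- 16 query pixels attend over sourceL = 4 words, and the
# weighted text features come back as (batch, idf, sourceL).
def _demo_global_attention_text_shapes():
    module = GlobalAttention_text(idf=4, cdf=4)
    image_feats, text_context = get_inputs()      # (4, 4, 4, 4) and (4, 4, 4)
    text_weighted = module(image_feats, text_context)
    assert text_weighted.shape == (4, 4, 4)       # batch x idf x sourceL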
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4
y3 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask)
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
tl.store(out_ptr1 + (y2 + 4 * x1 + 64 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_3, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32)
buf8 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_transpose_0[grid(16, 16)](primals_1, buf1,
buf8, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused_convolution_1[grid(64)](buf2, primals_4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(buf1, buf2, out=buf3)
del buf1
buf4 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_per_fused__softmax_2[grid(16)](buf3, buf4, buf5, 16, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf6 = buf3
del buf3
triton_poi_fused__softmax_3[grid(256)](buf6, buf4, buf5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf4
del buf5
buf7 = buf2
del buf2
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64,
16, 1), 0), buf6, out=buf7)
return buf7, primals_2, primals_3, reinterpret_tensor(primals_1, (4, 16,
4), (64, 1, 16), 0), buf6, buf8
class GlobalAttention_textNew(nn.Module):
def __init__(self, idf, cdf):
super(GlobalAttention_textNew, self).__init__()
self.conv_context = nn.Conv1d(cdf, idf, kernel_size=1, stride=1,
padding=0)
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input_0, input_1):
primals_3 = self.conv_context.weight
primals_4 = self.conv_context.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
ts170/T2I_CL
|
GlobalAttention_text
| false
| 10,909
|
[
"MIT"
] | 0
|
8754bea1101aabcbf8108b95e722f7aaeb385869
|
https://github.com/ts170/T2I_CL/tree/8754bea1101aabcbf8108b95e722f7aaeb385869
|
ConvReLU2
|
import math
import torch
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import LeakyReLU
class PadSameConv2d(torch.nn.Module):
def __init__(self, kernel_size, stride=1):
"""
Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernel size of the convolution, int or tuple/list
:param stride: Stride of the convolution, int or tuple/list
"""
super().__init__()
if isinstance(kernel_size, (tuple, list)):
self.kernel_size_y = kernel_size[0]
self.kernel_size_x = kernel_size[1]
else:
self.kernel_size_y = kernel_size
self.kernel_size_x = kernel_size
if isinstance(stride, (tuple, list)):
self.stride_y = stride[0]
self.stride_x = stride[1]
else:
self.stride_y = stride
self.stride_x = stride
def forward(self, x: 'torch.Tensor'):
_, _, height, width = x.shape
padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1
) + self.kernel_size_y - height) / 2
padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) +
self.kernel_size_x - width) / 2
padding = [math.floor(padding_x), math.ceil(padding_x), math.floor(
padding_y), math.ceil(padding_y)]
return F.pad(input=x, pad=padding)
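# A minimal worked example of the padding arithmetic above (added for illustration): for a
# 4x4 input and kernel_size=(4, 1) with stride 1, padding_y = (1*(4-1) + 4 - 4) / 2 = 1.5,
# so F.pad receives [0, 0, 1, 2] and the height grows from 4 to 7 while the width stays 4 --
# the same (4, 4, 7, 4) padded buffer the compiled code later in this entry allocates.
def _demo_pad_same():
    pad = PadSameConv2d(kernel_size=(4, 1), stride=(1, 1))
    x = torch.rand(4, 4, 4, 4)
    assert pad(x).shape == (4, 4, 7, 4)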
class ConvReLU2(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
leaky_relu_neg_slope=0.1):
"""
Performs two convolutions and a leaky relu. The first operation only convolves in y direction, the second one
only in x direction.
:param in_channels: Number of input channels
:param out_channels: Number of output channels
:param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction
:param stride: Stride for the convolutions, first in y direction, then in x direction
"""
super().__init__()
self.pad_0 = PadSameConv2d(kernel_size=(kernel_size, 1), stride=(
stride, 1))
self.conv_y = Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=(kernel_size, 1), stride=(stride, 1))
self.leaky_relu = LeakyReLU(negative_slope=leaky_relu_neg_slope)
self.pad_1 = PadSameConv2d(kernel_size=(1, kernel_size), stride=(1,
stride))
self.conv_x = Conv2d(in_channels=out_channels, out_channels=
out_channels, kernel_size=(1, kernel_size), stride=(1, stride))
def forward(self, x: 'torch.Tensor'):
t = self.pad_0(x)
t = self.conv_y(t)
t = self.leaky_relu(t)
t = self.pad_1(t)
t = self.conv_x(t)
return self.leaky_relu(t)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
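# Hypothetical usage sketch (illustrative only): builds ConvReLU2 from get_init_inputs()
# and checks that the y-then-x convolutions with "same" padding preserve the spatial size.
def _demo_conv_relu2():
    _, kwargs = get_init_inputs()
    block = ConvReLU2(**kwargs)
    x, = get_inputs()
    y = block(x)
    assert y.shape == x.shape  # (4, 4, 4, 4) in, (4, 4, 4, 4) out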
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import LeakyReLU
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 448
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 7
x2 = xindex // 28
x3 = xindex % 28
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_leaky_relu_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 448
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 7
x4 = xindex // 7
x2 = xindex // 28 % 4
x5 = xindex
tmp0 = -1 + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x4), tmp5 & xmask, other=0.0).to(tl
.int1)
tmp7 = tl.load(in_ptr1 + (-1 + x0 + 4 * x4), tmp5 & xmask, other=0.0)
tmp8 = tl.load(in_ptr2 + x2, tmp5 & xmask, eviction_policy='evict_last',
other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = 0.1
tmp11 = tmp9 * tmp10
tmp12 = tl.where(tmp6, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp5, tmp12, tmp13)
tl.store(out_ptr0 + x5, tmp14, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 4), (112, 28, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(448)](primals_1, buf0, 448,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1,
primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 7), (112, 28, 7, 1), torch.float32)
triton_poi_fused_constant_pad_nd_convolution_leaky_relu_2[grid(448)](
buf2, buf1, primals_3, buf3, 448, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf6 = buf1
del buf1
triton_poi_fused_convolution_leaky_relu_3[grid(256)](buf4,
primals_5, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf4
del primals_5
return buf6, primals_2, primals_4, buf0, buf2, buf3, buf5
class PadSameConv2d(torch.nn.Module):
def __init__(self, kernel_size, stride=1):
"""
Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernel size of the convolution, int or tuple/list
:param stride: Stride of the convolution, int or tuple/list
"""
super().__init__()
if isinstance(kernel_size, (tuple, list)):
self.kernel_size_y = kernel_size[0]
self.kernel_size_x = kernel_size[1]
else:
self.kernel_size_y = kernel_size
self.kernel_size_x = kernel_size
if isinstance(stride, (tuple, list)):
self.stride_y = stride[0]
self.stride_x = stride[1]
else:
self.stride_y = stride
self.stride_x = stride
def forward(self, x: 'torch.Tensor'):
_, _, height, width = x.shape
padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1
) + self.kernel_size_y - height) / 2
padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) +
self.kernel_size_x - width) / 2
padding = [math.floor(padding_x), math.ceil(padding_x), math.floor(
padding_y), math.ceil(padding_y)]
return F.pad(input=x, pad=padding)
class ConvReLU2New(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
leaky_relu_neg_slope=0.1):
"""
Performs two convolutions and a leaky relu. The first operation only convolves in y direction, the second one
only in x direction.
:param in_channels: Number of input channels
:param out_channels: Number of output channels
:param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction
:param stride: Stride for the convolutions, first in y direction, then in x direction
"""
super().__init__()
self.pad_0 = PadSameConv2d(kernel_size=(kernel_size, 1), stride=(
stride, 1))
self.conv_y = Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=(kernel_size, 1), stride=(stride, 1))
self.leaky_relu = LeakyReLU(negative_slope=leaky_relu_neg_slope)
self.pad_1 = PadSameConv2d(kernel_size=(1, kernel_size), stride=(1,
stride))
self.conv_x = Conv2d(in_channels=out_channels, out_channels=
out_channels, kernel_size=(1, kernel_size), stride=(1, stride))
def forward(self, input_0):
primals_2 = self.conv_y.weight
primals_3 = self.conv_y.bias
primals_4 = self.conv_x.weight
primals_5 = self.conv_x.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
shlomi-amitai/monorec
|
ConvReLU2
| false
| 10,910
|
[
"MIT"
] | 0
|
74571c6cd8d06ae4fb15cbee5a41147c54c78556
|
https://github.com/shlomi-amitai/monorec/tree/74571c6cd8d06ae4fb15cbee5a41147c54c78556
|
GlobalAttentionGeneral
|
import torch
import torch.nn as nn
import torch.nn.parallel
class GlobalAttentionGeneral(nn.Module):
def __init__(self, idf, cdf):
super(GlobalAttentionGeneral, self).__init__()
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input, context_key, content_value):
"""
input: batch x idf x ih x iw (queryL=ihxiw)
context: batch x cdf x sourceL
"""
ih, iw = input.size(2), input.size(3)
queryL = ih * iw
batch_size, sourceL = context_key.size(0), context_key.size(2)
target = input.view(batch_size, -1, queryL)
targetT = torch.transpose(target, 1, 2).contiguous()
sourceT = context_key
attn = torch.bmm(targetT, sourceT)
attn = attn.view(batch_size * queryL, sourceL)
if self.mask is not None:
mask = self.mask.repeat(queryL, 1)
attn.data.masked_fill_(mask.data, -float('inf'))
attn = self.sm(attn)
attn = attn.view(batch_size, queryL, sourceL)
attn = torch.transpose(attn, 1, 2).contiguous()
weightedContext = torch.bmm(content_value, attn)
weightedContext = weightedContext.view(batch_size, -1, ih, iw)
attn = attn.view(batch_size, -1, ih, iw)
return weightedContext, attn
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4,
4, 4])]
def get_init_inputs():
return [[], {'idf': 4, 'cdf': 4}]
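# Illustrative shape check (added; not from the original repo): both outputs come back
# reshaped to the image grid, matching the docstring's batch x idf x ih x iw convention.
def _demo_global_attention_general():
    attn_module = GlobalAttentionGeneral(idf=4, cdf=4)
    image_feats, context_key, content_value = get_inputs()
    weighted_context, attn = attn_module(image_feats, context_key, content_value)
    assert weighted_context.shape == (4, 4, 4, 4)  # batch x idf x ih x iw
    assert attn.shape == (4, 4, 4, 4)              # batch x sourceL x ih x iw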
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(buf0, arg1_1, out=buf1)
del arg1_1
buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0)
del buf1
triton_poi_fused_clone_2[grid(16, 16)](buf2, buf3, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 16), (64, 16, 1), 0)
del buf2
extern_kernels.bmm(arg2_1, buf3, out=buf4)
del arg2_1
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
class GlobalAttentionGeneralNew(nn.Module):
def __init__(self, idf, cdf):
super(GlobalAttentionGeneralNew, self).__init__()
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
ts170/T2I_CL
|
GlobalAttentionGeneral
| false
| 10,911
|
[
"MIT"
] | 0
|
8754bea1101aabcbf8108b95e722f7aaeb385869
|
https://github.com/ts170/T2I_CL/tree/8754bea1101aabcbf8108b95e722f7aaeb385869
|
Memory
|
import torch
import torch.nn as nn
import torch.nn.parallel
class Memory(nn.Module):
def __init__(self):
super(Memory, self).__init__()
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input, context_key, content_value):
"""
input: batch x idf x ih x iw (queryL=ihxiw)
context: batch x idf x sourceL
"""
ih, iw = input.size(2), input.size(3)
queryL = ih * iw
batch_size, sourceL = context_key.size(0), context_key.size(2)
target = input.view(batch_size, -1, queryL)
targetT = torch.transpose(target, 1, 2).contiguous()
sourceT = context_key
weight = torch.bmm(targetT, sourceT)
weight = weight.view(batch_size * queryL, sourceL)
if self.mask is not None:
mask = self.mask.repeat(queryL, 1)
weight.data.masked_fill_(mask.data, -float('inf'))
weight = torch.nn.functional.softmax(weight, dim=1)
weight = weight.view(batch_size, queryL, sourceL)
weight = torch.transpose(weight, 1, 2).contiguous()
weightedContext = torch.bmm(content_value, weight)
weightedContext = weightedContext.view(batch_size, -1, ih, iw)
weight = weight.view(batch_size, -1, ih, iw)
return weightedContext, weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4,
4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(buf0, arg1_1, out=buf1)
del arg1_1
buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0)
del buf1
triton_poi_fused_clone_2[grid(16, 16)](buf2, buf3, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 16), (64, 16, 1), 0)
del buf2
extern_kernels.bmm(arg2_1, buf3, out=buf4)
del arg2_1
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
class MemoryNew(nn.Module):
def __init__(self):
super(MemoryNew, self).__init__()
self.sm = nn.Softmax()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
ts170/T2I_CL
|
Memory
| false
| 10,912
|
[
"MIT"
] | 0
|
8754bea1101aabcbf8108b95e722f7aaeb385869
|
https://github.com/ts170/T2I_CL/tree/8754bea1101aabcbf8108b95e722f7aaeb385869
|
Backprojection
|
import torch
import torch.nn as nn
class Backprojection(nn.Module):
def __init__(self, batch_size, height, width):
super(Backprojection, self).__init__()
self.N, self.H, self.W = batch_size, height, width
yy, xx = torch.meshgrid([torch.arange(0.0, float(self.H)), torch.
arange(0.0, float(self.W))])
yy = yy.contiguous().view(-1)
xx = xx.contiguous().view(-1)
self.ones = nn.Parameter(torch.ones(self.N, 1, self.H * self.W),
requires_grad=False)
self.coord = torch.unsqueeze(torch.stack([xx, yy], 0), 0).repeat(self
.N, 1, 1)
self.coord = nn.Parameter(torch.cat([self.coord, self.ones], 1),
requires_grad=False)
def forward(self, depth, inv_K):
cam_p_norm = torch.matmul(inv_K[:, :3, :3], self.coord[:depth.shape
[0], :, :])
cam_p_euc = depth.view(depth.shape[0], 1, -1) * cam_p_norm
cam_p_h = torch.cat([cam_p_euc, self.ones[:depth.shape[0], :, :]], 1)
return cam_p_h
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'batch_size': 4, 'height': 4, 'width': 4}]
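# A small illustrative check (assumption: identity intrinsics, not part of the repo):
# backprojecting a depth map with inv_K = I returns homogeneous camera points of shape
# (N, 4, H*W), where the appended last row is all ones.
def _demo_backprojection():
    backproject = Backprojection(batch_size=4, height=4, width=4)
    depth = torch.rand(4, 4, 4)
    inv_K = torch.eye(4).expand(4, 4, 4)
    cam_points = backproject(depth, inv_K)
    assert cam_points.shape == (4, 4, 16)
    assert torch.allclose(cam_points[:, 3, :], torch.ones(4, 16))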
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0 + 16 * x1 + 48 * x2), tmp4 & xmask, other=0.0)
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 16 * x2), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 3, 16), (48, 16, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg3_1, (4, 1, 16), (16, 16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 16), (48, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 3, 3), (16, 4, 1),
0), arg1_1, out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](arg2_1, buf0, arg3_1, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg2_1
del arg3_1
del buf0
return buf1,
class BackprojectionNew(nn.Module):
def __init__(self, batch_size, height, width):
super(BackprojectionNew, self).__init__()
self.N, self.H, self.W = batch_size, height, width
yy, xx = torch.meshgrid([torch.arange(0.0, float(self.H)), torch.
arange(0.0, float(self.W))])
yy = yy.contiguous().view(-1)
xx = xx.contiguous().view(-1)
self.ones = nn.Parameter(torch.ones(self.N, 1, self.H * self.W),
requires_grad=False)
self.coord = torch.unsqueeze(torch.stack([xx, yy], 0), 0).repeat(self
.N, 1, 1)
self.coord = nn.Parameter(torch.cat([self.coord, self.ones], 1),
requires_grad=False)
def forward(self, input_0, input_1):
arg3_1 = self.ones
arg1_1 = self.coord
arg0_1 = input_0
arg2_1 = input_1
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
shlomi-amitai/monorec
|
Backprojection
| false
| 10,913
|
[
"MIT"
] | 0
|
74571c6cd8d06ae4fb15cbee5a41147c54c78556
|
https://github.com/shlomi-amitai/monorec/tree/74571c6cd8d06ae4fb15cbee5a41147c54c78556
|
Encoder
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Scaled_Dot_Product_Attention(nn.Module):
"""Scaled Dot-Product Attention """
def __init__(self):
super(Scaled_Dot_Product_Attention, self).__init__()
def forward(self, Q, K, V, scale=None):
"""
Args:
Q: [batch_size, len_Q, dim_Q]
K: [batch_size, len_K, dim_K]
V: [batch_size, len_V, dim_V]
            scale: scaling factor; the paper uses sqrt(dim_K)
Return:
            the self-attended tensor, together with the attention tensor
"""
attention = torch.matmul(Q, K.permute(0, 2, 1))
if scale:
attention = attention * scale
attention = F.softmax(attention, dim=-1)
context = torch.matmul(attention, V)
return context
class Multi_Head_Attention(nn.Module):
def __init__(self, dim_model, num_head, dropout=0.0):
super(Multi_Head_Attention, self).__init__()
self.num_head = num_head
assert dim_model % num_head == 0
self.dim_head = dim_model // self.num_head
self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
self.attention = Scaled_Dot_Product_Attention()
self.fc = nn.Linear(num_head * self.dim_head, dim_model)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(dim_model)
def forward(self, x):
batch_size = x.size(0)
Q = self.fc_Q(x)
K = self.fc_K(x)
V = self.fc_V(x)
Q = Q.view(batch_size * self.num_head, -1, self.dim_head)
K = K.view(batch_size * self.num_head, -1, self.dim_head)
V = V.view(batch_size * self.num_head, -1, self.dim_head)
scale = K.size(-1) ** -0.5
context = self.attention(Q, K, V, scale)
context = context.view(batch_size, -1, self.dim_head * self.num_head)
out = self.fc(context)
out = self.dropout(out)
out = out + x
out = self.layer_norm(out)
return out
class Position_wise_Feed_Forward(nn.Module):
def __init__(self, dim_model, hidden, dropout=0.0):
super(Position_wise_Feed_Forward, self).__init__()
self.fc1 = nn.Linear(dim_model, hidden)
self.fc2 = nn.Linear(hidden, dim_model)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(dim_model)
def forward(self, x):
out = self.fc1(x)
out = F.relu(out)
out = self.fc2(out)
out = self.dropout(out)
out = out + x
out = self.layer_norm(out)
return out
class Encoder(nn.Module):
def __init__(self, dim_model, num_head, hidden, dropout):
super(Encoder, self).__init__()
self.attention = Multi_Head_Attention(dim_model, num_head, dropout)
self.feed_forward = Position_wise_Feed_Forward(dim_model, hidden,
dropout)
def forward(self, x):
out = self.attention(x)
out = self.feed_forward(out)
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dim_model': 4, 'num_head': 4, 'hidden': 4, 'dropout': 0.5}]
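# Hypothetical smoke test (illustrative, not from the repo): runs one encoder block in
# eval mode so dropout is disabled. With a (4, 4) input the residual addition broadcasts
# the (4, 1, 4) attention output against x, so the block returns a (4, 4, 4) tensor.
def _demo_encoder():
    _, kwargs = get_init_inputs()
    encoder = Encoder(**kwargs).eval()
    x, = get_inputs()                 # (4, 4)
    with torch.no_grad():
        out = encoder(x)
    assert out.shape == (4, 4, 4)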
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tmp2 - tmp2
tmp4 = tmp3 * tmp1
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 / tmp5
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x5, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, primals_1, reinterpret_tensor(
primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1),
0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0), out=buf3)
buf4 = buf3
del buf3
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(buf4, reinterpret_tensor(buf2, (16, 1, 1), (1, 1,
1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (4, 4), (4,
1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf6)
del primals_9
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(16)](buf6, primals_1,
buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(64)](buf6, primals_1,
buf7, buf8, primals_10, primals_11, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf10)
buf11 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0)
del buf10
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(64)](buf11,
primals_13, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_13
buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0)
del buf12
triton_poi_fused_add_4[grid(64)](buf13, primals_15, buf9, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_15
buf14 = buf8
del buf8
buf15 = buf7
del buf7
triton_poi_fused_native_layer_norm_5[grid(16)](buf13, buf14, buf15,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_6[grid(64)](buf13, buf14, buf15,
primals_16, primals_17, buf16, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf14
del buf15
del primals_17
return buf16, primals_1, primals_10, primals_16, buf4, reinterpret_tensor(
buf5, (4, 4), (4, 1), 0), buf6, reinterpret_tensor(buf9, (16, 4), (
4, 1), 0), reinterpret_tensor(buf11, (16, 4), (4, 1), 0
), buf13, primals_14, buf17, primals_12, primals_8, reinterpret_tensor(
buf2, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf0, (16, 1, 1
), (1, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0)
class Scaled_Dot_Product_Attention(nn.Module):
"""Scaled Dot-Product Attention """
def __init__(self):
super(Scaled_Dot_Product_Attention, self).__init__()
def forward(self, Q, K, V, scale=None):
"""
Args:
Q: [batch_size, len_Q, dim_Q]
K: [batch_size, len_K, dim_K]
V: [batch_size, len_V, dim_V]
            scale: scaling factor; the paper uses sqrt(dim_K)
Return:
            the self-attended tensor, together with the attention tensor
"""
attention = torch.matmul(Q, K.permute(0, 2, 1))
if scale:
attention = attention * scale
attention = F.softmax(attention, dim=-1)
context = torch.matmul(attention, V)
return context
class Multi_Head_Attention(nn.Module):
def __init__(self, dim_model, num_head, dropout=0.0):
super(Multi_Head_Attention, self).__init__()
self.num_head = num_head
assert dim_model % num_head == 0
self.dim_head = dim_model // self.num_head
self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
self.attention = Scaled_Dot_Product_Attention()
self.fc = nn.Linear(num_head * self.dim_head, dim_model)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(dim_model)
def forward(self, x):
batch_size = x.size(0)
Q = self.fc_Q(x)
K = self.fc_K(x)
V = self.fc_V(x)
Q = Q.view(batch_size * self.num_head, -1, self.dim_head)
K = K.view(batch_size * self.num_head, -1, self.dim_head)
V = V.view(batch_size * self.num_head, -1, self.dim_head)
scale = K.size(-1) ** -0.5
context = self.attention(Q, K, V, scale)
context = context.view(batch_size, -1, self.dim_head * self.num_head)
out = self.fc(context)
out = self.dropout(out)
out = out + x
out = self.layer_norm(out)
return out
class Position_wise_Feed_Forward(nn.Module):
def __init__(self, dim_model, hidden, dropout=0.0):
super(Position_wise_Feed_Forward, self).__init__()
self.fc1 = nn.Linear(dim_model, hidden)
self.fc2 = nn.Linear(hidden, dim_model)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(dim_model)
def forward(self, x):
out = self.fc1(x)
out = F.relu(out)
out = self.fc2(out)
out = self.dropout(out)
out = out + x
out = self.layer_norm(out)
return out
class EncoderNew(nn.Module):
def __init__(self, dim_model, num_head, hidden, dropout):
super(EncoderNew, self).__init__()
self.attention = Multi_Head_Attention(dim_model, num_head, dropout)
self.feed_forward = Position_wise_Feed_Forward(dim_model, hidden,
dropout)
def forward(self, input_0):
primals_1 = self.attention.fc_Q.weight
primals_3 = self.attention.fc_Q.bias
primals_2 = self.attention.fc_K.weight
primals_5 = self.attention.fc_K.bias
primals_4 = self.attention.fc_V.weight
primals_7 = self.attention.fc_V.bias
primals_6 = self.attention.fc.weight
primals_9 = self.attention.fc.bias
primals_10 = self.attention.layer_norm.weight
primals_11 = self.attention.layer_norm.bias
primals_8 = self.feed_forward.fc1.weight
primals_13 = self.feed_forward.fc1.bias
primals_12 = self.feed_forward.fc2.weight
primals_15 = self.feed_forward.fc2.bias
primals_16 = self.feed_forward.layer_norm.weight
primals_17 = self.feed_forward.layer_norm.bias
primals_14 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
|
tianjiansmile/Chinese-Text-Classification-Pytorch
|
Encoder
| false
| 10,914
|
[
"MIT"
] | 0
|
05cc211b161f61e6bb32ab185dadcffec2f5b5de
|
https://github.com/tianjiansmile/Chinese-Text-Classification-Pytorch/tree/05cc211b161f61e6bb32ab185dadcffec2f5b5de
|
ShuffleCatAlt
|
import torch
import torch.nn as nn
class ShuffleCatAlt(nn.Module):
def forward(self, a, b):
assert a.size() == b.size()
n, c, h, w = a.size()
x = torch.zeros(n, c * 2, h, w, dtype=a.dtype, device=a.device)
x[:, ::2] = a
x[:, 1::2] = b
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
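# Illustrative check (added): the assignments above put a's channels at even indices and
# b's at odd indices, so the strided slices recover the inputs exactly.
def _demo_shuffle_cat_alt():
    a, b = get_inputs()
    x = ShuffleCatAlt()(a, b)
    assert x.shape == (4, 8, 4, 4)
    assert torch.equal(x[:, ::2], a) and torch.equal(x[:, 1::2], b)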
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_copy_zeros_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = (-1 + x1) % 2
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 == tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + (x0 + 16 * triton_helpers.div_floor_integer(-1 +
x1, 2) + 64 * x2), tmp6 & xmask, other=0.0)
tmp8 = x3 // 16 % 8 % 2
tmp9 = tmp8 == tmp4
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (x1 // 2) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = 0.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp6, tmp7, tmp12)
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_copy_zeros_0[grid(512)](arg1_1, arg0_1, buf0, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class ShuffleCatAltNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tony23545/yolact_edge
|
ShuffleCatAlt
| false
| 10,915
|
[
"MIT"
] | 0
|
11840512ab46f22dce6aea37a7823110175adffa
|
https://github.com/tony23545/yolact_edge/tree/11840512ab46f22dce6aea37a7823110175adffa
|
ShuffleCatChunk
|
import torch
import torch.nn as nn
class ShuffleCatChunk(nn.Module):
def forward(self, a, b):
assert a.size() == b.size()
_n, c, _h, _w = a.size()
a = torch.chunk(a, chunks=c, dim=1)
b = torch.chunk(b, chunks=c, dim=1)
x = [None] * (c * 2)
x[::2] = a
x[1::2] = b
x = torch.cat(x, dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 64 * x2), tmp9 & xmask, eviction_policy
='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 5, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 6, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp31 = tmp0 >= tmp27
tmp32 = tl.full([1], 7, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tmp0 >= tmp32
tl.full([1], 8, tl.int64)
tmp39 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), tmp36 & xmask,
eviction_policy='evict_last', other=0.0)
tmp40 = tl.where(tmp34, tmp35, tmp39)
tmp41 = tl.where(tmp29, tmp30, tmp40)
tmp42 = tl.where(tmp24, tmp25, tmp41)
tmp43 = tl.where(tmp19, tmp20, tmp42)
tmp44 = tl.where(tmp14, tmp15, tmp43)
tmp45 = tl.where(tmp9, tmp10, tmp44)
tmp46 = tl.where(tmp4, tmp5, tmp45)
tl.store(out_ptr0 + x3, tmp46, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, arg1_1, buf0, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class ShuffleCatChunkNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tony23545/yolact_edge
|
ShuffleCatChunk
| false
| 10,916
|
[
"MIT"
] | 0
|
11840512ab46f22dce6aea37a7823110175adffa
|
https://github.com/tony23545/yolact_edge/tree/11840512ab46f22dce6aea37a7823110175adffa
|
MuLawDecoding
|
import torch
from torch import Tensor
import torchaudio.functional as F
class MuLawDecoding(torch.nn.Module):
"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and quantization_channels - 1
and returns a signal scaled between -1 and 1.
Args:
quantization_channels (int, optional): Number of channels. (Default: ``256``)
"""
__constants__ = ['quantization_channels']
def __init__(self, quantization_channels: 'int'=256) ->None:
super(MuLawDecoding, self).__init__()
self.quantization_channels = quantization_channels
def forward(self, x_mu: 'Tensor') ->Tensor:
"""
Args:
x_mu (Tensor): A mu-law encoded signal which needs to be decoded.
Returns:
Tensor: The signal decoded.
"""
return F.mu_law_decoding(x_mu, self.quantization_channels)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
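# Hedged round-trip sketch (illustration only): mu-law encode a signal in [-1, 1] with
# torchaudio.functional and decode it with the module above; the reconstruction error is
# bounded by the 256-level quantization.
def _demo_mu_law_roundtrip():
    decoder = MuLawDecoding(quantization_channels=256)
    waveform = torch.rand(1, 4000) * 2 - 1            # values in [-1, 1]
    encoded = F.mu_law_encoding(waveform, 256)        # integers in [0, 255]
    decoded = decoder(encoded)
    assert decoded.shape == waveform.shape
    assert (decoded - waveform).abs().max() < 0.1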
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_div_exp_lift_fresh_log1p_mul_sign_sub_0(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.00392156862745098
tmp2 = tmp0 * tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp4 - tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = tmp7 < tmp6
tmp9 = tmp8.to(tl.int8)
tmp10 = tmp6 < tmp7
tmp11 = tmp10.to(tl.int8)
tmp12 = tmp9 - tmp11
tmp13 = tmp12.to(tmp6.dtype)
tmp14 = tl_math.abs(tmp6)
tmp15 = 5.545177459716797
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 - tmp5
tmp19 = tmp13 * tmp18
tmp20 = tmp19 * tmp1
tl.store(out_ptr0 + x0, tmp20, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_div_exp_lift_fresh_log1p_mul_sign_sub_0[grid(256)
](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MuLawDecodingNew(torch.nn.Module):
"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and quantization_channels - 1
and returns a signal scaled between -1 and 1.
Args:
quantization_channels (int, optional): Number of channels. (Default: ``256``)
"""
__constants__ = ['quantization_channels']
def __init__(self, quantization_channels: 'int'=256) ->None:
super(MuLawDecodingNew, self).__init__()
self.quantization_channels = quantization_channels
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tbright17/audio
|
MuLawDecoding
| false
| 10,917
|
[
"BSD-2-Clause"
] | 0
|
00d38203e401b8d9472a8f8394a10e2c309be02c
|
https://github.com/tbright17/audio/tree/00d38203e401b8d9472a8f8394a10e2c309be02c
|
TransposedUpsample
|
import torch
import torch.nn as nn
class TransposedUpsample(nn.Module):
"""Learned 2x upsampling without padding"""
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels, self.out_channels,
kernel_size=ks, stride=2)
def forward(self, x):
return self.up(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 121 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 11, 11), (484, 121, 11, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(1936)](buf1, primals_2, 1936,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class TransposedUpsampleNew(nn.Module):
"""Learned 2x upsampling without padding"""
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels, self.out_channels,
kernel_size=ks, stride=2)
def forward(self, input_0):
primals_1 = self.up.weight
primals_2 = self.up.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
transat/latent-diffusion
|
TransposedUpsample
| false
| 10,918
|
[
"MIT"
] | 0
|
1ea0d5bb3fb0fe3f7e8c42cbae91423780977f83
|
https://github.com/transat/latent-diffusion/tree/1ea0d5bb3fb0fe3f7e8c42cbae91423780977f83
|
MuLawEncoding
|
import torch
from torch import Tensor
import torchaudio.functional as F
class MuLawEncoding(torch.nn.Module):
"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1
Args:
quantization_channels (int, optional): Number of channels. (Default: ``256``)
"""
__constants__ = ['quantization_channels']
def __init__(self, quantization_channels: 'int'=256) ->None:
super(MuLawEncoding, self).__init__()
self.quantization_channels = quantization_channels
def forward(self, x: 'Tensor') ->Tensor:
"""
Args:
x (Tensor): A signal to be encoded.
Returns:
x_mu (Tensor): An encoded signal.
"""
return F.mu_law_encoding(x, self.quantization_channels)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
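# Reference sketch (an assumption added for clarity, not part of the original
# module): the companding-and-quantization that F.mu_law_encoding applies for
# quantization_channels=256, written out in plain PyTorch.
import math

def mu_law_encode_reference(x: Tensor, quantization_channels: int = 256) -> Tensor:
    mu = quantization_channels - 1
    # Compress: y = sign(x) * log(1 + mu * |x|) / log(1 + mu), still in [-1, 1].
    y = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / math.log1p(mu)
    # Map [-1, 1] onto integer classes [0, mu].
    return ((y + 1.0) / 2.0 * mu + 0.5).to(torch.int64)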
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_abs_add_div_lift_fresh_log1p_mul_sign_0(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tmp8 = tl_math.abs(tmp0)
tmp9 = 255.0
tmp10 = tmp9 * tmp8
tmp11 = libdevice.log1p(tmp10)
tmp12 = tmp7 * tmp11
tmp13 = 0.18033687961558437
tmp14 = tmp12 * tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = 0.5
tmp18 = tmp16 * tmp17
tmp19 = tmp18 * tmp9
tmp20 = tmp19 + tmp17
tmp21 = tmp20.to(tl.int64)
tl.store(out_ptr0 + x0, tmp21, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_abs_add_div_lift_fresh_log1p_mul_sign_0[grid
(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MuLawEncodingNew(torch.nn.Module):
"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1
Args:
quantization_channels (int, optional): Number of channels. (Default: ``256``)
"""
__constants__ = ['quantization_channels']
def __init__(self, quantization_channels: 'int'=256) ->None:
super(MuLawEncodingNew, self).__init__()
self.quantization_channels = quantization_channels
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tbright17/audio
|
MuLawEncoding
| false
| 10,919
|
[
"BSD-2-Clause"
] | 0
|
00d38203e401b8d9472a8f8394a10e2c309be02c
|
https://github.com/tbright17/audio/tree/00d38203e401b8d9472a8f8394a10e2c309be02c
|
SlidingWindowCmn
|
import torch
from torch import Tensor
import torchaudio.functional as F
class SlidingWindowCmn(torch.nn.Module):
"""
Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.
Args:
cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
Only applicable if center == false, ignored if center==true (int, default = 100)
center (bool, optional): If true, use a window centered on the current frame
(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)
norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false)
"""
def __init__(self, cmn_window: 'int'=600, min_cmn_window: 'int'=100,
center: 'bool'=False, norm_vars: 'bool'=False) ->None:
super().__init__()
self.cmn_window = cmn_window
self.min_cmn_window = min_cmn_window
self.center = center
self.norm_vars = norm_vars
def forward(self, waveform: 'Tensor') ->Tensor:
"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Tensor of audio of dimension (..., time).
"""
cmn_waveform = F.sliding_window_cmn(waveform, self.cmn_window, self
.min_cmn_window, self.center, self.norm_vars)
return cmn_waveform
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
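# Hedged note (an assumption, covering only this degenerate case): with a
# 4-frame input and the default cmn_window=600, the window spans the whole
# utterance, so the generated kernel below reduces to subtracting the
# per-coefficient mean over the time axis. The running-window behaviour of
# F.sliding_window_cmn on realistic inputs is not captured by this shortcut.
def full_window_cmn_reference(waveform: Tensor) -> Tensor:
    # waveform: (..., time, freq); valid only when the window covers all frames.
    return waveform - waveform.mean(dim=-2, keepdim=True)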
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_copy_div_sub_zeros_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp3 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = tmp8 + tmp3
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp12 = tmp3 - tmp11
tmp13 = tl.full([1], 2, tl.int32)
tmp14 = tmp0 == tmp13
tmp15 = tmp7 - tmp11
tmp16 = tl.full([1], 1, tl.int32)
tmp17 = tmp0 == tmp16
tmp18 = tmp5 - tmp11
tmp19 = tl.full([1], 0, tl.int32)
tmp20 = tmp0 == tmp19
tmp21 = tmp4 - tmp11
tmp22 = 0.0
tmp23 = tl.where(tmp20, tmp21, tmp22)
tmp24 = tl.where(tmp17, tmp18, tmp23)
tmp25 = tl.where(tmp14, tmp15, tmp24)
tmp26 = tl.where(tmp2, tmp12, tmp25)
tl.store(out_ptr0 + x3, tmp26, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_copy_div_sub_zeros_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class SlidingWindowCmnNew(torch.nn.Module):
"""
Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.
Args:
cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
Only applicable if center == false, ignored if center==true (int, default = 100)
center (bool, optional): If true, use a window centered on the current frame
(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)
norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false)
"""
def __init__(self, cmn_window: 'int'=600, min_cmn_window: 'int'=100,
center: 'bool'=False, norm_vars: 'bool'=False) ->None:
super().__init__()
self.cmn_window = cmn_window
self.min_cmn_window = min_cmn_window
self.center = center
self.norm_vars = norm_vars
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tbright17/audio
|
SlidingWindowCmn
| false
| 10,920
|
[
"BSD-2-Clause"
] | 0
|
00d38203e401b8d9472a8f8394a10e2c309be02c
|
https://github.com/tbright17/audio/tree/00d38203e401b8d9472a8f8394a10e2c309be02c
|
ShuffleCat
|
import torch
import torch.nn as nn
class ShuffleCat(nn.Module):
def forward(self, a, b):
assert a.size() == b.size()
n, c, h, w = a.size()
a = a.permute(0, 2, 3, 1).contiguous().view(-1, c)
b = b.permute(0, 2, 3, 1).contiguous().view(-1, c)
x = torch.cat((a, b), dim=0).transpose(1, 0).contiguous()
x = x.view(c * 2, n, h, w).permute(1, 0, 2, 3)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
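# Minimal eager shape check (an added sketch, not from the original repo): the
# shuffle-cat above interleaves the two inputs and doubles the channel count.
_a = torch.rand(2, 3, 5, 5)
_b = torch.rand(2, 3, 5, 5)
assert ShuffleCat()(_a, _b).shape == (2, 6, 5, 5)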
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 128
x1 = xindex // 128
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (16 * x1 + 64 * (x0 // 16 % 4) + x0 % 16),
tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp9 = tl.load(in_ptr1 + (16 * x1 + 64 * ((-64 + x0) // 16 % 4) + (-64 +
x0) % 16), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(512)](arg0_1, arg1_1, buf0, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return reinterpret_tensor(buf0, (4, 8, 4, 4), (16, 64, 4, 1), 0),
class ShuffleCatNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tony23545/yolact_edge
|
ShuffleCat
| false
| 10,921
|
[
"MIT"
] | 0
|
11840512ab46f22dce6aea37a7823110175adffa
|
https://github.com/tony23545/yolact_edge/tree/11840512ab46f22dce6aea37a7823110175adffa
|
AmplitudeToDB
|
import math
import torch
from torch import Tensor
import torchaudio.functional as F
from typing import Optional
class AmplitudeToDB(torch.nn.Module):
"""Turn a tensor from the power/amplitude scale to the decibel scale.
This output depends on the maximum value in the input tensor, and so
    may return different values for an audio clip split into snippets vs.
    a full clip.
Args:
stype (str, optional): scale of input tensor ('power' or 'magnitude'). The
power being the elementwise square of the magnitude. (Default: ``'power'``)
top_db (float, optional): minimum negative cut-off in decibels. A reasonable number
is 80. (Default: ``None``)
"""
__constants__ = ['multiplier', 'amin', 'ref_value', 'db_multiplier']
def __init__(self, stype: 'str'='power', top_db: 'Optional[float]'=None
) ->None:
super(AmplitudeToDB, self).__init__()
self.stype = stype
if top_db is not None and top_db < 0:
raise ValueError('top_db must be positive value')
self.top_db = top_db
self.multiplier = 10.0 if stype == 'power' else 20.0
self.amin = 1e-10
self.ref_value = 1.0
self.db_multiplier = math.log10(max(self.amin, self.ref_value))
def forward(self, x: 'Tensor') ->Tensor:
"""Numerically stable implementation from Librosa.
https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html
Args:
x (Tensor): Input tensor before being converted to decibel scale.
Returns:
Tensor: Output tensor in decibel scale.
"""
return F.amplitude_to_DB(x, self.multiplier, self.amin, self.
db_multiplier, self.top_db)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
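# Reference sketch (an assumption added for clarity): with the defaults used
# here (stype='power', ref_value=1.0, top_db=None), the forward pass is a
# clamped log10 scaled to decibels, which is what the fused kernel below computes.
def amplitude_to_db_reference(x: Tensor, multiplier: float = 10.0,
                              amin: float = 1e-10, db_multiplier: float = 0.0) -> Tensor:
    # top_db clipping is omitted because top_db is None in this record.
    return multiplier * torch.log10(torch.clamp(x, min=amin)) - multiplier * db_multiplier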
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
from typing import Optional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_log10_mul_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1e-10
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = libdevice.log10(tmp2)
tmp4 = 10.0
tmp5 = tmp3 * tmp4
tmp6 = 0.0
tmp7 = tmp5 - tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_log10_mul_sub_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class AmplitudeToDBNew(torch.nn.Module):
"""Turn a tensor from the power/amplitude scale to the decibel scale.
This output depends on the maximum value in the input tensor, and so
    may return different values for an audio clip split into snippets vs.
    a full clip.
Args:
stype (str, optional): scale of input tensor ('power' or 'magnitude'). The
power being the elementwise square of the magnitude. (Default: ``'power'``)
top_db (float, optional): minimum negative cut-off in decibels. A reasonable number
is 80. (Default: ``None``)
"""
__constants__ = ['multiplier', 'amin', 'ref_value', 'db_multiplier']
def __init__(self, stype: 'str'='power', top_db: 'Optional[float]'=None
) ->None:
super(AmplitudeToDBNew, self).__init__()
self.stype = stype
if top_db is not None and top_db < 0:
raise ValueError('top_db must be positive value')
self.top_db = top_db
self.multiplier = 10.0 if stype == 'power' else 20.0
self.amin = 1e-10
self.ref_value = 1.0
self.db_multiplier = math.log10(max(self.amin, self.ref_value))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tbright17/audio
|
AmplitudeToDB
| false
| 10,922
|
[
"BSD-2-Clause"
] | 0
|
00d38203e401b8d9472a8f8394a10e2c309be02c
|
https://github.com/tbright17/audio/tree/00d38203e401b8d9472a8f8394a10e2c309be02c
|
SpatialRescaler
|
import torch
from functools import partial
import torch.nn as nn
class SpatialRescaler(nn.Module):
def __init__(self, n_stages=1, method='bilinear', multiplier=0.5,
in_channels=3, out_channels=None, bias=False):
super().__init__()
self.n_stages = n_stages
assert self.n_stages >= 0
assert method in ['nearest', 'linear', 'bilinear', 'trilinear',
'bicubic', 'area']
self.multiplier = multiplier
self.interpolator = partial(torch.nn.functional.interpolate, mode=
method)
self.remap_output = out_channels is not None
if self.remap_output:
None
self.channel_mapper = nn.Conv2d(in_channels, out_channels, 1,
bias=bias)
def forward(self, x):
for stage in range(self.n_stages):
x = self.interpolator(x, scale_factor=self.multiplier)
if self.remap_output:
x = self.channel_mapper(x)
return x
def encode(self, x):
return self(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
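# Hedged equivalence note (an assumption): with n_stages=1, multiplier=0.5 and
# no output remapping, the module above is a single bilinear downsample, e.g.:
_x = torch.rand(4, 4, 4, 4)
_y = torch.nn.functional.interpolate(_x, scale_factor=0.5, mode='bilinear')
assert _y.shape == (4, 4, 2, 2)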
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from functools import partial
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x2 = xindex // 4
x3 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = x0
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 + tmp2
tmp17 = tmp16 * tmp4
tmp18 = tmp17 - tmp2
tmp19 = triton_helpers.maximum(tmp18, tmp7)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 + tmp10
tmp22 = triton_helpers.minimum(tmp21, tmp12)
tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp25 = tmp23 - tmp24
tmp26 = tmp20.to(tl.float32)
tmp27 = tmp19 - tmp26
tmp28 = triton_helpers.maximum(tmp27, tmp7)
tmp29 = 1.0
tmp30 = triton_helpers.minimum(tmp28, tmp29)
tmp31 = tmp25 * tmp30
tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp9 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp9 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp30
tmp36 = tmp32 + tmp35
tmp37 = tmp24 + tmp31
tmp38 = tmp37 - tmp36
tmp39 = tmp9.to(tl.float32)
tmp40 = tmp8 - tmp39
tmp41 = triton_helpers.maximum(tmp40, tmp7)
tmp42 = triton_helpers.minimum(tmp41, tmp29)
tmp43 = tmp38 * tmp42
tmp44 = tmp36 + tmp43
tl.store(in_out_ptr0 + x3, tmp44, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
buf2 = buf0
del buf0
buf3 = buf2
del buf2
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(64)](buf3, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf3,
class SpatialRescalerNew(nn.Module):
def __init__(self, n_stages=1, method='bilinear', multiplier=0.5,
in_channels=3, out_channels=None, bias=False):
super().__init__()
self.n_stages = n_stages
assert self.n_stages >= 0
assert method in ['nearest', 'linear', 'bilinear', 'trilinear',
'bicubic', 'area']
self.multiplier = multiplier
self.interpolator = partial(torch.nn.functional.interpolate, mode=
method)
self.remap_output = out_channels is not None
if self.remap_output:
None
self.channel_mapper = nn.Conv2d(in_channels, out_channels, 1,
bias=bias)
def encode(self, x):
return self(x)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
transat/latent-diffusion
|
SpatialRescaler
| false
| 10,923
|
[
"MIT"
] | 0
|
1ea0d5bb3fb0fe3f7e8c42cbae91423780977f83
|
https://github.com/transat/latent-diffusion/tree/1ea0d5bb3fb0fe3f7e8c42cbae91423780977f83
|
hsigmoid
|
import torch
import torch.onnx
import torch
import torch.nn as nn
import torch.nn.functional as F
class hsigmoid(nn.Module):
def forward(self, x):
out = F.relu6(x + 3, inplace=True) / 6
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
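# Reference sketch (an added equivalence, not from the original repo):
# relu6(x + 3) / 6 is the same as clamping (x + 3) / 6 to [0, 1]; the fused
# kernel below folds the division into a multiply by 1/6.
def hsigmoid_reference(x):
    return torch.clamp((x + 3.0) / 6.0, 0.0, 1.0)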
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.onnx
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class hsigmoidNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tomy-0000/pytorch-ssd
|
hsigmoid
| false
| 10,924
|
[
"MIT"
] | 0
|
620c0020bbd418001d10263559406bb464139419
|
https://github.com/tomy-0000/pytorch-ssd/tree/620c0020bbd418001d10263559406bb464139419
|
BiaffineAttention
|
import torch
import torch.nn as nn
class BiaffineAttention(nn.Module):
def __init__(self, in_features, out_features):
super(BiaffineAttention, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bilinear = torch.nn.Bilinear(in_features, in_features,
out_features, bias=False)
self.linear = torch.nn.Linear(2 * in_features, out_features, bias=True)
self.reset_parameters()
def forward(self, x1, x2):
return self.bilinear(x1, x2) + self.linear(torch.cat((x1, x2), dim=-1))
def reset_parameters(self):
self.bilinear.reset_parameters()
self.linear.reset_parameters()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
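# Minimal eager usage sketch (an added example; the generated `call` below
# requires CUDA, but the original module also runs on CPU):
_attn = BiaffineAttention(in_features=4, out_features=4)
_x1, _x2 = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
# score = bilinear(x1, x2) + linear(cat(x1, x2)), broadcast over leading dims.
assert _attn(_x1, _x2).shape == (4, 4, 4, 4)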
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(
primals_3, (64, 4), (4, 1), 0), primals_1, reinterpret_tensor(
primals_2, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_1
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_3, primals_2, buf2, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf3)
del primals_4
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(256)](buf4, buf3, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_5
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (64, 8), (8, 1), 0)
class BiaffineAttentionNew(nn.Module):
def __init__(self, in_features, out_features):
super(BiaffineAttentionNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bilinear = torch.nn.Bilinear(in_features, in_features,
out_features, bias=False)
self.linear = torch.nn.Linear(2 * in_features, out_features, bias=True)
self.reset_parameters()
def reset_parameters(self):
self.bilinear.reset_parameters()
self.linear.reset_parameters()
def forward(self, input_0, input_1):
primals_1 = self.bilinear.weight
primals_4 = self.linear.weight
primals_5 = self.linear.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
vietbt/ViTextnormASR
|
BiaffineAttention
| false
| 10,925
|
[
"Apache-2.0"
] | 0
|
57444aa7247c67b2628d1802e9ed53dae4857ee4
|
https://github.com/vietbt/ViTextnormASR/tree/57444aa7247c67b2628d1802e9ed53dae4857ee4
|
GEGLU
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class GEGLU(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim=-1)
return x * F.gelu(gate)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
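# Reference sketch (an assumption added for clarity): GEGLU projects to
# 2 * dim_out, splits the result in half, and gates one half with GELU of the
# other; the fused kernel below applies the split-and-gate to the addmm output.
def geglu_reference(x, weight, bias):
    value, gate = F.linear(x, weight, bias).chunk(2, dim=-1)
    return value * F.gelu(gate)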
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865476
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp10 = tmp0 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_mul_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf0, (4, 4, 4, 4), (128, 32, 8, 1), 0
), reinterpret_tensor(buf0, (4, 4, 4, 4), (128, 32, 8, 1), 4)
class GEGLUNew(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, input_0):
primals_1 = self.proj.weight
primals_2 = self.proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
transat/latent-diffusion
|
GEGLU
| false
| 10,926
|
[
"MIT"
] | 0
|
1ea0d5bb3fb0fe3f7e8c42cbae91423780977f83
|
https://github.com/transat/latent-diffusion/tree/1ea0d5bb3fb0fe3f7e8c42cbae91423780977f83
|
Vol
|
import math
import torch
from torch import Tensor
import torchaudio.functional as F
class Vol(torch.nn.Module):
"""Add a volume to an waveform.
Args:
gain (float): Interpreted according to the given gain_type:
If ``gain_type`` = ``amplitude``, ``gain`` is a positive amplitude ratio.
If ``gain_type`` = ``power``, ``gain`` is a power (voltage squared).
If ``gain_type`` = ``db``, ``gain`` is in decibels.
gain_type (str, optional): Type of gain. One of: ``amplitude``, ``power``, ``db`` (Default: ``amplitude``)
"""
def __init__(self, gain: 'float', gain_type: 'str'='amplitude'):
super(Vol, self).__init__()
self.gain = gain
self.gain_type = gain_type
if gain_type in ['amplitude', 'power'] and gain < 0:
raise ValueError(
'If gain_type = amplitude or power, gain must be positive.')
def forward(self, waveform: 'Tensor') ->Tensor:
"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Tensor of audio of dimension (..., time).
"""
if self.gain_type == 'amplitude':
waveform = waveform * self.gain
if self.gain_type == 'db':
waveform = F.gain(waveform, self.gain)
if self.gain_type == 'power':
waveform = F.gain(waveform, 10 * math.log10(self.gain))
return torch.clamp(waveform, -1, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'gain': 4}]
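# Hedged note (an assumption): for gain_type='amplitude' and gain=4, the whole
# forward pass collapses to a fused multiply-and-clamp, as in the kernel below.
_wave = torch.rand(4, 4, 4, 4)
_out = torch.clamp(_wave * 4.0, -1.0, 1.0)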
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp0 * tmp1
tmp3 = -1.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 1.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class VolNew(torch.nn.Module):
"""Add a volume to an waveform.
Args:
gain (float): Interpreted according to the given gain_type:
If ``gain_type`` = ``amplitude``, ``gain`` is a positive amplitude ratio.
If ``gain_type`` = ``power``, ``gain`` is a power (voltage squared).
If ``gain_type`` = ``db``, ``gain`` is in decibels.
gain_type (str, optional): Type of gain. One of: ``amplitude``, ``power``, ``db`` (Default: ``amplitude``)
"""
def __init__(self, gain: 'float', gain_type: 'str'='amplitude'):
super(VolNew, self).__init__()
self.gain = gain
self.gain_type = gain_type
if gain_type in ['amplitude', 'power'] and gain < 0:
raise ValueError(
'If gain_type = amplitude or power, gain must be positive.')
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tbright17/audio
|
Vol
| false
| 10,927
|
[
"BSD-2-Clause"
] | 0
|
00d38203e401b8d9472a8f8394a10e2c309be02c
|
https://github.com/tbright17/audio/tree/00d38203e401b8d9472a8f8394a10e2c309be02c
|
ImageGradients
|
import torch
import torch as th
import torch.utils.data
class ImageGradients(th.nn.Module):
def __init__(self, c_in):
super(ImageGradients, self).__init__()
self.dx = th.nn.Conv2d(c_in, c_in, [3, 3], padding=1, bias=False,
groups=c_in)
self.dy = th.nn.Conv2d(c_in, c_in, [3, 3], padding=1, bias=False,
groups=c_in)
self.dx.weight.requires_grad = False
self.dy.weight.requires_grad = False
self.dx.weight.data.zero_()
self.dx.weight.data[:, :, 0, 0] = -1
self.dx.weight.data[:, :, 0, 2] = 1
self.dx.weight.data[:, :, 1, 0] = -2
self.dx.weight.data[:, :, 1, 2] = 2
self.dx.weight.data[:, :, 2, 0] = -1
self.dx.weight.data[:, :, 2, 2] = 1
self.dy.weight.data.zero_()
self.dy.weight.data[:, :, 0, 0] = -1
self.dy.weight.data[:, :, 2, 0] = 1
self.dy.weight.data[:, :, 0, 1] = -2
self.dy.weight.data[:, :, 2, 1] = 2
self.dy.weight.data[:, :, 0, 2] = -1
self.dy.weight.data[:, :, 2, 2] = 1
def forward(self, im):
return th.cat([self.dx(im), self.dy(im)], 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c_in': 4}]
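# The fixed depthwise weights above are the Sobel operators; a compact,
# equivalent way to write them (an added illustration, not the author's code):
_sobel_x = torch.tensor([[-1.0, 0.0, 1.0],
                         [-2.0, 0.0, 2.0],
                         [-1.0, 0.0, 1.0]])
_sobel_y = _sobel_x.t()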
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch as th
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
tl.store(out_ptr1 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x0 + 64 * x2 + x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x0 + 64 * x2 + (-4 + x1)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 1, 3, 3), (9, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 16)](arg1_1, buf0, buf2, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del arg1_1
buf1 = extern_kernels.convolution(buf0, arg0_1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 1, 16, 4))
del arg0_1
del buf0
buf3 = extern_kernels.convolution(buf2, arg2_1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 1, 16, 4))
del arg2_1
del buf2
buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
triton_poi_fused_cat_1[grid(512)](buf1, buf3, buf4, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
del buf3
return buf4,
class ImageGradientsNew(th.nn.Module):
def __init__(self, c_in):
super(ImageGradientsNew, self).__init__()
self.dx = th.nn.Conv2d(c_in, c_in, [3, 3], padding=1, bias=False,
groups=c_in)
self.dy = th.nn.Conv2d(c_in, c_in, [3, 3], padding=1, bias=False,
groups=c_in)
self.dx.weight.requires_grad = False
self.dy.weight.requires_grad = False
self.dx.weight.data.zero_()
self.dx.weight.data[:, :, 0, 0] = -1
self.dx.weight.data[:, :, 0, 2] = 1
self.dx.weight.data[:, :, 1, 0] = -2
self.dx.weight.data[:, :, 1, 2] = 2
self.dx.weight.data[:, :, 2, 0] = -1
self.dx.weight.data[:, :, 2, 2] = 1
self.dy.weight.data.zero_()
self.dy.weight.data[:, :, 0, 0] = -1
self.dy.weight.data[:, :, 2, 0] = 1
self.dy.weight.data[:, :, 0, 1] = -2
self.dy.weight.data[:, :, 2, 1] = 2
self.dy.weight.data[:, :, 0, 2] = -1
self.dy.weight.data[:, :, 2, 2] = 1
def forward(self, input_0):
arg0_1 = self.dx.weight
arg2_1 = self.dy.weight
arg1_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
sutkarsh/ttools
|
ImageGradients
| false
| 10,928
|
[
"MIT"
] | 0
|
a2e5fbf308566c0c54ab9d6ad1d9f8bc63f8fe99
|
https://github.com/sutkarsh/ttools/tree/a2e5fbf308566c0c54ab9d6ad1d9f8bc63f8fe99
|
Sparsemax
|
import torch
import torch.utils.data
import torch.nn as nn
class Sparsemax(nn.Module):
"""Sparsemax function."""
def __init__(self, dim=None):
"""Initialize sparsemax activation
Args:
dim (int, optional): The dimension over which to apply the sparsemax function.
"""
super(Sparsemax, self).__init__()
self.dim = -1 if dim is None else dim
def forward(self, input):
"""Forward function.
Args:
input (torch.Tensor): Input tensor. First dimension should be the batch size
Returns:
torch.Tensor: [batch_size x number_of_logits] Output tensor
"""
original_size = input.size()
input = input.view(-1, input.size(self.dim))
dim = 1
number_of_logits = input.size(dim)
input = input - torch.max(input, dim=dim, keepdim=True)[0].expand_as(
input)
zs = torch.sort(input=input, dim=dim, descending=True)[0]
range = torch.arange(start=1, end=number_of_logits + 1, device=
input.device).view(1, -1)
range = range.expand_as(zs).type(input.type())
bound = 1 + range * zs
cumulative_sum_zs = torch.cumsum(zs, dim)
is_gt = torch.gt(bound, cumulative_sum_zs).type(input.type())
k = torch.max(is_gt * range, dim, keepdim=True)[0]
zs_sparse = is_gt * zs
taus = (torch.sum(zs_sparse, dim, keepdim=True) - 1) / k
taus = taus.expand_as(input)
self.output = torch.max(torch.zeros_like(input), input - taus)
output = self.output.view(original_size)
return output
def backward(self, grad_output):
"""Backward function."""
dim = 1
nonzeros = torch.ne(self.output, 0)
sum = torch.sum(grad_output * nonzeros, dim=dim) / torch.sum(nonzeros,
dim=dim)
self.grad_input = nonzeros * (grad_output - sum.expand_as(grad_output))
return self.grad_input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
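# Tiny worked example (an added sketch) of the tau-threshold construction the
# module uses; subtracting the row max first would not change the result.
_z = torch.tensor([[2.0, 1.0, 0.1, -1.0]])
_zs, _ = torch.sort(_z, dim=1, descending=True)
_rng = torch.arange(1, _z.size(1) + 1, dtype=_z.dtype).view(1, -1)
_is_gt = (1 + _rng * _zs > _zs.cumsum(dim=1)).to(_z.dtype)
_k = (_is_gt * _rng).max(dim=1, keepdim=True)[0]
_tau = ((_is_gt * _zs).sum(dim=1, keepdim=True) - 1) / _k
_p = torch.clamp(_z - _tau, min=0.0)  # tensor([[1., 0., 0., 0.]]), sums to 1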
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused_cumsum_sort_sub_0(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = r1
tmp10 = tmp9.to(tl.int16)
tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13, _tmp14 = triton_helpers.sort_with_index(tmp11, tmp12, None, 1,
stable=False, descending=True)
tmp15 = tmp13.to(tl.float32)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp17, = tl.associative_scan((tmp16,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp8, xmask)
tl.store(out_ptr1 + (r1 + 4 * x0), tmp13, xmask)
tl.store(out_ptr2 + (r1 + 4 * x0), tmp17, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_gt_max_mul_sum_1(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp30 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp1 * tmp0
tmp3 = tmp2 + tmp1
tmp5 = tmp3 > tmp4
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp1
tmp9 = 2.0
tmp10 = tmp9 * tmp8
tmp11 = tmp10 + tmp1
tmp13 = tmp11 > tmp12
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 * tmp9
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp18 = 3.0
tmp19 = tmp18 * tmp17
tmp20 = tmp19 + tmp1
tmp22 = tmp20 > tmp21
tmp23 = tmp22.to(tl.float32)
tmp24 = tmp23 * tmp18
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp27 = 4.0
tmp28 = tmp27 * tmp26
tmp29 = tmp28 + tmp1
tmp31 = tmp29 > tmp30
tmp32 = tmp31.to(tl.float32)
tmp33 = tmp32 * tmp27
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp6 * tmp0
tmp36 = tmp14 * tmp8
tmp37 = tmp35 + tmp36
tmp38 = tmp23 * tmp17
tmp39 = tmp37 + tmp38
tmp40 = tmp32 * tmp26
tmp41 = tmp39 + tmp40
tl.store(out_ptr0 + x0, tmp34, xmask)
tl.store(out_ptr1 + x0, tmp41, xmask)
@triton.jit
def triton_poi_fused_maximum_sub_zeros_like_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 - tmp2
tmp5 = tmp3 / tmp4
tmp6 = tmp0 - tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_cumsum_sort_sub_0[grid(64)](arg0_1, buf0, buf1,
buf3, 64, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
buf4 = empty_strided_cuda((64, 1), (1, 64), torch.float32)
buf5 = empty_strided_cuda((64, 1), (1, 64), torch.float32)
triton_poi_fused__to_copy_add_gt_max_mul_sum_1[grid(64)](buf1, buf3,
buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
buf6 = buf3
del buf3
triton_poi_fused_maximum_sub_zeros_like_2[grid(256)](buf0, buf5,
buf4, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf4
del buf5
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf6
class SparsemaxNew(nn.Module):
"""Sparsemax function."""
def __init__(self, dim=None):
"""Initialize sparsemax activation
Args:
dim (int, optional): The dimension over which to apply the sparsemax function.
"""
super(SparsemaxNew, self).__init__()
self.dim = -1 if dim is None else dim
def backward(self, grad_output):
"""Backward function."""
dim = 1
nonzeros = torch.ne(self.output, 0)
sum = torch.sum(grad_output * nonzeros, dim=dim) / torch.sum(nonzeros,
dim=dim)
self.grad_input = nonzeros * (grad_output - sum.expand_as(grad_output))
return self.grad_input
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tkc-morita/secl
|
Sparsemax
| false
| 10,929
|
[
"MIT"
] | 0
|
d0156cea4fd95ea5071126dbf076a6da69752a37
|
https://github.com/tkc-morita/secl/tree/d0156cea4fd95ea5071126dbf076a6da69752a37
|
ConvChain
|
import torch
import torch.utils.data
import torch.nn as nn
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModule(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, zero pad the convolutions to maintain a constant size.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True, activation
=None, norm_layer=None):
super(ConvModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=padding, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class ConvChain(nn.Module):
"""Linear chain of convolution layers.
Args:
n_in(int): number of input channels.
ksize(int or list of int): size of the convolution kernel (square).
width(int or list of int): number of features channels in the intermediate layers.
depth(int): number of layers
strides(list of int): stride between kernels. If None, defaults to 1 for all.
pad(bool): if True, zero pad the convolutions to maintain a constant size.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, ksize=3, width=64, depth=3, strides=None, pad=
True, activation='relu', norm_layer=None):
super(ConvChain, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer'
assert isinstance(ksize, int) and ksize > 0 or isinstance(ksize, list
), 'Kernel size should be a positive integer or a list of integers'
assert isinstance(depth, int
) and depth > 0, 'Depth should be a positive integer'
assert isinstance(width, int) or isinstance(width, list
), 'Width should be a list or an int'
_in = [n_in]
if strides is None:
_strides = [1] * depth
else:
assert isinstance(strides, list), 'strides should be a list'
assert len(strides
) == depth, 'strides should have `depth` elements'
_strides = strides
if isinstance(width, int):
_in = _in + [width] * (depth - 1)
_out = [width] * depth
elif isinstance(width, list):
assert len(width
) == depth, 'Specifying width with a list should have `depth` elements'
_in = _in + width[:-1]
_out = width
if isinstance(ksize, int):
_ksizes = [ksize] * depth
elif isinstance(ksize, list):
assert len(ksize
) == depth, "kernel size list should have 'depth' entries"
_ksizes = ksize
_activations = [activation] * depth
_norms = [norm_layer] * depth
for lvl in range(depth):
self.add_module('conv{}'.format(lvl), ConvModule(_in[lvl], _out
[lvl], _ksizes[lvl], stride=_strides[lvl], pad=pad,
activation=_activations[lvl], norm_layer=_norms[lvl]))
def forward(self, x):
for m in self.children():
x = m(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
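# Minimal eager usage sketch (an added example): with the defaults mirrored by
# the generated kernels below (width=64, depth=3, 'relu'), a 4-channel input
# comes out with 64 channels at the same spatial size.
_chain = ConvChain(n_in=4)
assert _chain(torch.rand(4, 4, 4, 4)).shape == (4, 64, 4, 4)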
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, None)
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(4096)](buf1, primals_2,
4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(4096)](buf3, primals_5,
4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1))
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(4096)](buf5
, primals_7, buf6, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf6
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModule(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, zero pad the convolutions to maintain a constant size.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True, activation
=None, norm_layer=None):
super(ConvModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=padding, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class ConvChainNew(nn.Module):
"""Linear chain of convolution layers.
Args:
n_in(int): number of input channels.
ksize(int or list of int): size of the convolution kernel (square).
        width(int or list of int): number of feature channels in the intermediate layers.
depth(int): number of layers
strides(list of int): stride between kernels. If None, defaults to 1 for all.
pad(bool): if True, zero pad the convolutions to maintain a constant size.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, ksize=3, width=64, depth=3, strides=None, pad=
True, activation='relu', norm_layer=None):
super(ConvChainNew, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer'
assert isinstance(ksize, int) and ksize > 0 or isinstance(ksize, list
), 'Kernel size should be a positive integer or a list of integers'
assert isinstance(depth, int
) and depth > 0, 'Depth should be a positive integer'
assert isinstance(width, int) or isinstance(width, list
), 'Width should be a list or an int'
_in = [n_in]
if strides is None:
_strides = [1] * depth
else:
assert isinstance(strides, list), 'strides should be a list'
assert len(strides
) == depth, 'strides should have `depth` elements'
_strides = strides
if isinstance(width, int):
_in = _in + [width] * (depth - 1)
_out = [width] * depth
elif isinstance(width, list):
assert len(width
) == depth, 'Specifying width with a list should have `depth` elements'
_in = _in + width[:-1]
_out = width
if isinstance(ksize, int):
_ksizes = [ksize] * depth
elif isinstance(ksize, list):
assert len(ksize
) == depth, "kernel size list should have 'depth' entries"
_ksizes = ksize
_activations = [activation] * depth
_norms = [norm_layer] * depth
for lvl in range(depth):
self.add_module('conv{}'.format(lvl), ConvModule(_in[lvl], _out
[lvl], _ksizes[lvl], stride=_strides[lvl], pad=pad,
activation=_activations[lvl], norm_layer=_norms[lvl]))
def forward(self, input_0):
primals_1 = self.conv0.conv.weight
primals_2 = self.conv0.conv.bias
primals_4 = self.conv1.conv.weight
primals_5 = self.conv1.conv.bias
primals_6 = self.conv2.conv.weight
primals_7 = self.conv2.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
sutkarsh/ttools
|
ConvChain
| false
| 10,930
|
[
"MIT"
] | 0
|
a2e5fbf308566c0c54ab9d6ad1d9f8bc63f8fe99
|
https://github.com/sutkarsh/ttools/tree/a2e5fbf308566c0c54ab9d6ad1d9f8bc63f8fe99
|
DiscreteCrossEntropyLoss
|
import torch
import torch.utils.data
class DiscreteCrossEntropyLoss(torch.nn.Module):
def __init__(self, in_features, num_classes):
super(DiscreteCrossEntropyLoss, self).__init__()
self.in_features = in_features
self.num_classes = num_classes
self.fc = torch.nn.Linear(in_features, num_classes)
self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
def forward(self, x, target, mask=None):
x = self.fc(x)
loss = self.cross_entropy_loss(x, target)
if mask is not None:
loss = loss * mask
return loss
def pack_init_args(self):
args = {'in_features': self.in_features, 'num_classes': self.
num_classes}
return args
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'num_classes': 4}]
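# Editor's note: a minimal usage sketch, not part of the original entry. It assumes the
# DiscreteCrossEntropyLoss definition above is in scope and a PyTorch version (>= 1.10)
# whose CrossEntropyLoss accepts soft probabilistic targets of the same shape as the logits.
_demo_loss = DiscreteCrossEntropyLoss(in_features=4, num_classes=4)
_demo_x, _demo_target = torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])
_demo_per_elem = _demo_loss(_demo_x, _demo_target)   # reduction='none' keeps per-position losses
assert _demo_per_elem.shape == (4, 4, 4)             # the class dimension (dim=1) is reduced away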
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp20 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp24 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tl.store(out_ptr0 + x2, tmp27, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_mul_neg_sum_1[grid(64)](buf1,
primals_4, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
return buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0
class DiscreteCrossEntropyLossNew(torch.nn.Module):
def __init__(self, in_features, num_classes):
super(DiscreteCrossEntropyLossNew, self).__init__()
self.in_features = in_features
self.num_classes = num_classes
self.fc = torch.nn.Linear(in_features, num_classes)
self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
def pack_init_args(self):
args = {'in_features': self.in_features, 'num_classes': self.
num_classes}
return args
def forward(self, input_0, input_1):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
tkc-morita/secl
|
DiscreteCrossEntropyLoss
| false
| 10,931
|
[
"MIT"
] | 0
|
d0156cea4fd95ea5071126dbf076a6da69752a37
|
https://github.com/tkc-morita/secl/tree/d0156cea4fd95ea5071126dbf076a6da69752a37
|
Net
|
import torch
import torch.nn.functional as F
class Net(torch.nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(Net, self).__init__()
self.hidden = torch.nn.Linear(n_feature, n_hidden)
self.predict = torch.nn.Linear(n_hidden, n_output)
def forward(self, x):
x = torch.sigmoid(self.hidden(x))
x = self.predict(x)
out = F.log_softmax(x, dim=1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_feature': 4, 'n_hidden': 4, 'n_output': 4}]
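# Editor's note: a minimal usage sketch, not part of the original entry. It assumes the Net
# definition and imports above are in scope; log_softmax is applied over dim=1.
_demo_net = Net(n_feature=4, n_hidden=4, n_output=4)
_demo_logp = _demo_net(torch.rand([4, 4, 4, 4]))
assert _demo_logp.shape == (4, 4, 4, 4)
assert torch.allclose(_demo_logp.exp().sum(dim=1), torch.ones(4, 4, 4), atol=1e-5)  # valid log-probabilities along dim=1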
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(256)](buf1, primals_2, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__log_softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf4, primals_4
class NetNew(torch.nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(NetNew, self).__init__()
self.hidden = torch.nn.Linear(n_feature, n_hidden)
self.predict = torch.nn.Linear(n_hidden, n_output)
def forward(self, input_0):
primals_1 = self.hidden.weight
primals_2 = self.hidden.bias
primals_4 = self.predict.weight
primals_5 = self.predict.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
wikeex/pytorch-learning
|
Net
| false
| 10,932
|
[
"MIT"
] | 0
|
8cd710d65a52b58b1593fbba6c4134e08ea18d9f
|
https://github.com/wikeex/pytorch-learning/tree/8cd710d65a52b58b1593fbba6c4134e08ea18d9f
|
PSNR
|
import torch
import torch as th
import torch.utils.data
class PSNR(th.nn.Module):
def __init__(self):
super(PSNR, self).__init__()
self.mse = th.nn.MSELoss()
def forward(self, out, ref):
mse = self.mse(out, ref)
return -10 * th.log10(mse + 1e-12)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
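# Editor's note: a minimal usage sketch, not part of the original entry. It assumes the PSNR
# definition above is in scope. The module computes -10 * log10(MSE(out, ref) + 1e-12), so
# identical inputs saturate at 120 dB because of the 1e-12 floor.
_demo_psnr = PSNR()
_demo_ref = torch.rand([4, 4, 4, 4])
assert torch.isclose(_demo_psnr(_demo_ref, _demo_ref), torch.tensor(120.0))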
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch as th
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log10_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1e-12
tmp10 = tmp8 + tmp9
tmp11 = libdevice.log10(tmp10)
tmp12 = -10.0
tmp13 = tmp11 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log10_mse_loss_mul_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class PSNRNew(th.nn.Module):
def __init__(self):
super(PSNRNew, self).__init__()
self.mse = th.nn.MSELoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
sutkarsh/ttools
|
PSNR
| false
| 10,933
|
[
"MIT"
] | 0
|
a2e5fbf308566c0c54ab9d6ad1d9f8bc63f8fe99
|
https://github.com/sutkarsh/ttools/tree/a2e5fbf308566c0c54ab9d6ad1d9f8bc63f8fe99
|
FCNet
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class FCNet(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self.l1 = nn.Linear(input_size, 5)
self.relu = nn.ReLU()
self.l2 = nn.Linear(5, output_size)
def forward(self, x):
output = self.l1(x)
output = self.relu(output)
output = self.l2(output)
return output.view(-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
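# Editor's note: a minimal usage sketch, not part of the original entry. It assumes the FCNet
# definition and imports above are in scope. The trailing .view(-1) flattens the batched
# (4, 4, 4, 4) output into a single 256-element vector.
_demo_fcnet = FCNet(input_size=4, output_size=4)
_demo_flat = _demo_fcnet(torch.rand([4, 4, 4, 4]))
assert _demo_flat.shape == (256,)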
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 5
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (5, 4), (4, 1))
assert_size_stride(primals_2, (5,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 5), (5, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 5), (5, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 5), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 5), (80, 20, 5, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(320)](buf1,
primals_2, buf3, 320, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 5), (
5, 1), 0), reinterpret_tensor(primals_4, (5, 4), (1, 5), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (256,), (1,), 0), reinterpret_tensor(
primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 5), (
5, 1), 0), primals_4, buf3
class FCNetNew(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self.l1 = nn.Linear(input_size, 5)
self.relu = nn.ReLU()
self.l2 = nn.Linear(5, output_size)
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
rmfan/nni
|
FCNet
| false
| 10,934
|
[
"MIT"
] | 0
|
727ee1ce47e070061fe3dab8a2da5d3cd5e55546
|
https://github.com/rmfan/nni/tree/727ee1ce47e070061fe3dab8a2da5d3cd5e55546
|
FCChain
|
import torch
import torch.utils.data
import torch.nn as nn
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
class FCModule(nn.Module):
"""Basic fully connected module with optional dropout.
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
activation(str): nonlinear activation function.
dropout(float): dropout ratio if defined, default to None: no dropout.
"""
def __init__(self, n_in, n_out, activation=None, dropout=None):
super(FCModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer'
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer'
self.add_module('fc', nn.Linear(n_in, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
if dropout is not None:
self.add_module('dropout', nn.Dropout(dropout, inplace=True))
_init_fc_or_conv(self.fc, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class FCChain(nn.Module):
"""Linear chain of fully connected layers.
Args:
n_in(int): number of input channels.
        width(int or list of int): number of feature channels in the intermediate layers.
depth(int): number of layers
activation(str): nonlinear activation function between convolutions.
dropout(float or list of float): dropout ratio if defined, default to None: no dropout.
"""
def __init__(self, n_in, width=64, depth=3, activation='relu', dropout=None
):
super(FCChain, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer'
assert isinstance(depth, int
) and depth > 0, 'Depth should be a positive integer'
assert isinstance(width, int) or isinstance(width, list
), 'Width should be a list or an int'
_in = [n_in]
if isinstance(width, int):
_in = _in + [width] * (depth - 1)
_out = [width] * depth
elif isinstance(width, list):
assert len(width
                ) == depth, 'Specifying width with a list should have `depth` entries'
_in = _in + width[:-1]
_out = width
_activations = [activation] * depth
if dropout is not None:
assert isinstance(dropout, float) or isinstance(dropout, list
), 'Dropout should be a float or a list of floats'
if dropout is None or isinstance(dropout, float):
_dropout = [dropout] * depth
elif isinstance(dropout, list):
assert len(dropout
) == depth, "When specifying a list of dropout, the list should have 'depth' elements."
_dropout = dropout
for lvl in range(depth):
self.add_module('fc{}'.format(lvl), FCModule(_in[lvl], _out[lvl
], activation=_activations[lvl], dropout=_dropout[lvl]))
def forward(self, x):
for m in self.children():
x = m(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
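# Editor's note: a minimal usage sketch, not part of the original entry. It assumes the
# FCChain/FCModule definitions and imports above are in scope. With the defaults (width=64,
# depth=3, dropout=None) only the last tensor dimension is widened from n_in to 64.
_demo_fcchain = FCChain(n_in=4)
_demo_out = _demo_fcchain(torch.rand([4, 4, 4, 4]))
assert _demo_out.shape == (4, 4, 4, 64)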
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, None)
tl.store(out_ptr0 + x4, tmp6, None)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 256 * (x1 % 4 // 4) + 1024 * (
(4 * (x1 // 4 % 4) + x1 % 4) // 16)), None)
tl.store(out_ptr0 + x2, tmp0, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_view_2(in_out_ptr0, in_ptr0,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x4, tmp4, None)
tl.store(out_ptr1 + x4, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64), (64, 1))
assert_size_stride(primals_7, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf11 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch
.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_2, buf11, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
triton_poi_fused_view_1[grid(4096)](buf1, buf2, 4096, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 64), (64, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (64, 64), (1,
64), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf3
buf10 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch
.bool)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf4,
primals_5, buf10, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
triton_poi_fused_view_1[grid(4096)](buf4, buf5, 4096, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (64, 64), (64, 1), 0)
del buf4
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (64, 64), (1,
64), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf6
buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.
float32)
buf9 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_view_2[grid(4096)](buf7,
primals_7, buf8, buf9, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
del primals_7
return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, buf5, buf9, primals_6, buf10, primals_4, buf11
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
class FCModule(nn.Module):
"""Basic fully connected module with optional dropout.
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
activation(str): nonlinear activation function.
dropout(float): dropout ratio if defined, default to None: no dropout.
"""
def __init__(self, n_in, n_out, activation=None, dropout=None):
super(FCModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer'
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer'
self.add_module('fc', nn.Linear(n_in, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
if dropout is not None:
self.add_module('dropout', nn.Dropout(dropout, inplace=True))
_init_fc_or_conv(self.fc, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class FCChainNew(nn.Module):
"""Linear chain of fully connected layers.
Args:
n_in(int): number of input channels.
        width(int or list of int): number of feature channels in the intermediate layers.
depth(int): number of layers
activation(str): nonlinear activation function between convolutions.
dropout(float or list of float): dropout ratio if defined, default to None: no dropout.
"""
def __init__(self, n_in, width=64, depth=3, activation='relu', dropout=None
):
super(FCChainNew, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer'
assert isinstance(depth, int
) and depth > 0, 'Depth should be a positive integer'
assert isinstance(width, int) or isinstance(width, list
), 'Width should be a list or an int'
_in = [n_in]
if isinstance(width, int):
_in = _in + [width] * (depth - 1)
_out = [width] * depth
elif isinstance(width, list):
assert len(width
                ) == depth, 'Specifying width with a list should have `depth` entries'
_in = _in + width[:-1]
_out = width
_activations = [activation] * depth
if dropout is not None:
assert isinstance(dropout, float) or isinstance(dropout, list
), 'Dropout should be a float or a list of floats'
if dropout is None or isinstance(dropout, float):
_dropout = [dropout] * depth
elif isinstance(dropout, list):
assert len(dropout
) == depth, "When specifying a list of dropout, the list should have 'depth' elements."
_dropout = dropout
for lvl in range(depth):
self.add_module('fc{}'.format(lvl), FCModule(_in[lvl], _out[lvl
], activation=_activations[lvl], dropout=_dropout[lvl]))
def forward(self, input_0):
primals_1 = self.fc0.fc.weight
primals_2 = self.fc0.fc.bias
primals_4 = self.fc1.fc.weight
primals_5 = self.fc1.fc.bias
primals_6 = self.fc2.fc.weight
primals_7 = self.fc2.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
sutkarsh/ttools
|
FCChain
| false
| 10,935
|
[
"MIT"
] | 0
|
a2e5fbf308566c0c54ab9d6ad1d9f8bc63f8fe99
|
https://github.com/sutkarsh/ttools/tree/a2e5fbf308566c0c54ab9d6ad1d9f8bc63f8fe99
|
FixupBasicBlock
|
import torch
import torch as th
import torch.utils.data
import torch.nn as nn
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModule(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, zero pad the convolutions to maintain a constant size.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True, activation
=None, norm_layer=None):
super(ConvModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=padding, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class FixupBasicBlock(nn.Module):
expansion = 1
def __init__(self, n_features, ksize=3, pad=True, activation='relu'):
super(FixupBasicBlock, self).__init__()
self.bias1a = nn.Parameter(th.zeros(1))
self.conv1 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None)
self.bias1b = nn.Parameter(th.zeros(1))
self.activation = _get_activation(activation)
self.bias2a = nn.Parameter(th.zeros(1))
self.conv2 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None)
self.scale = nn.Parameter(th.ones(1))
self.bias2b = nn.Parameter(th.zeros(1))
self.activation2 = _get_activation(activation)
        self.ksize = ksize
self.pad = pad
def forward(self, x):
identity = x
out = self.conv1(x + self.bias1a)
out = self.activation(out + self.bias1b)
out = self.conv2(out + self.bias2a)
out = out * self.scale + self.bias2b
crop = (self.ksize - 1) // 2 * 2
if crop > 0 and not self.pad:
identity = identity[:, :, crop:-crop, crop:-crop]
out += identity
out = self.activation2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4}]
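# Editor's note: a minimal usage sketch, not part of the original entry. It assumes the
# FixupBasicBlock/ConvModule definitions and imports above are in scope. With pad=True the
# residual crop branch is skipped, so the block is shape-preserving.
_demo_block = FixupBasicBlock(n_features=4)
_demo_out = _demo_block(torch.rand([4, 4, 4, 4]))
assert _demo_out.shape == (4, 4, 4, 4)
assert (_demo_out >= 0).all()                        # the final ReLU keeps the output non-negative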
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch as th
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp8 = tl.load(in_ptr3 + 0)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 + tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp10 = tmp7 + tmp9
tmp11 = 0.0
tmp12 = tmp7 <= tmp11
tl.store(out_ptr0 + x3, tmp10, xmask)
tl.store(out_ptr1 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_relu_threshold_backward_2(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp2 * tmp4
tmp8 = tmp5 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp13 = 0.0
tmp14 = tmp12 <= tmp13
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf1, primals_4, primals_5, primals_6, buf2, buf7, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_4
del primals_5
del primals_6
buf3 = extern_kernels.convolution(buf2, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
buf5 = buf1
del buf1
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_mul_relu_threshold_backward_2[grid
(256)](buf4, primals_8, primals_9, primals_10, primals_1, buf5,
buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_10
del primals_8
return buf5, primals_3, primals_7, primals_9, buf0, buf2, buf4, buf6, buf7
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModule(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, zero pad the convolutions to maintain a constant size.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True, activation
=None, norm_layer=None):
super(ConvModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=padding, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class FixupBasicBlockNew(nn.Module):
expansion = 1
def __init__(self, n_features, ksize=3, pad=True, activation='relu'):
super(FixupBasicBlockNew, self).__init__()
self.bias1a = nn.Parameter(th.zeros(1))
self.conv1 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None)
self.bias1b = nn.Parameter(th.zeros(1))
self.activation = _get_activation(activation)
self.bias2a = nn.Parameter(th.zeros(1))
self.conv2 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None)
self.scale = nn.Parameter(th.ones(1))
self.bias2b = nn.Parameter(th.zeros(1))
self.activation2 = _get_activation(activation)
        self.ksize = ksize
self.pad = pad
def forward(self, input_0):
primals_2 = self.bias1a
primals_5 = self.bias1b
primals_6 = self.bias2a
primals_9 = self.scale
primals_10 = self.bias2b
primals_3 = self.conv1.conv.weight
primals_4 = self.conv1.conv.bias
primals_7 = self.conv2.conv.weight
primals_8 = self.conv2.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
sutkarsh/ttools
|
FixupBasicBlock
| false
| 10,936
|
[
"MIT"
] | 0
|
a2e5fbf308566c0c54ab9d6ad1d9f8bc63f8fe99
|
https://github.com/sutkarsh/ttools/tree/a2e5fbf308566c0c54ab9d6ad1d9f8bc63f8fe99
|
PFLDLoss
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class PFLDLoss(nn.Module):
"""Weighted loss of L2 distance with the pose angle for PFLD."""
def __init__(self):
super(PFLDLoss, self).__init__()
def forward(self, landmark_gt, euler_angle_gt, angle, landmarks):
"""
Calculate weighted L2 loss for PFLD.
Parameters
----------
landmark_gt : tensor
the ground truth of landmarks
euler_angle_gt : tensor
the ground truth of pose angle
angle : tensor
the predicted pose angle
        landmarks : tensor
the predicted landmarks
Returns
-------
output: tensor
the weighted L2 loss
output: tensor
the normal L2 loss
"""
weight_angle = torch.sum(1 - torch.cos(angle - euler_angle_gt), axis=1)
l2_distant = torch.sum((landmark_gt - landmarks) ** 2, axis=1)
return torch.mean(weight_angle * l2_distant), torch.mean(l2_distant)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
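# Editor's note: a minimal usage sketch, not part of the original entry. It assumes the
# PFLDLoss definition above is in scope. The weighted term scales the per-position L2
# distance by sum(1 - cos(angle - euler_angle_gt)) over dim 1; both outputs are scalars.
_demo_pfld = PFLDLoss()
_demo_lm_gt, _demo_ang_gt, _demo_ang, _demo_lm = (torch.rand([4, 4, 4, 4]) for _ in range(4))
_demo_weighted, _demo_plain = _demo_pfld(_demo_lm_gt, _demo_ang_gt, _demo_ang, _demo_lm)
assert _demo_weighted.dim() == 0 and _demo_plain.dim() == 0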
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_cos_mean_mul_pow_rsub_sub_sum_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr2 + (r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr3 + (r0 + 64 * r1), None)
tmp25 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None)
tmp26 = tl.load(in_ptr3 + (16 + r0 + 64 * r1), None)
tmp31 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None)
tmp32 = tl.load(in_ptr3 + (32 + r0 + 64 * r1), None)
tmp37 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None)
tmp38 = tl.load(in_ptr3 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp21 = tmp19 - tmp20
tmp22 = tl_math.cos(tmp21)
tmp23 = 1.0
tmp24 = tmp23 - tmp22
tmp27 = tmp25 - tmp26
tmp28 = tl_math.cos(tmp27)
tmp29 = tmp23 - tmp28
tmp30 = tmp24 + tmp29
tmp33 = tmp31 - tmp32
tmp34 = tl_math.cos(tmp33)
tmp35 = tmp23 - tmp34
tmp36 = tmp30 + tmp35
tmp39 = tmp37 - tmp38
tmp40 = tl_math.cos(tmp39)
tmp41 = tmp23 - tmp40
tmp42 = tmp36 + tmp41
tmp43 = tmp42 * tmp18
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp46 = tl.sum(tmp44, 1)[:, None]
tmp47 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp49 = tl.sum(tmp47, 1)[:, None]
tmp50 = 64.0
tmp51 = tmp46 / tmp50
tmp52 = tmp49 / tmp50
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp51, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp52, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2
del buf2
buf5 = buf3
del buf3
get_raw_stream(0)
triton_per_fused_cos_mean_mul_pow_rsub_sub_sum_0[grid(1)](buf4,
buf5, arg2_1, arg3_1, arg0_1, arg1_1, 1, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf4, buf5
class PFLDLossNew(nn.Module):
"""Weighted loss of L2 distance with the pose angle for PFLD."""
def __init__(self):
super(PFLDLossNew, self).__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0], output[1]
|
rmfan/nni
|
PFLDLoss
| false
| 10,937
|
[
"MIT"
] | 0
|
727ee1ce47e070061fe3dab8a2da5d3cd5e55546
|
https://github.com/rmfan/nni/tree/727ee1ce47e070061fe3dab8a2da5d3cd5e55546
|
ComputeDeltas
|
import torch
from torch import Tensor
import torchaudio.functional as F
class ComputeDeltas(torch.nn.Module):
"""Compute delta coefficients of a tensor, usually a spectrogram.
See `torchaudio.functional.compute_deltas` for more details.
Args:
win_length (int): The window length used for computing delta. (Default: ``5``)
mode (str): Mode parameter passed to padding. (Default: ``'replicate'``)
"""
__constants__ = ['win_length']
def __init__(self, win_length: 'int'=5, mode: 'str'='replicate') ->None:
super(ComputeDeltas, self).__init__()
self.win_length = win_length
self.mode = mode
def forward(self, specgram: 'Tensor') ->Tensor:
"""
Args:
specgram (Tensor): Tensor of audio of dimension (..., freq, time).
Returns:
Tensor: Tensor of deltas of dimension (..., freq, time).
"""
return F.compute_deltas(specgram, win_length=self.win_length, mode=
self.mode)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
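# Editor's note: a minimal usage sketch, not part of the original entry. It assumes the
# ComputeDeltas definition above is in scope and that torchaudio is installed. Deltas are
# taken along the last (time) axis, so the output keeps the input shape.
_demo_deltas = ComputeDeltas()                        # win_length=5, replicate padding
_demo_spec = torch.rand([4, 4, 4, 4])                 # (..., freq, time)
assert _demo_deltas(_demo_spec).shape == _demo_spec.shape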
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x1 + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 +
x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 >
0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x2 = xindex
tmp0 = -2 + x0
tmp1 = tmp0.to(tl.float32)
tl.store(out_ptr0 + x2, tmp1, xmask)
@triton.jit
def triton_poi_fused_div_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.1
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_replication_pad1d_0[grid(512)](arg0_1, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32)
triton_poi_fused_arange_repeat_1[grid(320)](buf1, 320, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,), padding=
(0,), dilation=(1,), transposed=False, output_padding=(0,),
groups=64, bias=None)
assert_size_stride(buf2, (1, 64, 4), (256, 4, 1))
del buf0
del buf1
buf3 = buf2
del buf2
triton_poi_fused_div_2[grid(256)](buf3, 256, XBLOCK=128, num_warps=
4, num_stages=1)
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class ComputeDeltasNew(torch.nn.Module):
"""Compute delta coefficients of a tensor, usually a spectrogram.
See `torchaudio.functional.compute_deltas` for more details.
Args:
win_length (int): The window length used for computing delta. (Default: ``5``)
mode (str): Mode parameter passed to padding. (Default: ``'replicate'``)
"""
__constants__ = ['win_length']
def __init__(self, win_length: 'int'=5, mode: 'str'='replicate') ->None:
super(ComputeDeltasNew, self).__init__()
self.win_length = win_length
self.mode = mode
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tbright17/audio
|
ComputeDeltas
| false
| 10,938
|
[
"BSD-2-Clause"
] | 0
|
00d38203e401b8d9472a8f8394a10e2c309be02c
|
https://github.com/tbright17/audio/tree/00d38203e401b8d9472a8f8394a10e2c309be02c
|