entry_point (string, 1–65 chars) | original_triton_python_code (string, 208–619k chars) | optimised_triton_code (string, 1.15k–275k chars) | repo_name (string, 7–115 chars) | module_name (string, 1–65 chars) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (list, 1–6 entries) | stars (int64, 0–19.8k) | sha (string, 40 chars) | repo_link (string, 72–180 chars)
|---|---|---|---|---|---|---|---|---|---|---|
SimpleConv
|
import torch
import torch.nn as nn
class SimpleConv(nn.Module):
def __init__(self, in_size):
super(SimpleConv, self).__init__()
self.conv = nn.Conv2d(in_size, 6, 3, padding='same')
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (6, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 4, 4), (96, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(384)](buf1,
primals_2, buf2, 384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class SimpleConvNew(nn.Module):
def __init__(self, in_size):
super(SimpleConvNew, self).__init__()
self.conv = nn.Conv2d(in_size, 6, 3, padding='same')
self.relu = nn.ReLU()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
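# Editor's note: a minimal parity sketch, assuming a CUDA device and that the
# eager SimpleConv above and SimpleConvNew are importable side by side (the
# atol is illustrative). The fused kernel adds the conv bias, applies ReLU in
# place, and also stores the `output <= 0` mask (buf2) that the ReLU backward
# reuses.
def _check_simpleconv_parity():
    torch.manual_seed(0)
    ref, new = SimpleConv(4).cuda(), SimpleConvNew(4).cuda()
    new.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), new(x), atol=1e-6)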
|
msc5/ml-tools
|
SimpleConv
| false
| 4,033
|
[
"Apache-2.0"
] | 0
|
75ca504bdc0495e8a929ad73501b7de692b3089a
|
https://github.com/msc5/ml-tools/tree/75ca504bdc0495e8a929ad73501b7de692b3089a
|
_Decoder
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class _Decoder(nn.Module):
def __init__(self, z_dim):
super(_Decoder, self).__init__()
self.fc1 = nn.Linear(z_dim, 600)
self.fc2 = nn.Linear(600, 600)
self.fc3 = nn.Linear(600, 784)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
h = torch.sigmoid(self.fc3(h))
return h
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'z_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 38400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 600
x2 = xindex % 2400
x3 = xindex // 2400
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 2432 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 784
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (600, 4), (4, 1))
assert_size_stride(primals_2, (600,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (600, 600), (600, 1))
assert_size_stride(primals_5, (600,), (1,))
assert_size_stride(primals_6, (784, 600), (600, 1))
assert_size_stride(primals_7, (784,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 600), (600, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 600), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 600), (9600, 2400, 600, 1), 0
)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 600), (9728, 2432, 600, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(38400)](buf1,
primals_2, buf7, 38400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 600), (600, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 600), (600, 1), 0),
reinterpret_tensor(primals_4, (600, 600), (1, 600), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 600), (9600, 2400, 600, 1), 0
)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 600), (9728, 2432, 600, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(38400)](buf3,
primals_5, buf6, 38400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 784), (784, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 600), (600, 1), 0),
reinterpret_tensor(primals_6, (600, 784), (1, 600), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 784), (12544, 3136, 784,
1), 0)
del buf4
triton_poi_fused_sigmoid_1[grid(50176)](buf5, primals_7, 50176,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 600), (600, 1), 0
), reinterpret_tensor(buf3, (64, 600), (600, 1), 0
), buf5, primals_6, buf6, primals_4, buf7
class _DecoderNew(nn.Module):
def __init__(self, z_dim):
super(_DecoderNew, self).__init__()
self.fc1 = nn.Linear(z_dim, 600)
self.fc2 = nn.Linear(600, 600)
self.fc3 = nn.Linear(600, 784)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
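# Editor's note: the ReLU kernel covers all 4*4*4 * 600 = 38400 activations,
# but the bool backward masks (buf6/buf7) use a padded stride of 2432 per
# 2400-element group (9728 = 4 * 2432 overall), hence the `x2 + 2432 * x3`
# store index; reading the extra 32 elements as alignment padding is an
# inference, not something the generated code states.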
|
mori97/revae
|
_Decoder
| false
| 4,034
|
[
"MIT"
] | 0
|
465009076a9be78e8ddb9021a0699b32fc695f30
|
https://github.com/mori97/revae/tree/465009076a9be78e8ddb9021a0699b32fc695f30
|
AGELU
|
import math
import torch
import torch.utils.data
import torch.cuda
import torch.utils.checkpoint
def agelu(x):
SQRT_M2_PI = math.sqrt(2 / math.pi)
COEFF = 0.044715
return 0.5 * x * (1.0 + torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(
x, 3))))
class AGELU(torch.nn.Module):
def forward(self, input):
return agelu(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.utils.data
import torch.cuda
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def agelu(x):
SQRT_M2_PI = math.sqrt(2 / math.pi)
COEFF = 0.044715
return 0.5 * x * (1.0 + torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(
x, 3))))
class AGELUNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
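# Editor's note: the kernel literal 0.7978845608028654 is SQRT_M2_PI =
# math.sqrt(2 / math.pi) from agelu, folded at compile time, and
# torch.pow(x, 3) is strength-reduced to two multiplies (tmp3, tmp4).
# A quick check of the folded constant:
assert abs(math.sqrt(2.0 / math.pi) - 0.7978845608028654) < 1e-15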
|
mullovc/NMTGMinor
|
AGELU
| false
| 4,035
|
[
"MIT"
] | 0
|
b1b7b1e018eaa0d99a43449655937cc050a29987
|
https://github.com/mullovc/NMTGMinor/tree/b1b7b1e018eaa0d99a43449655937cc050a29987
|
LinReLU
|
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class LinReLU(torch.nn.Module):
__constants__ = ['bias']
def __init__(self, in_features: 'int', out_features: 'int') ->None:
super(LinReLU, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weights = Parameter(torch.Tensor(in_features, out_features))
self.bias = Parameter(torch.Tensor(in_features))
self.reset_parameters()
def reset_parameters(self) ->None:
nn.init.xavier_uniform_(self.weights)
torch.nn.init.trunc_normal_(self.bias, std=0.5)
def forward(self, inputs: 'torch.Tensor') ->torch.Tensor:
output = (inputs - self.bias) @ self.weights
output = F.relu(output)
return output
def extra_repr(self):
return (
f'in_features={self.in_features}, out_features={self.out_features}'
)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
primals_3, out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf2, buf3,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, buf3, reinterpret_tensor(buf0, (4, 64), (1, 4), 0
), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0)
class LinReLUNew(torch.nn.Module):
__constants__ = ['bias']
def __init__(self, in_features: 'int', out_features: 'int') ->None:
super(LinReLUNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weights = Parameter(torch.Tensor(in_features, out_features))
self.bias = Parameter(torch.Tensor(in_features))
self.reset_parameters()
def reset_parameters(self) ->None:
nn.init.xavier_uniform_(self.weights)
torch.nn.init.trunc_normal_(self.bias, std=0.5)
def extra_repr(self):
return (
f'in_features={self.in_features}, out_features={self.out_features}'
)
def forward(self, input_0):
primals_3 = self.weights
primals_1 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
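# Editor's note: unlike nn.Linear (weight of shape (out, in), applied as
# x @ W.T), LinReLU stores `weights` as (in_features, out_features), so the
# extern mm uses primals_3 with no transpose; the sub kernel broadcasts the
# per-feature bias first, matching (inputs - self.bias) @ self.weights.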
|
mrahman93/nam
|
LinReLU
| false
| 4,036
|
[
"MIT"
] | 0
|
1a2f286a87ffa024040e3330088b4a375700c1c6
|
https://github.com/mrahman93/nam/tree/1a2f286a87ffa024040e3330088b4a375700c1c6
|
ExU
|
import torch
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class ExU(torch.nn.Module):
def __init__(self, in_features: 'int', out_features: 'int') ->None:
super(ExU, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weights = Parameter(torch.Tensor(in_features, out_features))
self.bias = Parameter(torch.Tensor(in_features))
self.reset_parameters()
def reset_parameters(self) ->None:
torch.nn.init.trunc_normal_(self.weights, mean=4.0, std=0.5)
torch.nn.init.trunc_normal_(self.bias, std=0.5)
def forward(self, inputs: 'torch.Tensor', n: 'int'=1) ->torch.Tensor:
output = (inputs - self.bias).matmul(torch.exp(self.weights))
output = F.relu(output)
output = torch.clamp(output, 0, n)
return output
def extra_repr(self):
return (
f'in_features={self.in_features}, out_features={self.out_features}'
)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.exp(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_relu_threshold_backward_2(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 1.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp2 >= tmp3
tmp8 = tmp2 <= tmp5
tmp9 = tmp7 & tmp8
tmp10 = tmp2 <= tmp3
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp9, xmask)
tl.store(out_ptr2 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_0[grid(16)](primals_3, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_sub_1[grid(256)](primals_2, primals_1, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
buf0, out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_clamp_ge_le_logical_and_relu_threshold_backward_2[grid
(256)](buf2, buf3, buf4, buf5, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf2
return buf3, buf0, buf4, buf5, reinterpret_tensor(buf1, (4, 64), (1, 4), 0)
class ExUNew(torch.nn.Module):
def __init__(self, in_features: 'int', out_features: 'int') ->None:
super(ExUNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weights = Parameter(torch.Tensor(in_features, out_features))
self.bias = Parameter(torch.Tensor(in_features))
self.reset_parameters()
def reset_parameters(self) ->None:
torch.nn.init.trunc_normal_(self.weights, mean=4.0, std=0.5)
torch.nn.init.trunc_normal_(self.bias, std=0.5)
def extra_repr(self):
return (
f'in_features={self.in_features}, out_features={self.out_features}'
)
def forward(self, input_0):
primals_3 = self.weights
primals_1 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
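# Editor's note: the eager forward's n=1 default is baked into the trace
# (tmp5 = 1.0 caps the clamp), and ExUNew.forward no longer takes n at all;
# buf4 holds the (>= 0) & (<= 1) mask for the clamp backward and buf5 the
# <= 0 mask for the ReLU backward.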
|
mrahman93/nam
|
ExU
| false
| 4,037
|
[
"MIT"
] | 0
|
1a2f286a87ffa024040e3330088b4a375700c1c6
|
https://github.com/mrahman93/nam/tree/1a2f286a87ffa024040e3330088b4a375700c1c6
|
ReLUDropout
|
import torch
import torch.utils.data
import torch.cuda
import torch.utils.checkpoint
def relu_dropout(x, p=0, training=False, variational=False, batch_first=False):
if not training or p == 0:
return x.clamp_(min=0)
p1m = 1 - p
if variational:
if batch_first:
mask = torch.rand_like(x[:, 0, :]) > p1m
mask = mask.unsqueeze(1).repeat(1, x.size(1), 1)
else:
mask = torch.rand_like(x[0]) > p1m
mask = mask.unsqueeze(0).repeat(x.size(0), 1, 1)
else:
mask = torch.rand_like(x) > p1m
mask |= x < 0
return x.masked_fill_(mask, 0).div_(p1m)
class ReLUDropout(torch.nn.Dropout):
def __init__(self, p=0.5, variational=False, batch_first=False, inplace
=False):
super().__init__(p, inplace=True)
self.variational = variational
self.batch_first = batch_first
def forward(self, input):
return relu_dropout(input, p=self.p, training=self.training,
variational=self.variational, batch_first=self.batch_first)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.cuda
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tl.store(out_ptr1 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
get_raw_stream(0)
triton_poi_fused_clamp_0[grid(256)](arg0_1, arg0_1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return arg0_1,
def relu_dropout(x, p=0, training=False, variational=False, batch_first=False):
if not training or p == 0:
return x.clamp_(min=0)
p1m = 1 - p
if variational:
if batch_first:
mask = torch.rand_like(x[:, 0, :]) > p1m
mask = mask.unsqueeze(1).repeat(1, x.size(1), 1)
else:
mask = torch.rand_like(x[0]) > p1m
mask = mask.unsqueeze(0).repeat(x.size(0), 1, 1)
else:
mask = torch.rand_like(x) > p1m
mask |= x < 0
return x.masked_fill_(mask, 0).div_(p1m)
class ReLUDropoutNew(torch.nn.Dropout):
def __init__(self, p=0.5, variational=False, batch_first=False, inplace
=False):
super().__init__(p, inplace=True)
self.variational = variational
self.batch_first = batch_first
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
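# Editor's note: the graph was traced with training=False, so only the
# x.clamp_(min=0) branch of relu_dropout survives (the variational and
# masking paths are gone), and the kernel runs in place: arg0_1 is passed as
# both input and output pointer, mirroring the eager clamp_.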
|
mullovc/NMTGMinor
|
ReLUDropout
| false
| 4,038
|
[
"MIT"
] | 0
|
b1b7b1e018eaa0d99a43449655937cc050a29987
|
https://github.com/mullovc/NMTGMinor/tree/b1b7b1e018eaa0d99a43449655937cc050a29987
|
MLMTaskHead
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Linear
from torch.nn import LayerNorm
class MLMTaskHead(nn.Module):
def __init__(self, ntoken, ninp):
super().__init__()
self.mlm_span = Linear(ninp, ninp)
self.activation = F.gelu
self.norm_layer = LayerNorm(ninp, eps=1e-12)
self.mlm_head = Linear(ninp, ntoken)
def forward(self, src):
output = self.mlm_span(src)
output = self.activation(output)
output = self.norm_layer(output)
output = self.mlm_head(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ntoken': 4, 'ninp': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Linear
from torch.nn import LayerNorm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp9 * tmp3
tmp12 = libdevice.erf(tmp11)
tmp13 = tmp12 + tmp6
tmp14 = tmp10 * tmp13
tmp15 = tmp8 + tmp14
tmp17 = tmp16 * tmp1
tmp18 = tmp16 * tmp3
tmp19 = libdevice.erf(tmp18)
tmp20 = tmp19 + tmp6
tmp21 = tmp17 * tmp20
tmp22 = tmp15 + tmp21
tmp24 = tmp23 * tmp1
tmp25 = tmp23 * tmp3
tmp26 = libdevice.erf(tmp25)
tmp27 = tmp26 + tmp6
tmp28 = tmp24 * tmp27
tmp29 = tmp22 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = tmp8 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp14 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp21 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp28 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp30
tl.store(out_ptr0 + x0, tmp31, xmask)
tl.store(out_ptr1 + x0, tmp43, xmask)
@triton.jit
def triton_poi_fused_gelu_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp9 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp8 - tmp9
tmp12 = 1e-12
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp10 * tmp14
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_native_layer_norm_0[grid(64)](buf0, buf1,
buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_gelu_native_layer_norm_1[grid(256)](buf0, buf1,
buf2, primals_4, primals_5, buf3, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf1
del buf2
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6
class MLMTaskHeadNew(nn.Module):
def __init__(self, ntoken, ninp):
super().__init__()
self.mlm_span = Linear(ninp, ninp)
self.activation = F.gelu
self.norm_layer = LayerNorm(ninp, eps=1e-12)
self.mlm_head = Linear(ninp, ntoken)
def forward(self, input_0):
primals_1 = self.mlm_span.weight
primals_2 = self.mlm_span.bias
primals_4 = self.norm_layer.weight
primals_5 = self.norm_layer.bias
primals_6 = self.mlm_head.weight
primals_7 = self.mlm_head.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
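# Editor's note: this trace uses the exact erf-based GELU,
# 0.5 * x * (1 + erf(x / sqrt(2))), so 0.7071067811865476 is the folded
# 1/sqrt(2); kernel 0 recomputes GELU per element to get the LayerNorm mean
# and variance, and kernel 1 recomputes it again while normalizing. A quick
# check of the constant:
import math
assert abs(1.0 / math.sqrt(2.0) - 0.7071067811865476) < 1e-15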
|
mrshenli/pipeline_experiments
|
MLMTaskHead
| false
| 4,039
|
[
"MIT"
] | 0
|
09386ab70386a1f4b49ae078c132f4037a887f9b
|
https://github.com/mrshenli/pipeline_experiments/tree/09386ab70386a1f4b49ae078c132f4037a887f9b
|
SimpleTextClassifier
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleTextClassifier(nn.Module):
"""Text Classifier with 1 hidden layer
"""
def __init__(self, num_labels, vocab_size):
super(SimpleTextClassifier, self).__init__()
self.linear1 = nn.Linear(vocab_size, 128)
self.linear2 = nn.Linear(128, num_labels)
def forward(self, feature_vec):
hidden1 = self.linear1(feature_vec).clamp(min=0)
output = self.linear2(hidden1)
return F.log_softmax(output, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_labels': 4, 'vocab_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp2 >= tmp3
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp5, None)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 128), (128, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_clamp_ge_0[grid(8192)](buf0, primals_2, buf1, buf5,
8192, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__log_softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0
), buf4, primals_4, buf5
class SimpleTextClassifierNew(nn.Module):
"""Text Classifier with 1 hidden layer
"""
def __init__(self, num_labels, vocab_size):
super(SimpleTextClassifierNew, self).__init__()
self.linear1 = nn.Linear(vocab_size, 128)
self.linear2 = nn.Linear(128, num_labels)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
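# Editor's note: log_softmax over dim=1 is split into the standard
# numerically stable two passes: kernel 1 subtracts the per-group max (the
# offsets x0 + 64*x2, 16 + ..., 32 + ..., 48 + ... walk the four entries of
# dim 1), and kernel 2 subtracts log(sum(exp(.))), i.e.
# log_softmax(x) = (x - max) - log(sum(exp(x - max))).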
|
mtfelix/pytorch_active_learning
|
SimpleTextClassifier
| false
| 4,040
|
[
"MIT"
] | 0
|
495f20c9cf5100cf2a100f4a4c6103e05fb62ca2
|
https://github.com/mtfelix/pytorch_active_learning/tree/495f20c9cf5100cf2a100f4a4c6103e05fb62ca2
|
ScaledDotProductAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'temperature': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf1
)
del arg1_1
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4
)
del arg2_1
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class ScaledDotProductAttentionNew(nn.Module):
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
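# Editor's note: traced for inference with mask=None, so the masked_fill
# branch and the dropout disappear (nn.Dropout is the identity in eval mode);
# q / self.temperature is folded to a multiply by 0.25 (temperature=4 from
# get_init_inputs), and softmax is the usual max-shift / normalize kernel
# pair around the two extern bmm calls.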
|
muberraozmen/MrMP
|
ScaledDotProductAttention
| false
| 4,041
|
[
"MIT"
] | 0
|
da6bcccbad85a682c848ff4aa1121c773d779e57
|
https://github.com/muberraozmen/MrMP/tree/da6bcccbad85a682c848ff4aa1121c773d779e57
|
Gaussian
|
import torch
from torch import Tensor
import torch.utils.tensorboard
import torch.utils.data
class Gaussian(torch.nn.Module):
"""Gaussian activation"""
def forward(self, x: 'Tensor') ->Tensor:
return torch.exp(-x * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.tensorboard
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_mul_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -tmp0
tmp2 = tmp1 * tmp0
tmp3 = tl_math.exp(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_mul_neg_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GaussianNew(torch.nn.Module):
"""Gaussian activation"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
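# Editor's note: exp(-x * x) is fused into a single elementwise kernel, with
# the negation applied first (tmp1 = -x, tmp2 = tmp1 * x), which is why the
# kernel is named triton_poi_fused_exp_mul_neg_0.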
|
cdever01/torchani
|
Gaussian
| false
| 4,042
|
[
"MIT"
] | 0
|
3f7e1347a06422f50010c04a65219e22f2179bfa
|
https://github.com/cdever01/torchani/tree/3f7e1347a06422f50010c04a65219e22f2179bfa
|
Net
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 5)
self.conv2 = nn.Conv2d(32, 64, 5)
self.conv3 = nn.Conv2d(64, 128, 5)
self.fc1 = nn.Linear(512, 512)
self.fc2 = nn.Linear(512, 2)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
x = x.view(-1, 512)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 3600
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 3600 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 32 * x2 + 115200 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = xindex // 32 % 30
x2 = xindex // 960
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 3840 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 3840 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (1920 + x0 + 64 * x1 + 3840 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (1952 + x0 + 64 * x1 + 3840 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 173056
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 13
x2 = xindex // 832
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 3328 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 3328 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (1664 + x0 + 128 * x1 + 3328 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (1728 + x0 + 128 * x1 + 3328 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 41472
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 4
y1 = yindex // 4 % 4
y2 = yindex // 16
y4 = yindex
y5 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x3 + 256 * y0 + 2304 * y1 + 10368 * y2),
xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (128 + x3 + 256 * y0 + 2304 * y1 + 10368 * y2),
xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1152 + x3 + 256 * y0 + 2304 * y1 + 10368 * y2
), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (1280 + x3 + 256 * y0 + 2304 * y1 + 10368 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3 + 128 * y4), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y5 + 16 * x3 + 2048 * y2), tmp16, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused__softmax_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (512, 512), (512, 1))
assert_size_stride(primals_9, (512,), (1,))
assert_size_stride(primals_10, (2, 512), (512, 1))
assert_size_stride(primals_11, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 25)](primals_4, buf0, 2048, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused_1[grid(8192, 25)](primals_6, buf1, 8192, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf2 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 60, 60), (115200, 3600, 60, 1))
buf3 = empty_strided_cuda((4, 32, 60, 60), (115200, 1, 1920, 32),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(128, 3600)](buf2,
primals_2, buf3, 128, 3600, XBLOCK=32, YBLOCK=32, num_warps=4,
num_stages=1)
del buf2
del primals_2
buf4 = empty_strided_cuda((4, 32, 30, 30), (28800, 1, 960, 32),
torch.float32)
buf5 = empty_strided_cuda((4, 32, 30, 30), (28800, 1, 960, 32),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(115200)](buf3, buf4,
buf5, 115200, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 26, 26), (43264, 1, 1664, 64))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_4[grid(173056)](buf7, primals_5,
173056, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf8 = empty_strided_cuda((4, 64, 13, 13), (10816, 1, 832, 64),
torch.float32)
buf9 = empty_strided_cuda((4, 64, 13, 13), (10816, 1, 832, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(43264)](buf7, buf8,
buf9, 43264, XBLOCK=512, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf8, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 9, 9), (10368, 1, 1152, 128))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_6[grid(41472)](buf11, primals_7,
41472, XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf12 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128),
torch.int8)
buf13 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_7[grid(64, 128)](buf11,
buf12, buf13, 64, 128, XBLOCK=128, YBLOCK=2, num_warps=4,
num_stages=1)
buf14 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf13, (16, 512), (512, 1), 0),
reinterpret_tensor(primals_8, (512, 512), (1, 512), 0), out=buf14)
buf15 = buf14
del buf14
triton_poi_fused_relu_8[grid(8192)](buf15, primals_9, 8192, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf16 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_11, buf15, reinterpret_tensor(
primals_10, (512, 2), (1, 512), 0), alpha=1, beta=1, out=buf16)
del primals_11
buf17 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
triton_poi_fused__softmax_9[grid(32)](buf16, buf17, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del buf16
return (buf17, primals_1, primals_3, buf0, buf1, buf3, buf4, buf5, buf7,
buf8, buf9, buf11, buf12, reinterpret_tensor(buf13, (16, 512), (512,
1), 0), buf15, buf17, primals_10, primals_8)
class NetNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 5)
self.conv2 = nn.Conv2d(32, 64, 5)
self.conv3 = nn.Conv2d(64, 128, 5)
self.fc1 = nn.Linear(512, 512)
self.fc2 = nn.Linear(512, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
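# Editor's note: kernels 0 and 1 repack the conv2/conv3 weights into a
# channels-last-style layout (strides like (800, 1, 160, 32)), and the
# activation/pool kernels keep matching layouts. The spatial sizes check out:
# 64 -> conv5 -> 60 -> pool -> 30 -> conv5 -> 26 -> pool -> 13 -> conv5 -> 9
# -> pool -> 4, so the final (4, 128, 4, 4) tensor holds 128*4*4 = 2048
# values per image, viewed across the batch of 4 as the 16 rows of 512 that
# feed the (16, 512) mm above.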
|
mmayers88/learn_pytorch
|
Net
| false
| 4,043
|
[
"MIT"
] | 0
|
0dbc1aed24d869109feb23bfa6e970686cf485e3
|
https://github.com/mmayers88/learn_pytorch/tree/0dbc1aed24d869109feb23bfa6e970686cf485e3
|
AttNLocalNew
|
import torch
import torch.nn as nn
class AttNLocalNew(nn.Module):
"""
自动限制矩阵
实现斜对角线保留权重,其他的设为-inf
"""
def __init__(self, maxlen=128, limit=20):
super(AttNLocalNew, self).__init__()
self.limit = limit
self.maxlen = maxlen
pass
def forward(self, x):
mask = torch.ones_like(x).tril(diagonal=-1) + torch.ones_like(x).triu(
diagonal=self.limit)
x[mask == 1] = -float('Inf')
return x
pass
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex
tmp11 = tl.load(in_ptr0 + x3, xmask)
tmp0 = x0 + -1 * x1
tmp1 = tl.full([1], -1, tl.int64)
tmp2 = tmp0 <= tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tl.full([1], 20, tl.int64)
tmp7 = tmp0 >= tmp6
tmp8 = tl.where(tmp7, tmp3, tmp4)
tmp9 = tmp5 + tmp8
tmp10 = tmp9 == tmp3
tmp12 = float('-inf')
tmp13 = tl.where(tmp10, tmp12, tmp11)
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_0[grid(256)](arg0_1, arg0_1,
256, XBLOCK=256, num_warps=4, num_stages=1)
return arg0_1,
class AttNLocalNewNew(nn.Module):
"""
自动限制矩阵
实现斜对角线保留权重,其他的设为-inf
"""
def __init__(self, maxlen=128, limit=20):
super(AttNLocalNewNew, self).__init__()
self.limit = limit
self.maxlen = maxlen
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
napoler/tkit-attnlocal-pytorch
|
AttNLocalNew
| false
| 4,044
|
[
"Apache-2.0"
] | 0
|
ec1c32cb49635824f978b3ec19b4c80505ea735b
|
https://github.com/napoler/tkit-attnlocal-pytorch/tree/ec1c32cb49635824f978b3ec19b4c80505ea735b
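A minimal CPU sketch of what the band mask does; limit=2 and the 6x6 size are illustrative choices, not values from the record:

import torch

x = torch.zeros(6, 6)
limit = 2
mask = torch.ones_like(x).tril(diagonal=-1) + torch.ones_like(x).triu(diagonal=limit)
x[mask == 1] = -float('Inf')
# Each softmax row now spreads weight over at most `limit` positions,
# at offsets 0..limit-1 right of the diagonal.
print(torch.softmax(x, dim=-1))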
|
my_MLP2
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class my_MLP2(nn.Module):
def __init__(self, input_dim, output_dim, softmax_type='vanilla'):
super().__init__()
self.input = nn.Linear(input_dim, 128)
self.hidden1 = nn.Linear(128, 128)
self.hidden2 = nn.Linear(128, 128)
self.hidden3 = nn.Linear(128, 128)
self.output = nn.Linear(128, output_dim)
self.softmax = nn.Softmax(dim=1)
self.softmax_type = softmax_type
self.hyp = nn.Linear(128, 1)
def forward(self, parameters):
l_1 = self.input(parameters)
l_2 = F.relu(self.hidden1(l_1))
l_3 = F.relu(self.hidden2(l_2))
l_4 = F.relu(self.hidden3(l_3))
if self.softmax_type == 'vanilla':
w_pred = self.softmax(self.output(l_4))
elif self.softmax_type == 'radius_one':
w_pred = self.softmax(self.output(l_4)) * 2 - 1
else:
w_pred = self.output(l_4)
hyp = torch.sigmoid(self.hyp(l_3)) * 5 + 1
return w_pred, hyp
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_3(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 5.0
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 128), (128, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (128, 128), (128, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128), (128, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (4, 128), (128, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (1, 128), (128, 1))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (128, 128), (
1, 128), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf1
buf15 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf2,
primals_5, buf15, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf3 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_6, (128, 128), (1, 128), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf3
buf14 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf4,
primals_7, buf14, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf5 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_8, (128, 128), (1, 128), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf5
buf13 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf6,
primals_9, buf13, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf6, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_10, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf7)
del primals_11
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf7
triton_poi_fused__softmax_2[grid(256)](buf8, buf9, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf8
buf11 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf4, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_12, (128, 1), (1, 128),
0), alpha=1, beta=1, out=buf11)
del primals_13
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_3[grid(64)](buf11, buf12, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return (buf9, buf12, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
buf0, reinterpret_tensor(buf2, (64, 128), (128, 1), 0),
reinterpret_tensor(buf4, (64, 128), (128, 1), 0),
reinterpret_tensor(buf6, (64, 128), (128, 1), 0), buf9, buf11,
primals_12, primals_10, buf13, primals_8, buf14, primals_6, buf15,
primals_4)
class my_MLP2New(nn.Module):
def __init__(self, input_dim, output_dim, softmax_type='vanilla'):
super().__init__()
self.input = nn.Linear(input_dim, 128)
self.hidden1 = nn.Linear(128, 128)
self.hidden2 = nn.Linear(128, 128)
self.hidden3 = nn.Linear(128, 128)
self.output = nn.Linear(128, output_dim)
self.softmax = nn.Softmax(dim=1)
self.softmax_type = softmax_type
self.hyp = nn.Linear(128, 1)
def forward(self, input_0):
primals_1 = self.input.weight
primals_2 = self.input.bias
primals_4 = self.hidden1.weight
primals_5 = self.hidden1.bias
primals_6 = self.hidden2.weight
primals_7 = self.hidden2.bias
primals_8 = self.hidden3.weight
primals_9 = self.hidden3.bias
primals_10 = self.output.weight
primals_11 = self.output.bias
primals_12 = self.hyp.weight
primals_13 = self.hyp.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
|
mtcarilli/CME_approximations
|
my_MLP2
| false
| 4,045
|
[
"MIT"
] | 0
|
1ffd1cc0bd17679116964ee33634c0d76c50064e
|
https://github.com/mtcarilli/CME_approximations/tree/1ffd1cc0bd17679116964ee33634c0d76c50064e
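A hedged property check for the eager my_MLP2 above (CPU; shapes follow get_inputs(), the tolerance is an assumption):

import torch

net = my_MLP2(input_dim=4, output_dim=4)
w_pred, hyp = net(torch.rand(4, 4, 4, 4))
assert torch.allclose(w_pred.sum(dim=1), torch.ones(4, 4, 4), atol=1e-5)  # softmax over dim=1
assert (hyp > 1).all() and (hyp < 6).all()  # sigmoid(.) * 5 + 1 lies in (1, 6)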
|
my_MLP1
|
import torch
import torch.nn as nn
class my_MLP1(nn.Module):
def __init__(self, input_dim, npdf, h1_dim, h2_dim, norm_type='softmax'):
super().__init__()
self.input = nn.Linear(input_dim, h1_dim)
self.hidden = nn.Linear(h1_dim, h2_dim)
self.output = nn.Linear(h2_dim, npdf)
self.hyp = nn.Linear(h2_dim, 1)
self.softmax = nn.Softmax(dim=1)
self.sigmoid = torch.sigmoid
self.norm_type = norm_type
def forward(self, inputs):
l_1 = self.sigmoid(self.input(inputs))
l_2 = self.sigmoid(self.hidden(l_1))
w_un = self.output(l_2)
hyp = self.sigmoid(self.hyp(l_2))
if self.norm_type == 'softmax':
w_pred = self.softmax(w_un)
elif self.norm_type == 'normalize':
self.sigmoid(w_un)
w_pred = (w_un / w_un.sum(axis=0)).sum(axis=0)
else:
w_pred = torch.abs(self.output(w_un))
return w_pred, hyp
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'npdf': 4, 'h1_dim': 4, 'h2_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(256)](buf1, primals_2, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_sigmoid_0[grid(256)](buf3, primals_5, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 1), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf5
triton_poi_fused_sigmoid_1[grid(64)](buf6, primals_9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf4, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_3[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf7
return buf8, buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, buf6, buf8, primals_8, primals_6, primals_4
class my_MLP1New(nn.Module):
def __init__(self, input_dim, npdf, h1_dim, h2_dim, norm_type='softmax'):
super().__init__()
self.input = nn.Linear(input_dim, h1_dim)
self.hidden = nn.Linear(h1_dim, h2_dim)
self.output = nn.Linear(h2_dim, npdf)
self.hyp = nn.Linear(h2_dim, 1)
self.softmax = nn.Softmax(dim=1)
self.sigmoid = torch.sigmoid
self.norm_type = norm_type
def forward(self, input_0):
primals_1 = self.input.weight
primals_2 = self.input.bias
primals_4 = self.hidden.weight
primals_5 = self.hidden.bias
primals_6 = self.output.weight
primals_7 = self.output.bias
primals_8 = self.hyp.weight
primals_9 = self.hyp.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
|
mtcarilli/CME_approximations
|
my_MLP1
| false
| 4,046
|
[
"MIT"
] | 0
|
1ffd1cc0bd17679116964ee33634c0d76c50064e
|
https://github.com/mtcarilli/CME_approximations/tree/1ffd1cc0bd17679116964ee33634c0d76c50064e
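A hedged parity sketch for this record (assumes a CUDA device; the compiled call() covers only the default norm_type='softmax' branch, and the seed is arbitrary):

import torch

torch.manual_seed(0)
eager = my_MLP1(4, 4, 4, 4).cuda()
fast = my_MLP1New(4, 4, 4, 4).cuda()
fast.load_state_dict(eager.state_dict())  # identical parameter names in both
x = torch.rand(4, 4, 4, 4, device='cuda')
w_e, h_e = eager(x)
w_f, h_f = fast(x)
assert torch.allclose(w_e, w_f, atol=1e-5) and torch.allclose(h_e, h_f, atol=1e-5)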
|
R2CNNattetion
|
import torch
import torch.nn as nn
import torch.utils.data
class R2CNNattetion(nn.Module):
def __init__(self):
super(R2CNNattetion, self).__init__()
self.pool1 = nn.MaxPool2d(kernel_size=1)
self.pool2 = nn.MaxPool2d(kernel_size=2)
self.pool3 = nn.MaxPool2d(kernel_size=4)
self.deconv2 = nn.ConvTranspose2d(512, 512, kernel_size=(4, 4),
stride=(2, 2), padding=(1, 1))
self.deconv3 = nn.ConvTranspose2d(512, 512, kernel_size=(6, 6),
stride=(4, 4), padding=(1, 1))
def forward(self, x):
x1 = self.pool1(x)
x2 = self.pool2(x)
x3 = self.pool3(x)
x2 = self.deconv2(x2)
x3 = self.deconv3(x3)
x = x1 + x2 + x3
return x
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 8192 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 36
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 18432 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 2
x3 = xindex // 2
y4 = yindex
x5 = xindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (2 * x2 + 8 * x3 + 16 * y4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x2 + 8 * x3 + 16 * y4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x2 + 8 * x3 + 16 * y4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x2 + 8 * x3 + 16 * y4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (y0 + 512 * x5 + 2048 * y1), tmp6, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), None, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), None, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), None, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), None, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), None, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), None, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), None, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), None, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), None, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + x0, tmp30, None)
@triton.jit
def triton_poi_fused_add_convolution_max_pool2d_with_indices_4(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (y0 + 512 * x2 + 8192 * y1), xmask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + y0, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y0 + 512 * x2 + 8192 * y1), xmask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + y0, None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1))
assert_size_stride(primals_2, (512, 512, 4, 4), (8192, 16, 4, 1))
assert_size_stride(primals_3, (512,), (1,))
assert_size_stride(primals_4, (512, 512, 6, 6), (18432, 36, 6, 1))
assert_size_stride(primals_5, (512,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((512, 512, 4, 4), (8192, 1, 2048, 512),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(262144, 16)](primals_2, buf0, 262144, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((512, 512, 6, 6), (18432, 1, 3072, 512),
torch.float32)
triton_poi_fused_1[grid(262144, 36)](primals_4, buf1, 262144, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((4, 512, 2, 2), (2048, 1, 1024, 512),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_2[grid(2048, 4)](primals_1,
buf2, 2048, 4, XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 512, 1, 1), (512, 1, 512, 512), torch
.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(2048)](primals_1,
buf3, 2048, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, buf0, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 512, 4, 4), (8192, 1, 2048, 512))
buf5 = extern_kernels.convolution(buf3, buf1, stride=(4, 4),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 512, 4, 4), (8192, 1, 2048, 512))
buf6 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.
float32)
triton_poi_fused_add_convolution_max_pool2d_with_indices_4[grid(
2048, 16)](primals_1, buf4, primals_3, buf5, primals_5, buf6,
2048, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf4
del buf5
del primals_1
del primals_3
del primals_5
return buf6, buf0, buf1, buf2, buf3
class R2CNNattetionNew(nn.Module):
def __init__(self):
super(R2CNNattetionNew, self).__init__()
self.pool1 = nn.MaxPool2d(kernel_size=1)
self.pool2 = nn.MaxPool2d(kernel_size=2)
self.pool3 = nn.MaxPool2d(kernel_size=4)
self.deconv2 = nn.ConvTranspose2d(512, 512, kernel_size=(4, 4),
stride=(2, 2), padding=(1, 1))
self.deconv3 = nn.ConvTranspose2d(512, 512, kernel_size=(6, 6),
stride=(4, 4), padding=(1, 1))
def forward(self, input_0):
primals_2 = self.deconv2.weight
primals_3 = self.deconv2.bias
primals_4 = self.deconv3.weight
primals_5 = self.deconv3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
leobean/CenterNet_simple
|
R2CNNattetion
| false
| 4,047
|
[
"MIT"
] | 0
|
13e2eab2c049563afde5defdf90434a310a32d02
|
https://github.com/leobean/CenterNet_simple/tree/13e2eab2c049563afde5defdf90434a310a32d02
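A shape sketch for the eager module above (CPU; batch size 2 is an arbitrary choice). ConvTranspose2d output size is (H_in - 1)*stride - 2*padding + kernel_size, so both pooled branches deconvolve back to 4x4 and the three terms can be summed elementwise:

import torch

net = R2CNNattetion()
y = net(torch.rand(2, 512, 4, 4))
assert y.shape == (2, 512, 4, 4)  # (2-1)*2 - 2 + 4 = 4 and (1-1)*4 - 2 + 6 = 4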
|
CustomInverse
|
import torch
class CustomInverse(torch.nn.Module):
def forward(self, x, y):
ress = torch.inverse(x) + x
return ress, torch.all(y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, ynumel, xnumel, YBLOCK: tl
.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_out_ptr0 + (x2 + 4 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_per_fused_all_1(in_out_ptr0, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
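    # torch.all(y) computed as not(any(element == 0)) over all 256 elements.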
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 != 0
tmp2 = tmp1 == 0
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(triton_helpers.any(tmp3, 0))
tmp6 = tmp5 == 0
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.linalg_inv_ex.default(arg0_1)
buf1 = buf0[0]
del buf0
buf3 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_0[grid(64, 4)](buf3, arg0_1, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
buf4 = empty_strided_cuda((), (), torch.bool)
buf5 = buf4
del buf4
triton_per_fused_all_1[grid(1)](buf5, arg1_1, 1, 256, num_warps=2,
num_stages=1)
del arg1_1
return buf3, buf5
class CustomInverseNew(torch.nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
|
natke/onnxruntime-extensions
|
CustomInverse
| false
| 4,048
|
[
"MIT"
] | 0
|
e7b7eb596016242a7e913044e889c4a0d7dc1000
|
https://github.com/natke/onnxruntime-extensions/tree/e7b7eb596016242a7e913044e889c4a0d7dc1000
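The same computation in eager PyTorch, as a CPU sketch; the diagonally dominant batch is an assumption to keep the inverse well-conditioned:

import torch

x = torch.eye(4) + 0.1 * torch.rand(4, 4, 4, 4)  # broadcasts to a well-conditioned batch
y = torch.rand(4, 4, 4, 4)
res, flag = CustomInverse()(x, y)
assert torch.allclose(res, torch.linalg.inv(x) + x, atol=1e-5)
assert bool(flag)  # torch.all on floats: True iff every element is nonzero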
|
Out
|
import torch
from torch import nn
class Out(nn.Module):
def forward(self, out):
out_std = torch.sqrt(out.var(0, unbiased=False) + 1e-08)
mean_std = out_std.mean()
mean_std = mean_std.expand(out.size(0), 1, 4, 4)
out = torch.cat((out, mean_std), 1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_cat_mean_sqrt_var_0(in_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
r2 = rindex // 16
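    # Fused minibatch std-dev: variance over the batch of 4 at each of the
    # 64 locations, sqrt(var + 1e-8), then the mean over all locations.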
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-08
tmp22 = tmp20 + tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = tl.sum(tmp24, 1)[:, None]
tmp27 = 64.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]),
tmp28, None)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64)
get_raw_stream(0)
triton_per_fused_add_cat_mean_sqrt_var_0[grid(1)](arg0_1, buf2, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0)
triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf3,
class OutNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
nazarblch/style-based-gan-pytorch
|
Out
| false
| 4,049
|
[
"MIT"
] | 0
|
5ed7fa114904501d77b414921cd9f439773ba24c
|
https://github.com/nazarblch/style-based-gan-pytorch/tree/5ed7fa114904501d77b414921cd9f439773ba24c
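A hedged check on the eager Out above (CPU): the appended channel is a single scalar broadcast over batch and space, so channels grow from 4 to 5:

import torch

y = Out()(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 5, 4, 4)
assert torch.allclose(y[:, 4], y[0, 4, 0, 0].expand(4, 4, 4))  # constant new channel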
|
TwoArgNet
|
import torch
from torch import nn
class TwoArgNet(nn.Module):
def __init__(self, inc, outc):
super().__init__()
self.layer = nn.Linear(inc, outc)
def forward(self, t1, t2):
return self.layer(torch.cat((t1, t2), dim=1)).sigmoid()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inc': 4, 'outc': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((128, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (128, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 8, 4, 4), (128, 16, 4, 1), 0)
del buf1
triton_poi_fused_sigmoid_1[grid(512)](buf2, primals_4, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_4
return buf2, reinterpret_tensor(buf0, (128, 4), (4, 1), 0), buf2
class TwoArgNetNew(nn.Module):
def __init__(self, inc, outc):
super().__init__()
self.layer = nn.Linear(inc, outc)
def forward(self, input_0, input_1):
primals_3 = self.layer.weight
primals_4 = self.layer.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
nazarblch/style-based-gan-pytorch
|
TwoArgNet
| false
| 4,050
|
[
"MIT"
] | 0
|
5ed7fa114904501d77b414921cd9f439773ba24c
|
https://github.com/nazarblch/style-based-gan-pytorch/tree/5ed7fa114904501d77b414921cd9f439773ba24c
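A hedged CPU sketch for the eager TwoArgNet: torch.cat along dim=1 doubles the channel axis to 8, nn.Linear(4, 4) acts on the last axis, and the sigmoid pins every value inside (0, 1):

import torch

net = TwoArgNet(inc=4, outc=4)
out = net(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
assert out.shape == (4, 8, 4, 4)
assert (out > 0).all() and (out < 1).all()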
|
FusedUpsample
|
import torch
from torch import nn
from torch.nn import functional as F
from math import sqrt
class FusedUpsample(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
weight = torch.randn(in_channel, out_channel, kernel_size, kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size * kernel_size
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input):
weight = F.pad(self.weight * self.multiplier, [1, 1, 1, 1])
weight = (weight[:, :, 1:, 1:] + weight[:, :, :-1, 1:] + weight[:,
:, 1:, :-1] + weight[:, :, :-1, :-1]) / 4
out = F.conv_transpose2d(input, weight, self.bias, stride=2,
padding=self.pad)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 5 % 5
x0 = xindex % 5
x2 = xindex // 25
x4 = xindex
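    # Builds one 5x5 weight: the 4x4 kernel, scaled by the multiplier
    # sqrt(2/64) ~= 0.1767767, is read at four one-pixel shifts on the
    # padded grid and the copies are averaged (* 0.25).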
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0
)
tmp12 = 0.1767766952966369
tmp13 = tmp11 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp10, tmp13, tmp14)
tmp16 = -1 + x1
tmp17 = tmp16 >= tmp1
tmp18 = tmp16 < tmp3
tmp19 = tmp17 & tmp18
tmp20 = tmp19 & tmp6
tmp21 = tmp20 & tmp7
tmp22 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp21 & xmask,
other=0.0)
tmp23 = tmp22 * tmp12
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp21, tmp23, tmp24)
tmp26 = tmp15 + tmp25
tmp27 = -1 + x0
tmp28 = tmp27 >= tmp1
tmp29 = tmp27 < tmp3
tmp30 = tmp8 & tmp28
tmp31 = tmp30 & tmp29
tmp32 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp31 & xmask,
other=0.0)
tmp33 = tmp32 * tmp12
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp31, tmp33, tmp34)
tmp36 = tmp26 + tmp35
tmp37 = tmp19 & tmp28
tmp38 = tmp37 & tmp29
tmp39 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp38 & xmask,
other=0.0)
tmp40 = tmp39 * tmp12
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp36 + tmp42
tmp44 = 0.25
tmp45 = tmp43 * tmp44
tl.store(in_out_ptr0 + x4, tmp45, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 121 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(400)](buf1, primals_1, 400, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(primals_3, buf1, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 11, 11), (484, 121, 11, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(1936)](buf3, primals_2, 1936,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_3, buf1
class FusedUpsampleNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
weight = torch.randn(in_channel, out_channel, kernel_size, kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size * kernel_size
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
nazarblch/style-based-gan-pytorch
|
FusedUpsample
| false
| 4,051
|
[
"MIT"
] | 0
|
5ed7fa114904501d77b414921cd9f439773ba24c
|
https://github.com/nazarblch/style-based-gan-pytorch/tree/5ed7fa114904501d77b414921cd9f439773ba24c
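A shape sketch (CPU): after padding, the effective kernel is 5x5, so with stride 2 the output side is (H - 1)*2 - 2*padding + 5 — 11 from a 4x4 input, matching the assert on buf2 above:

import torch

up = FusedUpsample(in_channel=4, out_channel=4, kernel_size=4)
y = up(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 11, 11)  # (4 - 1)*2 - 0 + 5 = 11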
|
MultiHeadAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class XavierLinear(nn.Module):
def __init__(self, d_in, d_out, bias=True):
super().__init__()
self.linear = nn.Linear(d_in, d_out, bias=bias)
nn.init.xavier_normal_(self.linear.weight)
def forward(self, x):
return self.linear(x)
class ScaledDotProductAttention(nn.Module):
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output
class MultiHeadAttention(nn.Module):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = XavierLinear(d_model, n_head * d_k, bias=False)
self.w_ks = XavierLinear(d_model, n_head * d_k, bias=False)
self.w_vs = XavierLinear(d_model, n_head * d_v, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.fc = XavierLinear(n_head * d_v, d_model, bias=False)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
q = self.attention(q, k, v, mask=mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'd_model': 4, 'd_k': 4, 'd_v': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 16), (16, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_7, (16, 4), (1, 16), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_1,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_1,
buf12, buf13, primals_8, primals_9, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf12
del buf13
del primals_9
return buf14, primals_1, primals_8, reinterpret_tensor(primals_2, (16,
4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0
), buf11, primals_7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0)
class XavierLinear(nn.Module):
def __init__(self, d_in, d_out, bias=True):
super().__init__()
self.linear = nn.Linear(d_in, d_out, bias=bias)
nn.init.xavier_normal_(self.linear.weight)
def forward(self, x):
return self.linear(x)
class ScaledDotProductAttention(nn.Module):
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output
class MultiHeadAttentionNew(nn.Module):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = XavierLinear(d_model, n_head * d_k, bias=False)
self.w_ks = XavierLinear(d_model, n_head * d_k, bias=False)
self.w_vs = XavierLinear(d_model, n_head * d_v, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.fc = XavierLinear(n_head * d_v, d_model, bias=False)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, input_0, input_1, input_2):
primals_4 = self.w_qs.linear.weight
primals_5 = self.w_ks.linear.weight
primals_6 = self.w_vs.linear.weight
primals_7 = self.fc.linear.weight
primals_8 = self.layer_norm.weight
primals_9 = self.layer_norm.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
muberraozmen/MrMP
|
MultiHeadAttention
| false
| 4,052
|
[
"MIT"
] | 0
|
da6bcccbad85a682c848ff4aa1121c773d779e57
|
https://github.com/muberraozmen/MrMP/tree/da6bcccbad85a682c848ff4aa1121c773d779e57
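A hedged parity sketch (assumes a CUDA device; eval() disables the dropout that the generated call() folds away, and the seed is arbitrary):

import torch

torch.manual_seed(0)
eager = MultiHeadAttention(n_head=4, d_model=4, d_k=4, d_v=4).cuda().eval()
fast = MultiHeadAttentionNew(n_head=4, d_model=4, d_k=4, d_v=4).cuda()
fast.load_state_dict(eager.state_dict())
q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
assert torch.allclose(eager(q, k, v), fast(q, k, v), atol=1e-5)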
|
DecoderLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class XavierLinear(nn.Module):
def __init__(self, d_in, d_out, bias=True):
super().__init__()
self.linear = nn.Linear(d_in, d_out, bias=bias)
nn.init.xavier_normal_(self.linear.weight)
def forward(self, x):
return self.linear(x)
class PositionwiseFeedForward(nn.Module):
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = XavierLinear(d_in, d_hid)
self.w_2 = XavierLinear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class ScaledDotProductAttention(nn.Module):
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output
class MultiHeadAttention(nn.Module):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = XavierLinear(d_model, n_head * d_k, bias=False)
self.w_ks = XavierLinear(d_model, n_head * d_k, bias=False)
self.w_vs = XavierLinear(d_model, n_head * d_v, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.fc = XavierLinear(n_head * d_v, d_model, bias=False)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
q = self.attention(q, k, v, mask=mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q
class DecoderLayer(nn.Module):
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super().__init__()
self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn1 = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, dec_input, enc_output, dec_enc_attn_mask=None):
dec_output = self.enc_attn(dec_input, enc_output, enc_output, mask=
dec_enc_attn_mask)
dec_output = self.pos_ffn1(dec_output)
return dec_output
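# Note: unlike a full Transformer decoder layer, this variant has no masked
# self-attention sub-layer; it applies only encoder-decoder cross-attention
# (dec_input attending over enc_output) followed by a position-wise FFN.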
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_inner': 4, 'n_head': 4, 'd_k': 4, 'd_v': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
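# Note: the two kernels above form a numerically stable two-pass softmax over
# the last dimension: _softmax_2 writes exp(x - rowmax) and _softmax_3
# divides each element by its row's sum of exponentials.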
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
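# Note: this kernel fuses the linear bias add with ReLU in place, and also
# stores the boolean mask (output <= 0) that ReLU's autograd backward will
# consume.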
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (4, 16), (16, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_1,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_1,
buf12, buf13, primals_7, primals_8, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_8
buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf15)
buf16 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0)
del buf15
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_7[grid(64)](buf16,
primals_10, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf17)
buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0)
del buf17
triton_poi_fused_add_8[grid(64)](buf18, primals_12, buf14, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
buf19 = buf13
del buf13
buf20 = buf12
del buf12
triton_poi_fused_native_layer_norm_9[grid(16)](buf18, buf19, buf20,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_10[grid(64)](buf18, buf19, buf20,
primals_13, primals_14, buf21, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf19
del buf20
del primals_14
return buf21, primals_1, primals_7, primals_13, reinterpret_tensor(
primals_2, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16,
16), (16, 1), 0), buf11, reinterpret_tensor(buf14, (16, 4), (4, 1), 0
), reinterpret_tensor(buf16, (16, 4), (4, 1), 0
), buf18, primals_11, buf22, primals_9, primals_6, reinterpret_tensor(
buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4,
4), (16, 1, 4), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0)
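# Note: besides the layer output (buf21), call() returns the intermediate
# tensors that torch.compile saves for the backward pass; the wrapper module
# below keeps only output[0].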
class XavierLinear(nn.Module):
def __init__(self, d_in, d_out, bias=True):
super().__init__()
self.linear = nn.Linear(d_in, d_out, bias=bias)
nn.init.xavier_normal_(self.linear.weight)
def forward(self, x):
return self.linear(x)
class PositionwiseFeedForward(nn.Module):
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = XavierLinear(d_in, d_hid)
self.w_2 = XavierLinear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class ScaledDotProductAttention(nn.Module):
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output
class MultiHeadAttention(nn.Module):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = XavierLinear(d_model, n_head * d_k, bias=False)
self.w_ks = XavierLinear(d_model, n_head * d_k, bias=False)
self.w_vs = XavierLinear(d_model, n_head * d_v, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.fc = XavierLinear(n_head * d_v, d_model, bias=False)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
q = self.attention(q, k, v, mask=mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q
class DecoderLayerNew(nn.Module):
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super().__init__()
self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn1 = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, input_0, input_1):
primals_3 = self.enc_attn.w_qs.linear.weight
primals_4 = self.enc_attn.w_ks.linear.weight
primals_5 = self.enc_attn.w_vs.linear.weight
primals_6 = self.enc_attn.fc.linear.weight
primals_7 = self.enc_attn.layer_norm.weight
primals_8 = self.enc_attn.layer_norm.bias
primals_9 = self.pos_ffn1.w_1.linear.weight
primals_10 = self.pos_ffn1.w_1.linear.bias
primals_11 = self.pos_ffn1.w_2.linear.weight
primals_12 = self.pos_ffn1.w_2.linear.bias
primals_13 = self.pos_ffn1.layer_norm.weight
primals_14 = self.pos_ffn1.layer_norm.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
|
muberraozmen/MrMP
|
DecoderLayer
| false
| 4,053
|
[
"MIT"
] | 0
|
da6bcccbad85a682c848ff4aa1121c773d779e57
|
https://github.com/muberraozmen/MrMP/tree/da6bcccbad85a682c848ff4aa1121c773d779e57
|
BiAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class BiAttention(nn.Module):
def __init__(self, input_size, dropout):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.input_linear = nn.Linear(input_size, 1, bias=False)
self.memory_linear = nn.Linear(input_size, 1, bias=False)
self.dot_scale = nn.Parameter(torch.Tensor(input_size).uniform_(1.0 /
input_size ** 0.5))
def forward(self, input, memory, mask=None):
bsz, input_len, memory_len = input.size(0), input.size(1), memory.size(
1)
input = self.dropout(input)
memory = self.dropout(memory)
input_dot = self.input_linear(input)
memory_dot = self.memory_linear(memory).view(bsz, 1, memory_len)
cross_dot = torch.bmm(input * self.dot_scale, memory.permute(0, 2,
1).contiguous())
att = input_dot + memory_dot + cross_dot
if mask is not None:
att = att - 1e+30 * (1 - mask[:, None])
weight_one = F.softmax(att, dim=-1)
output_one = torch.bmm(weight_one, memory)
weight_two = F.softmax(att.max(dim=-1)[0], dim=-1).view(bsz, 1,
input_len)
output_two = torch.bmm(weight_two, input)
return torch.cat([input, output_one, input * output_one, output_two *
output_one], dim=-1)
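# Note: the returned tensor concatenates [input, output_one,
# input * output_one, output_two * output_one] along the last dimension, so
# its feature size is 4 * input_size (16 for the input_size=4 configuration
# in get_init_inputs).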
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_transpose_1(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4
y3 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
tl.store(out_ptr1 + (y2 + 4 * x1 + 16 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_max_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp0 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp0 + tmp10
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp0 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp31 = tmp4 > tmp8
tmp32 = tmp4 == tmp8
tmp33 = tmp4 != tmp4
tmp34 = tmp8 != tmp8
tmp35 = tmp33 > tmp34
tmp36 = tmp31 | tmp35
tmp37 = tmp33 & tmp34
tmp38 = tmp32 | tmp37
tmp39 = tl.full([1], 0, tl.int64)
tmp40 = tl.full([1], 1, tl.int64)
tmp41 = tmp39 < tmp40
tmp42 = tmp38 & tmp41
tmp43 = tmp36 | tmp42
tmp44 = tl.where(tmp43, tmp4, tmp8)
tmp45 = tl.where(tmp43, tmp39, tmp40)
tmp46 = tmp44 > tmp13
tmp47 = tmp44 == tmp13
tmp48 = tmp44 != tmp44
tmp49 = tmp13 != tmp13
tmp50 = tmp48 > tmp49
tmp51 = tmp46 | tmp50
tmp52 = tmp48 & tmp49
tmp53 = tmp47 | tmp52
tmp54 = tl.full([1], 2, tl.int64)
tmp55 = tmp45 < tmp54
tmp56 = tmp53 & tmp55
tmp57 = tmp51 | tmp56
tmp58 = tl.where(tmp57, tmp44, tmp13)
tmp59 = tl.where(tmp57, tmp45, tmp54)
tmp60 = tmp58 > tmp18
tmp61 = tmp58 == tmp18
tmp62 = tmp58 != tmp58
tmp63 = tmp18 != tmp18
tmp64 = tmp62 > tmp63
tmp65 = tmp60 | tmp64
tmp66 = tmp62 & tmp63
tmp67 = tmp61 | tmp66
tmp68 = tl.full([1], 3, tl.int64)
tmp69 = tmp59 < tmp68
tmp70 = tmp67 & tmp69
tmp71 = tmp65 | tmp70
tl.where(tmp71, tmp58, tmp18)
tmp73 = tl.where(tmp71, tmp59, tmp68)
tl.store(out_ptr0 + x2, tmp19, xmask)
tl.store(out_ptr1 + x2, tmp30, xmask)
tl.store(out_ptr2 + x2, tmp19, xmask)
tl.store(out_ptr3 + x2, tmp73, xmask)
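# Note: this kernel fuses several per-row reductions over the length-4
# attention rows: the row max and exp-sum needed for the stable softmax of
# att, plus the int64 argmax index corresponding to att.max(dim=-1) in the
# original forward.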
@triton.jit
def triton_poi_fused__softmax_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_out_ptr0 + x4, xmask)
tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = xindex // 16
x2 = xindex // 64
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x3 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (4 * x3 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (4 * x3 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp14, tmp17, tmp18)
tmp20 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp23 = tl.load(in_ptr2 + (4 * x2 + (-12 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr1 + (4 * x3 + (-12 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp20, tmp25, tmp26)
tmp28 = tl.where(tmp14, tmp19, tmp27)
tmp29 = tl.where(tmp9, tmp10, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + x4, tmp30, xmask)
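# Note: this kernel materializes BiAttention's final torch.cat in a single
# pass, using masked loads to select among the four 4-wide sub-ranges of the
# 16-wide output feature dimension.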
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 1), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](primals_1, primals_5, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
buf15 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_clone_transpose_1[grid(16, 4)](primals_2, buf3,
buf15, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, buf3, out=buf4)
del buf2
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused__softmax_add_max_2[grid(16)](buf0, buf1, buf4,
buf5, buf6, buf9, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = buf4
del buf4
triton_poi_fused__softmax_add_3[grid(64)](buf7, buf0, buf1, buf5,
buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del buf1
del buf5
buf8 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
del buf3
extern_kernels.bmm(buf7, primals_2, out=buf8)
buf11 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0)
del buf6
triton_poi_fused__softmax_4[grid(16)](buf9, buf11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf12 = buf9
del buf9
triton_poi_fused__softmax_5[grid(16)](buf11, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (4, 1, 4), (4, 4, 1), 0)
del buf11
extern_kernels.bmm(reinterpret_tensor(buf12, (4, 1, 4), (4, 4, 1),
0), primals_1, out=buf13)
buf14 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_cat_6[grid(256)](primals_1, buf8, buf13, buf14,
256, XBLOCK=256, num_warps=4, num_stages=1)
return (buf14, primals_1, primals_2, buf7, buf8, buf12, buf13,
reinterpret_tensor(buf10, (4, 4, 1), (4, 1, 1), 0), buf15)
class BiAttentionNew(nn.Module):
def __init__(self, input_size, dropout):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.input_linear = nn.Linear(input_size, 1, bias=False)
self.memory_linear = nn.Linear(input_size, 1, bias=False)
self.dot_scale = nn.Parameter(torch.Tensor(input_size).uniform_(1.0 /
input_size ** 0.5))
def forward(self, input_0, input_1):
primals_5 = self.dot_scale
primals_3 = self.input_linear.weight
primals_4 = self.memory_linear.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
mwakaba2/KOBE
|
BiAttention
| false
| 4,054
|
[
"MIT"
] | 0
|
e225e78fb18b5fc9785d521a3cd611fff3eaaf87
|
https://github.com/mwakaba2/KOBE/tree/e225e78fb18b5fc9785d521a3cd611fff3eaaf87
|
FusedDownsample
|
import torch
from torch import nn
from torch.nn import functional as F
from math import sqrt
class FusedDownsample(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
weight = torch.randn(out_channel, in_channel, kernel_size, kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size * kernel_size
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input):
weight = F.pad(self.weight * self.multiplier, [1, 1, 1, 1])
weight = (weight[:, :, 1:, 1:] + weight[:, :, :-1, 1:] + weight[:,
:, 1:, :-1] + weight[:, :, :-1, :-1]) / 4
out = F.conv2d(input, weight, self.bias, stride=2, padding=self.pad)
return out
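# Note: averaging the four diagonal shifts of the zero-padded weight is
# equivalent to smoothing the kernel with a 2x2 box filter, so the stride-2
# convolution acts as blur-then-downsample. For the 64x64 inputs below the
# effective 5x5 kernel gives (64 - 5) // 2 + 1 = 30 outputs per side.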
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 5 % 5
x0 = xindex % 5
x2 = xindex // 25
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0
)
tmp12 = 0.1767766952966369
tmp13 = tmp11 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp10, tmp13, tmp14)
tmp16 = -1 + x1
tmp17 = tmp16 >= tmp1
tmp18 = tmp16 < tmp3
tmp19 = tmp17 & tmp18
tmp20 = tmp19 & tmp6
tmp21 = tmp20 & tmp7
tmp22 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp21 & xmask,
other=0.0)
tmp23 = tmp22 * tmp12
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp21, tmp23, tmp24)
tmp26 = tmp15 + tmp25
tmp27 = -1 + x0
tmp28 = tmp27 >= tmp1
tmp29 = tmp27 < tmp3
tmp30 = tmp8 & tmp28
tmp31 = tmp30 & tmp29
tmp32 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp31 & xmask,
other=0.0)
tmp33 = tmp32 * tmp12
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp31, tmp33, tmp34)
tmp36 = tmp26 + tmp35
tmp37 = tmp19 & tmp28
tmp38 = tmp37 & tmp29
tmp39 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp38 & xmask,
other=0.0)
tmp40 = tmp39 * tmp12
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp36 + tmp42
tmp44 = 0.25
tmp45 = tmp43 * tmp44
tl.store(in_out_ptr0 + x4, tmp45, xmask)
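# Note: this kernel builds the zero-padded, four-way-averaged 5x5 weight
# directly; the constant 0.1767766952966369 is the module's multiplier
# sqrt(2 / fan_in) with fan_in = 4 * 4 * 4 = 64.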
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 14400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 900 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(400)](buf1, primals_1, 400, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(primals_3, buf1, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 30, 30), (3600, 900, 30, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(14400)](buf3, primals_2, 14400,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_3, buf1
class FusedDownsampleNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
weight = torch.randn(out_channel, in_channel, kernel_size, kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size * kernel_size
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
nazarblch/style-based-gan-pytorch
|
FusedDownsample
| false
| 4,055
|
[
"MIT"
] | 0
|
5ed7fa114904501d77b414921cd9f439773ba24c
|
https://github.com/nazarblch/style-based-gan-pytorch/tree/5ed7fa114904501d77b414921cd9f439773ba24c
|
DeiTOutput
|
from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.utils.checkpoint
class DeiTOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4,
hidden_dropout_prob=0.5)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
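# Note: this kernel fuses the dense layer's bias add with the residual
# addition of input_tensor, so the matmul output needs only one extra
# elementwise pass.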
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf1, primals_2, primals_4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_4
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class DeiTOutputNew(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
ncoop57/transformers
|
DeiTOutput
| false
| 4,056
|
[
"Apache-2.0"
] | 0
|
d7e156bd1ae2467e9ea1dbc44f31da0ed2296aee
|
https://github.com/ncoop57/transformers/tree/d7e156bd1ae2467e9ea1dbc44f31da0ed2296aee
|
EncoderLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class XavierLinear(nn.Module):
def __init__(self, d_in, d_out, bias=True):
super().__init__()
self.linear = nn.Linear(d_in, d_out, bias=bias)
nn.init.xavier_normal_(self.linear.weight)
def forward(self, x):
return self.linear(x)
class PositionwiseFeedForward(nn.Module):
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = XavierLinear(d_in, d_hid)
self.w_2 = XavierLinear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class ScaledDotProductAttention(nn.Module):
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output
class MultiHeadAttention(nn.Module):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = XavierLinear(d_model, n_head * d_k, bias=False)
self.w_ks = XavierLinear(d_model, n_head * d_k, bias=False)
self.w_vs = XavierLinear(d_model, n_head * d_v, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.fc = XavierLinear(n_head * d_v, d_model, bias=False)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
q = self.attention(q, k, v, mask=mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q
class EncoderLayer(nn.Module):
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super().__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, enc_input, slf_attn_mask=None):
enc_output = self.slf_attn(enc_input, enc_input, enc_input, mask=
slf_attn_mask)
enc_output = self.pos_ffn(enc_output)
return enc_output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_inner': 4, 'n_head': 4, 'd_k': 4, 'd_v': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (4, 16), (16, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_1,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_1,
buf12, buf13, primals_6, primals_7, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf15)
buf16 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0)
del buf15
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_7[grid(64)](buf16,
primals_9, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf17)
buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0)
del buf17
triton_poi_fused_add_8[grid(64)](buf18, primals_11, buf14, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
buf19 = buf13
del buf13
buf20 = buf12
del buf12
triton_poi_fused_native_layer_norm_9[grid(16)](buf18, buf19, buf20,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_10[grid(64)](buf18, buf19, buf20,
primals_12, primals_13, buf21, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf19
del buf20
del primals_13
return buf21, primals_1, primals_6, primals_12, buf7, reinterpret_tensor(
buf10, (16, 16), (16, 1), 0), buf11, reinterpret_tensor(buf14, (16,
4), (4, 1), 0), reinterpret_tensor(buf16, (16, 4), (4, 1), 0
), buf18, primals_10, buf22, primals_8, primals_5, reinterpret_tensor(
buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4,
4), (16, 1, 4), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0)
class XavierLinear(nn.Module):
def __init__(self, d_in, d_out, bias=True):
super().__init__()
self.linear = nn.Linear(d_in, d_out, bias=bias)
nn.init.xavier_normal_(self.linear.weight)
def forward(self, x):
return self.linear(x)
class PositionwiseFeedForward(nn.Module):
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = XavierLinear(d_in, d_hid)
self.w_2 = XavierLinear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class ScaledDotProductAttention(nn.Module):
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1)
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output
class MultiHeadAttention(nn.Module):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = XavierLinear(d_model, n_head * d_k, bias=False)
self.w_ks = XavierLinear(d_model, n_head * d_k, bias=False)
self.w_vs = XavierLinear(d_model, n_head * d_v, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.fc = XavierLinear(n_head * d_v, d_model, bias=False)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
q = self.attention(q, k, v, mask=mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q
class EncoderLayerNew(nn.Module):
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super().__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, input_0):
primals_2 = self.slf_attn.w_qs.linear.weight
primals_3 = self.slf_attn.w_ks.linear.weight
primals_4 = self.slf_attn.w_vs.linear.weight
primals_5 = self.slf_attn.fc.linear.weight
primals_6 = self.slf_attn.layer_norm.weight
primals_7 = self.slf_attn.layer_norm.bias
primals_8 = self.pos_ffn.w_1.linear.weight
primals_9 = self.pos_ffn.w_1.linear.bias
primals_10 = self.pos_ffn.w_2.linear.weight
primals_11 = self.pos_ffn.w_2.linear.bias
primals_12 = self.pos_ffn.layer_norm.weight
primals_13 = self.pos_ffn.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
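# --- Usage sketch (added for illustration; not part of the generated file). The
# dimensions below are an assumption inferred from the buffer shapes in call(),
# which was traced for d_model=d_k=d_v=4, n_head=4 and a (4, 4, 4) input. The
# fused kernels allocate CUDA buffers, so a GPU build of PyTorch is required.
def _example_usage():
    layer = EncoderLayerNew(d_model=4, d_inner=4, n_head=4, d_k=4, d_v=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    out = layer(x)
    assert out.shape == x.shape  # residual connections preserve (batch, len, d_model)
    return out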
|
muberraozmen/MrMP
|
EncoderLayer
| false
| 4,057
|
[
"MIT"
] | 0
|
da6bcccbad85a682c848ff4aa1121c773d779e57
|
https://github.com/muberraozmen/MrMP/tree/da6bcccbad85a682c848ff4aa1121c773d779e57
|
ConvDropoutLayerNorm
|
import torch
from torch import nn
import torch.utils.checkpoint
class SqueezeBertLayerNorm(nn.LayerNorm):
"""
This is a nn.LayerNorm subclass that accepts NCW data layout and performs normalization in the C dimension.
N = batch, C = channels, W = sequence length
"""
def __init__(self, hidden_size, eps=1e-12):
nn.LayerNorm.__init__(self, normalized_shape=hidden_size, eps=eps)
def forward(self, x):
x = x.permute(0, 2, 1)
x = nn.LayerNorm.forward(self, x)
return x.permute(0, 2, 1)
class ConvDropoutLayerNorm(nn.Module):
"""
ConvDropoutLayerNorm: Conv, Dropout, LayerNorm
"""
def __init__(self, cin, cout, groups, dropout_prob):
super().__init__()
self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout,
kernel_size=1, groups=groups)
self.layernorm = SqueezeBertLayerNorm(cout)
self.dropout = nn.Dropout(dropout_prob)
def forward(self, hidden_states, input_tensor):
x = self.conv1d(hidden_states)
x = self.dropout(x)
x = x + input_tensor
x = self.layernorm(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'cin': 4, 'cout': 4, 'groups': 1, 'dropout_prob': 0.5}]
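# --- Usage sketch (added for illustration; not part of the original file) ---
# Builds the module from get_init_inputs() and feeds it the tensors from
# get_inputs(); eval() disables dropout so the output is deterministic.
def _example_usage():
    init_args, init_kwargs = get_init_inputs()
    module = ConvDropoutLayerNorm(*init_args, **init_kwargs)
    module.eval()
    hidden_states, input_tensor = get_inputs()
    out = module(hidden_states, input_tensor)
    assert out.shape == input_tensor.shape  # residual add keeps the (N, C, W) shape
    return out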
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-12
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16, 4)](buf0, primals_2,
primals_4, buf1, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_4
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](buf1, buf2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = buf0
del buf0
triton_poi_fused_native_layer_norm_2[grid(64)](buf1, buf2, buf3,
primals_5, primals_6, buf4, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf2
del buf3
del primals_6
return reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0
), primals_1, primals_3, primals_5, buf1
class SqueezeBertLayerNorm(nn.LayerNorm):
"""
This is a nn.LayerNorm subclass that accepts NCW data layout and performs normalization in the C dimension.
N = batch, C = channels, W = sequence length
"""
def __init__(self, hidden_size, eps=1e-12):
nn.LayerNorm.__init__(self, normalized_shape=hidden_size, eps=eps)
def forward(self, x):
x = x.permute(0, 2, 1)
x = nn.LayerNorm.forward(self, x)
return x.permute(0, 2, 1)
class ConvDropoutLayerNormNew(nn.Module):
"""
ConvDropoutLayerNorm: Conv, Dropout, LayerNorm
"""
def __init__(self, cin, cout, groups, dropout_prob):
super().__init__()
self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout,
kernel_size=1, groups=groups)
self.layernorm = SqueezeBertLayerNorm(cout)
self.dropout = nn.Dropout(dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.conv1d.weight
primals_2 = self.conv1d.bias
primals_5 = self.layernorm.weight
primals_6 = self.layernorm.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
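# --- Usage sketch (added for illustration; not part of the generated file) ---
# call() asserts the exact (4, 4, 4) shapes it was traced with and allocates CUDA
# buffers, so this only runs on a GPU; dropout was traced away (inference mode).
def _example_usage():
    module = ConvDropoutLayerNormNew(cin=4, cout=4, groups=1, dropout_prob=0.5).cuda()
    hidden_states = torch.rand(4, 4, 4, device='cuda')
    input_tensor = torch.rand(4, 4, 4, device='cuda')
    out = module(hidden_states, input_tensor)
    assert out.shape == (4, 4, 4)
    return out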
|
ncoop57/transformers
|
ConvDropoutLayerNorm
| false
| 4,058
|
[
"Apache-2.0"
] | 0
|
d7e156bd1ae2467e9ea1dbc44f31da0ed2296aee
|
https://github.com/ncoop57/transformers/tree/d7e156bd1ae2467e9ea1dbc44f31da0ed2296aee
|
DeiTEmbeddings
|
from _paritybench_helpers import _mock_config
import collections
import torch
from torch import nn
import torch.utils.checkpoint
import collections.abc
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return x, x
class PatchEmbeddings(nn.Module):
"""
Image to Patch Embedding.
"""
def __init__(self, image_size=224, patch_size=16, num_channels=3,
embed_dim=768):
super().__init__()
image_size = to_2tuple(image_size)
patch_size = to_2tuple(patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] //
patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=
patch_size, stride=patch_size)
def forward(self, pixel_values):
_batch_size, _num_channels, height, width = pixel_values.shape
if height != self.image_size[0] or width != self.image_size[1]:
raise ValueError(
f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
)
x = self.projection(pixel_values).flatten(2).transpose(1, 2)
return x
class DeiTEmbeddings(nn.Module):
"""
Construct the CLS token, distillation token, position and patch embeddings.
"""
def __init__(self, config):
super().__init__()
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.distillation_token = nn.Parameter(torch.zeros(1, 1, config.
hidden_size))
self.patch_embeddings = PatchEmbeddings(image_size=config.
image_size, patch_size=config.patch_size, num_channels=config.
num_channels, embed_dim=config.hidden_size)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches +
2, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, pixel_values):
batch_size = pixel_values.shape[0]
embeddings = self.patch_embeddings(pixel_values)
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
distillation_tokens = self.distillation_token.expand(batch_size, -1, -1
)
embeddings = torch.cat((cls_tokens, distillation_tokens, embeddings
), dim=1)
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, image_size=4,
patch_size=4, num_channels=4, hidden_dropout_prob=0.5)}]
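# --- Usage sketch (added for illustration; relies on the _paritybench_helpers
# _mock_config imported above). A 4x4 image with 4x4 patches yields one patch,
# so the output sequence is CLS + distillation + 1 patch = 3 tokens.
def _example_usage():
    _, init_kwargs = get_init_inputs()
    module = DeiTEmbeddings(init_kwargs['config'])
    module.eval()  # make the dropout a no-op
    pixel_values, = get_inputs()
    out = module(pixel_values)
    assert out.shape == (4, 3, 4)  # (batch, num_patches + 2, hidden_size)
    return out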
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import collections
from torch import nn
import torch.utils.checkpoint
import collections.abc
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 3
x0 = xindex % 4
x2 = xindex // 12
x3 = xindex % 12
x4 = xindex
tmp21 = tl.load(in_ptr4 + x3, xmask, eviction_policy='evict_last')
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + x0, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 3, tl.int64)
tmp14 = tl.load(in_ptr2 + (x0 + 4 * x2), tmp11 & xmask, eviction_policy
='evict_last', other=0.0)
tmp15 = tl.load(in_ptr3 + x0, tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp11, tmp16, tmp17)
tmp19 = tl.where(tmp9, tmp10, tmp18)
tmp20 = tl.where(tmp4, tmp5, tmp19)
tmp22 = tmp20 + tmp21
tl.store(out_ptr0 + x4, tmp22, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_5, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_6, (1, 3, 4), (12, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(4,
4), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = empty_strided_cuda((4, 3, 4), (12, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_cat_0[grid(48)](primals_4, primals_5, buf0,
primals_3, primals_6, buf1, 48, XBLOCK=64, num_warps=1,
num_stages=1)
del buf0
del primals_3
del primals_4
del primals_5
del primals_6
return buf1, primals_1, primals_2
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return x, x
class PatchEmbeddings(nn.Module):
"""
Image to Patch Embedding.
"""
def __init__(self, image_size=224, patch_size=16, num_channels=3,
embed_dim=768):
super().__init__()
image_size = to_2tuple(image_size)
patch_size = to_2tuple(patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] //
patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=
patch_size, stride=patch_size)
def forward(self, pixel_values):
_batch_size, _num_channels, height, width = pixel_values.shape
if height != self.image_size[0] or width != self.image_size[1]:
raise ValueError(
f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
)
x = self.projection(pixel_values).flatten(2).transpose(1, 2)
return x
class DeiTEmbeddingsNew(nn.Module):
"""
Construct the CLS token, distillation token, position and patch embeddings.
"""
def __init__(self, config):
super().__init__()
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.distillation_token = nn.Parameter(torch.zeros(1, 1, config.
hidden_size))
self.patch_embeddings = PatchEmbeddings(image_size=config.
image_size, patch_size=config.patch_size, num_channels=config.
num_channels, embed_dim=config.hidden_size)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches +
2, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0):
primals_4 = self.cls_token
primals_5 = self.distillation_token
primals_6 = self.position_embeddings
primals_1 = self.patch_embeddings.projection.weight
primals_3 = self.patch_embeddings.projection.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
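# --- Usage sketch (added for illustration). The generated file does not import
# _mock_config, so a SimpleNamespace stands in for the config (an assumption);
# the fused cat/add kernel requires CUDA.
def _example_usage():
    from types import SimpleNamespace
    config = SimpleNamespace(hidden_size=4, image_size=4, patch_size=4,
                             num_channels=4, hidden_dropout_prob=0.5)
    module = DeiTEmbeddingsNew(config).cuda()
    pixel_values = torch.rand(4, 4, 4, 4, device='cuda')
    out = module(pixel_values)
    assert out.shape == (4, 3, 4)  # (batch, 1 patch + 2 special tokens, hidden)
    return out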
|
ncoop57/transformers
|
DeiTEmbeddings
| false
| 4,059
|
[
"Apache-2.0"
] | 0
|
d7e156bd1ae2467e9ea1dbc44f31da0ed2296aee
|
https://github.com/ncoop57/transformers/tree/d7e156bd1ae2467e9ea1dbc44f31da0ed2296aee
|
PerceptronTanh
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class PerceptronTanh(nn.Module):
"""Implements a 1-layer perceptron with Tanh activaton."""
def __init__(self, input_dimension, hidden_dimension, output_dimension):
super(PerceptronTanh, self).__init__()
self._layer1 = nn.Linear(input_dimension, hidden_dimension)
self._layer2 = nn.Linear(hidden_dimension, output_dimension, bias=False
)
def forward(self, inp):
return F.tanh(self._layer2(F.relu(self._layer1(inp))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dimension': 4, 'hidden_dimension': 4,
'output_dimension': 4}]
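# --- Usage sketch (added for illustration; not from the original repo) ---
# The last activation is tanh, so every output value is strictly inside (-1, 1).
def _example_usage():
    init_args, init_kwargs = get_init_inputs()
    model = PerceptronTanh(*init_args, **init_kwargs)
    inp, = get_inputs()
    out = model(inp)
    assert out.shape == inp.shape and out.abs().max() < 1.0
    return out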
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_1[grid(256)](buf3, 256, XBLOCK=128, num_warps
=4, num_stages=1)
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_4, buf4
class PerceptronTanhNew(nn.Module):
"""Implements a 1-layer perceptron with Tanh activaton."""
def __init__(self, input_dimension, hidden_dimension, output_dimension):
super(PerceptronTanhNew, self).__init__()
self._layer1 = nn.Linear(input_dimension, hidden_dimension)
self._layer2 = nn.Linear(hidden_dimension, output_dimension, bias=False
)
def forward(self, input_0):
primals_1 = self._layer1.weight
primals_2 = self._layer1.bias
primals_4 = self._layer2.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
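# --- Usage sketch (added for illustration; not part of the generated file).
# call() was traced for (4, 4, 4, 4) inputs and runs its fused ReLU/tanh
# kernels on CUDA.
def _example_usage():
    model = PerceptronTanhNew(4, 4, 4).cuda()
    inp = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(inp)
    assert out.shape == inp.shape
    return out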
|
negotiatorvivian/PDP-SP
|
PerceptronTanh
| false
| 4,060
|
[
"MIT"
] | 0
|
0fa4c1145c2b881c1fde4ed8d9f0845b7967f857
|
https://github.com/negotiatorvivian/PDP-SP/tree/0fa4c1145c2b881c1fde4ed8d9f0845b7967f857
|
CanineSelfAttention
|
from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
import torch.utils.checkpoint
class CanineSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config,
'position_embedding_type', 'absolute')
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.
max_position_embeddings - 1, self.attention_head_size)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, from_tensor, to_tensor, attention_mask=None,
head_mask=None, output_attentions=False):
mixed_query_layer = self.query(from_tensor)
key_layer = self.transpose_for_scores(self.key(to_tensor))
value_layer = self.transpose_for_scores(self.value(to_tensor))
query_layer = self.transpose_for_scores(mixed_query_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
seq_length = from_tensor.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long,
device=from_tensor.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long,
device=from_tensor.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.
max_position_embeddings - 1)
positional_embedding = positional_embedding
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr',
query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr',
query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr',
key_layer, positional_embedding)
attention_scores = (attention_scores +
relative_position_scores_query +
relative_position_scores_key)
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
if attention_mask is not None:
if attention_mask.ndim == 3:
attention_mask = torch.unsqueeze(attention_mask, dim=1)
attention_mask = (1.0 - attention_mask.float()) * -10000.0
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (
context_layer,)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, position_embedding_type=4)}]
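# --- Usage sketch (added for illustration; not from the original repo). With
# output_attentions=True the forward also returns the attention probabilities;
# the mock position_embedding_type (4) matches neither relative variant, so no
# distance embedding is used.
def _example_usage():
    _, init_kwargs = get_init_inputs()
    attn = CanineSelfAttention(init_kwargs['config'])
    attn.eval()  # make the attention-probability dropout a no-op
    from_tensor, to_tensor = get_inputs()
    context, probs = attn(from_tensor, to_tensor, output_attentions=True)
    assert context.shape == from_tensor.shape
    assert probs.shape == (4, 4, 4, 4)  # (batch, heads, from_len, to_len)
    return context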
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_8, buf8, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf9
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class CanineSelfAttentionNew(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config,
'position_embedding_type', 'absolute')
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.
max_position_embeddings - 1, self.attention_head_size)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, input_0, input_1):
primals_1 = self.query.weight
primals_2 = self.query.bias
primals_4 = self.key.weight
primals_5 = self.key.bias
primals_7 = self.value.weight
primals_8 = self.value.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
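# --- Usage sketch (added for illustration; SimpleNamespace stands in for the
# paritybench mock config, an assumption). Dropout was traced away, and the
# fused softmax/bmm kernels require CUDA.
def _example_usage():
    from types import SimpleNamespace
    config = SimpleNamespace(hidden_size=4, num_attention_heads=4,
                             attention_probs_dropout_prob=0.5,
                             position_embedding_type='absolute')
    attn = CanineSelfAttentionNew(config).cuda()
    from_tensor = torch.rand(4, 4, 4, device='cuda')
    to_tensor = torch.rand(4, 4, 4, device='cuda')
    out = attn(from_tensor, to_tensor)
    assert out.shape == (4, 4, 4)
    return out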
|
ncoop57/transformers
|
CanineSelfAttention
| false
| 4,061
|
[
"Apache-2.0"
] | 0
|
d7e156bd1ae2467e9ea1dbc44f31da0ed2296aee
|
https://github.com/ncoop57/transformers/tree/d7e156bd1ae2467e9ea1dbc44f31da0ed2296aee
|
Model
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
keep_rate = 0.5
self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=
3, stride=1, padding='same', bias=True)
self.dropout1 = nn.Dropout2d(1 - keep_rate)
self.conv2 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size
=3, stride=1, padding='same', bias=True)
self.maxpooling1 = nn.MaxPool2d(2)
self.conv3 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size
=3, stride=1, padding='same', bias=True)
self.dropout2 = nn.Dropout2d(1 - keep_rate)
self.conv4 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size
=3, stride=1, padding='same', bias=True)
self.maxpooling2 = nn.MaxPool2d(2)
self.conv5 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size
=3, stride=1, padding='same', bias=True)
self.dropout3 = nn.Dropout2d(1 - keep_rate)
self.conv6 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding='same', bias=True)
self.maxpooling3 = nn.MaxPool2d(2)
self.conv7 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, stride=1, padding='same', bias=True)
self.dropout4 = nn.Dropout2d(1 - keep_rate)
self.conv8 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding='same', bias=True)
self.maxpooling4 = nn.MaxPool2d(2)
self.conv9 = nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, stride=1, padding='same', bias=True)
self.dropout5 = nn.Dropout2d(1 - keep_rate)
self.conv10 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1, padding='same', bias=True)
self.conv11 = nn.ConvTranspose2d(in_channels=256, out_channels=128,
kernel_size=2, stride=2, padding=0, bias=True)
self.conv12 = nn.Conv2d(in_channels=256, out_channels=128,
kernel_size=3, stride=1, padding='same', bias=True)
self.dropout6 = nn.Dropout2d(1 - keep_rate)
self.conv13 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding='same', bias=True)
self.conv14 = nn.ConvTranspose2d(in_channels=128, out_channels=64,
kernel_size=2, stride=2, padding=0, bias=True)
self.conv15 = nn.Conv2d(in_channels=128, out_channels=64,
kernel_size=3, stride=1, padding='same', bias=True)
self.dropout7 = nn.Dropout2d(1 - keep_rate)
self.conv16 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding='same', bias=True)
self.conv17 = nn.ConvTranspose2d(in_channels=64, out_channels=32,
kernel_size=2, stride=2, padding=0, bias=True)
self.conv18 = nn.Conv2d(in_channels=64, out_channels=32,
kernel_size=3, stride=1, padding='same', bias=True)
self.dropout8 = nn.Dropout2d(1 - keep_rate)
self.conv19 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding='same', bias=True)
self.conv20 = nn.ConvTranspose2d(in_channels=32, out_channels=16,
kernel_size=2, stride=2, padding=0, bias=True)
self.conv21 = nn.Conv2d(in_channels=32, out_channels=16,
kernel_size=3, stride=1, padding='same', bias=True)
self.dropout9 = nn.Dropout2d(1 - keep_rate)
self.conv22 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding='same', bias=True)
self.outputs = nn.Conv2d(in_channels=16, out_channels=1,
kernel_size=1, stride=1, padding='same', bias=True)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.dropout1(x)
x = self.conv2(x)
x = F.relu(x)
x1 = self.maxpooling1(x)
x1 = self.conv3(x1)
x1 = F.relu(x1)
x1 = self.dropout2(x1)
x1 = self.conv4(x1)
x1 = F.relu(x1)
x2 = self.maxpooling2(x1)
x2 = self.conv5(x2)
x2 = F.relu(x2)
x2 = self.dropout3(x2)
x2 = self.conv6(x2)
x2 = F.relu(x2)
x3 = self.maxpooling3(x2)
x3 = self.conv7(x3)
x3 = F.relu(x3)
x3 = self.dropout4(x3)
x3 = self.conv8(x3)
x3 = F.relu(x3)
x4 = self.maxpooling4(x3)
x4 = self.conv9(x4)
x4 = F.relu(x4)
x4 = self.dropout5(x4)
x4 = self.conv10(x4)
x4 = F.relu(x4)
x5 = self.conv11(x4)
x5 = torch.cat((x5, x3), 1)
x5 = self.conv12(x5)
x5 = F.relu(x5)
x5 = self.dropout6(x5)
x5 = self.conv13(x5)
x5 = F.relu(x5)
x6 = self.conv14(x5)
x6 = torch.cat((x6, x2), 1)
x6 = self.conv15(x6)
x6 = F.relu(x6)
x6 = self.dropout7(x6)
x6 = self.conv16(x6)
x6 = F.relu(x6)
x7 = self.conv17(x6)
x7 = torch.cat((x7, x1), 1)
x7 = self.conv18(x7)
x7 = F.relu(x7)
x7 = self.dropout8(x7)
x7 = self.conv19(x7)
x7 = F.relu(x7)
x8 = self.conv20(x7)
x8 = torch.cat((x8, x), 1)
x8 = self.conv21(x8)
x8 = F.relu(x8)
x8 = self.dropout9(x8)
x8 = self.conv22(x8)
x8 = F.relu(x8)
outputs = self.outputs(x8)
out = torch.sigmoid(outputs)
return out
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
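# --- Usage sketch (added for illustration; not from the original repo) ---
# The encoder halves the 64x64 input four times down to a 4x4 bottleneck and the
# decoder upsamples back, so the output matches the input spatial size.
def _example_usage():
    model = Model()
    model.eval()  # turn the Dropout2d layers into no-ops
    x, = get_inputs()
    out = model(x)
    assert out.shape == (4, 1, 64, 64)
    assert 0.0 <= out.min() and out.max() <= 1.0  # sigmoid output range
    return out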
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 256
x0 = xindex % 64
x2 = xindex // 16384
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 8192 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 256, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 64 * (-128 + x1) + 8192 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 128
x0 = xindex % 256
x2 = xindex // 32768
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 256 * x1 + 16384 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 256 * (-64 + x1) + 16384 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 1024 % 64
x0 = xindex % 1024
x2 = xindex // 65536
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 32768 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 1024 * (-32 + x1) + 32768 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 32
x0 = xindex % 4096
x2 = xindex // 131072
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 65536 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 32, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 4096 * (-16 + x1) + 65536 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_13(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47) = args
args.clear()
assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_21, (256,), (1,))
assert_size_stride(primals_22, (256, 128, 2, 2), (512, 4, 2, 1))
assert_size_stride(primals_23, (128,), (1,))
assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_25, (128,), (1,))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128,), (1,))
assert_size_stride(primals_28, (128, 64, 2, 2), (256, 4, 2, 1))
assert_size_stride(primals_29, (64,), (1,))
assert_size_stride(primals_30, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (64,), (1,))
assert_size_stride(primals_32, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_33, (64,), (1,))
assert_size_stride(primals_34, (64, 32, 2, 2), (128, 4, 2, 1))
assert_size_stride(primals_35, (32,), (1,))
assert_size_stride(primals_36, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_37, (32,), (1,))
assert_size_stride(primals_38, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_39, (32,), (1,))
assert_size_stride(primals_40, (32, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_41, (16,), (1,))
assert_size_stride(primals_42, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_43, (16,), (1,))
assert_size_stride(primals_44, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_45, (16,), (1,))
assert_size_stride(primals_46, (1, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_47, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(262144)](buf3, primals_5,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(65536)](buf3, buf4,
buf5, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_2[grid(131072)](buf7, primals_7,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_2[grid(131072)](buf9, primals_9,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf9, buf10,
buf11, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 64, 16, 16), (16384, 256, 16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(65536)](buf13, primals_11,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_4[grid(65536)](buf15, primals_13,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_13
buf16 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.
float32)
buf17 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(16384)](buf15,
buf16, buf17, 16384, XBLOCK=128, num_warps=4, num_stages=1)
buf18 = extern_kernels.convolution(buf16, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 128, 8, 8), (8192, 64, 8, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_6[grid(32768)](buf19, primals_15,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 8, 8), (8192, 64, 8, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_6[grid(32768)](buf21, primals_17,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf22 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
buf23 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.int8
)
triton_poi_fused_max_pool2d_with_indices_7[grid(8192)](buf21, buf22,
buf23, 8192, XBLOCK=256, num_warps=4, num_stages=1)
buf24 = extern_kernels.convolution(buf22, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 4, 4), (4096, 16, 4, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_8[grid(16384)](buf25, primals_19,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf26 = extern_kernels.convolution(buf25, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 4, 4), (4096, 16, 4, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_8[grid(16384)](buf27, primals_21,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_21
buf28 = extern_kernels.convolution(buf27, primals_22, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1))
buf29 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
triton_poi_fused_cat_9[grid(65536)](buf28, primals_23, buf21, buf29,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del buf28
del primals_23
buf30 = extern_kernels.convolution(buf29, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 128, 8, 8), (8192, 64, 8, 1))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_6[grid(32768)](buf31, primals_25,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 128, 8, 8), (8192, 64, 8, 1))
buf33 = buf32
del buf32
triton_poi_fused_convolution_relu_6[grid(32768)](buf33, primals_27,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_27
buf34 = extern_kernels.convolution(buf33, primals_28, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 64, 16, 16), (16384, 256, 16, 1))
buf35 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
triton_poi_fused_cat_10[grid(131072)](buf34, primals_29, buf15,
buf35, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del buf34
del primals_29
buf36 = extern_kernels.convolution(buf35, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 64, 16, 16), (16384, 256, 16, 1))
buf37 = buf36
del buf36
triton_poi_fused_convolution_relu_4[grid(65536)](buf37, primals_31,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_31
buf38 = extern_kernels.convolution(buf37, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 64, 16, 16), (16384, 256, 16, 1))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_4[grid(65536)](buf39, primals_33,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_33
buf40 = extern_kernels.convolution(buf39, primals_34, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf41 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
triton_poi_fused_cat_11[grid(262144)](buf40, primals_35, buf9,
buf41, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
del buf40
del primals_35
buf42 = extern_kernels.convolution(buf41, primals_36, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf43 = buf42
del buf42
triton_poi_fused_convolution_relu_2[grid(131072)](buf43, primals_37,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_37
buf44 = extern_kernels.convolution(buf43, primals_38, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf45 = buf44
del buf44
triton_poi_fused_convolution_relu_2[grid(131072)](buf45, primals_39,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_39
buf46 = extern_kernels.convolution(buf45, primals_40, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf47 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_12[grid(524288)](buf46, primals_41, buf3,
buf47, 524288, XBLOCK=512, num_warps=8, num_stages=1)
del buf46
del primals_41
buf48 = extern_kernels.convolution(buf47, primals_42, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf49 = buf48
del buf48
triton_poi_fused_convolution_relu_0[grid(262144)](buf49, primals_43,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_43
buf50 = extern_kernels.convolution(buf49, primals_44, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf51 = buf50
del buf50
triton_poi_fused_convolution_relu_0[grid(262144)](buf51, primals_45,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_45
buf52 = extern_kernels.convolution(buf51, primals_46, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf53 = buf52
del buf52
triton_poi_fused_convolution_sigmoid_13[grid(16384)](buf53,
primals_47, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_47
return (buf53, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, primals_32, primals_34, primals_36, primals_38,
primals_40, primals_42, primals_44, primals_46, buf1, buf3, buf4,
buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf16, buf17, buf19,
buf21, buf22, buf23, buf25, buf27, buf29, buf31, buf33, buf35,
buf37, buf39, buf41, buf43, buf45, buf47, buf49, buf51, buf53)
class ModelNew(nn.Module):
def __init__(self):
super(ModelNew, self).__init__()
keep_rate = 0.5
self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=
3, stride=1, padding='same', bias=True)
self.dropout1 = nn.Dropout2d(1 - keep_rate)
self.conv2 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size
=3, stride=1, padding='same', bias=True)
self.maxpooling1 = nn.MaxPool2d(2)
self.conv3 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size
=3, stride=1, padding='same', bias=True)
self.dropout2 = nn.Dropout2d(1 - keep_rate)
self.conv4 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size
=3, stride=1, padding='same', bias=True)
self.maxpooling2 = nn.MaxPool2d(2)
self.conv5 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size
=3, stride=1, padding='same', bias=True)
self.dropout3 = nn.Dropout2d(1 - keep_rate)
self.conv6 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=3, stride=1, padding='same', bias=True)
self.maxpooling3 = nn.MaxPool2d(2)
self.conv7 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, stride=1, padding='same', bias=True)
self.dropout4 = nn.Dropout2d(1 - keep_rate)
self.conv8 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding='same', bias=True)
self.maxpooling4 = nn.MaxPool2d(2)
self.conv9 = nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, stride=1, padding='same', bias=True)
self.dropout5 = nn.Dropout2d(1 - keep_rate)
self.conv10 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1, padding='same', bias=True)
self.conv11 = nn.ConvTranspose2d(in_channels=256, out_channels=128,
kernel_size=2, stride=2, padding=0, bias=True)
self.conv12 = nn.Conv2d(in_channels=256, out_channels=128,
kernel_size=3, stride=1, padding='same', bias=True)
self.dropout6 = nn.Dropout2d(1 - keep_rate)
self.conv13 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=3, stride=1, padding='same', bias=True)
self.conv14 = nn.ConvTranspose2d(in_channels=128, out_channels=64,
kernel_size=2, stride=2, padding=0, bias=True)
self.conv15 = nn.Conv2d(in_channels=128, out_channels=64,
kernel_size=3, stride=1, padding='same', bias=True)
self.dropout7 = nn.Dropout2d(1 - keep_rate)
self.conv16 = nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding='same', bias=True)
self.conv17 = nn.ConvTranspose2d(in_channels=64, out_channels=32,
kernel_size=2, stride=2, padding=0, bias=True)
self.conv18 = nn.Conv2d(in_channels=64, out_channels=32,
kernel_size=3, stride=1, padding='same', bias=True)
self.dropout8 = nn.Dropout2d(1 - keep_rate)
self.conv19 = nn.Conv2d(in_channels=32, out_channels=32,
kernel_size=3, stride=1, padding='same', bias=True)
self.conv20 = nn.ConvTranspose2d(in_channels=32, out_channels=16,
kernel_size=2, stride=2, padding=0, bias=True)
self.conv21 = nn.Conv2d(in_channels=32, out_channels=16,
kernel_size=3, stride=1, padding='same', bias=True)
self.dropout9 = nn.Dropout2d(1 - keep_rate)
self.conv22 = nn.Conv2d(in_channels=16, out_channels=16,
kernel_size=3, stride=1, padding='same', bias=True)
self.outputs = nn.Conv2d(in_channels=16, out_channels=1,
kernel_size=1, stride=1, padding='same', bias=True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_12 = self.conv6.weight
primals_13 = self.conv6.bias
primals_14 = self.conv7.weight
primals_15 = self.conv7.bias
primals_16 = self.conv8.weight
primals_17 = self.conv8.bias
primals_18 = self.conv9.weight
primals_19 = self.conv9.bias
primals_20 = self.conv10.weight
primals_21 = self.conv10.bias
primals_22 = self.conv11.weight
primals_23 = self.conv11.bias
primals_24 = self.conv12.weight
primals_25 = self.conv12.bias
primals_26 = self.conv13.weight
primals_27 = self.conv13.bias
primals_28 = self.conv14.weight
primals_29 = self.conv14.bias
primals_30 = self.conv15.weight
primals_31 = self.conv15.bias
primals_32 = self.conv16.weight
primals_33 = self.conv16.bias
primals_34 = self.conv17.weight
primals_35 = self.conv17.bias
primals_36 = self.conv18.weight
primals_37 = self.conv18.bias
primals_38 = self.conv19.weight
primals_39 = self.conv19.bias
primals_40 = self.conv20.weight
primals_41 = self.conv20.bias
primals_42 = self.conv21.weight
primals_43 = self.conv21.bias
primals_44 = self.conv22.weight
primals_45 = self.conv22.bias
primals_46 = self.outputs.weight
primals_47 = self.outputs.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47])
return output[0]
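# Hedged usage sketch (an assumption, not part of the original repo): call()
# pins CUDA device 0 and its shape asserts (conv1 takes 1 channel; buf0 is
# (4, 16, 64, 64)) imply a (4, 1, 64, 64) input, so the eager check below is
# guarded on CUDA availability. The graph is a classic U-Net: four 2x
# max-pools take 64x64 down to 4x4 while channels grow 16 -> 256, then four
# stride-2 transposed convs upsample with skip concatenations (the
# triton_poi_fused_cat_* kernels), ending in a 1x1 conv + sigmoid head.
if __name__ == '__main__' and torch.cuda.is_available():
    _net = ModelNew().cuda()
    _mask = _net(torch.rand(4, 1, 64, 64, device='cuda'))
    assert _mask.shape == (4, 1, 64, 64)  # per-pixel probabilities in (0, 1)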
| mntalha/U-NET_Iplementation | Model | false | 4,062 | ["MIT"] | 0 | 7fc2a34352f02a4989659053a6dd8717134913a0 | https://github.com/mntalha/U-NET_Iplementation/tree/7fc2a34352f02a4989659053a6dd8717134913a0 |
DeconvBlock
|
import torch
import torch.nn as nn
class DeconvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(DeconvBlock, self).__init__()
self.conv = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size=3, stride=2, padding=1, output_padding=0)
self.pad = nn.ReflectionPad2d((0, 1, 0, 1))
self.nonlin = nn.ELU(inplace=True)
def forward(self, x):
out = self.conv(x)
out = self.pad(out)
out = self.nonlin(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
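# Shape check as a minimal sketch (an assumption, not from the original repo):
# ConvTranspose2d with k=3, s=2, p=1 maps 4x4 to (4 - 1) * 2 - 2 + 3 = 7x7,
# and the (0, 1, 0, 1) reflection pad brings that to 8x8 -- matching the
# (4, 4, 8, 8) buffer in the compiled version below.
if __name__ == '__main__':
    _out = DeconvBlock(4, 4)(torch.rand(4, 4, 4, 4))
    assert _out.shape == (4, 4, 8, 8)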
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_elu_reflection_pad2d_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x4 = xindex // 64
x2 = xindex // 64 % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (48 + -1 * tl_math.abs(-6 + x0) + -7 * tl_math
.abs(-6 + x1) + 49 * x4), xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(out_ptr0 + x5, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
buf1 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_elu_reflection_pad2d_0[grid(1024)](buf0,
primals_2, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf1, primals_1, primals_3, buf1
class DeconvBlockNew(nn.Module):
def __init__(self, in_channels, out_channels):
super(DeconvBlockNew, self).__init__()
self.conv = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size=3, stride=2, padding=1, output_padding=0)
self.pad = nn.ReflectionPad2d((0, 1, 0, 1))
self.nonlin = nn.ELU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| maxuanquang/FeatDepth | DeconvBlock | false | 4,063 | ["MIT"] | 0 | cc68d9f1f49b65ace8f2918af5b9d552ecd80ba4 | https://github.com/maxuanquang/FeatDepth/tree/cc68d9f1f49b65ace8f2918af5b9d552ecd80ba4 |
BasicModel
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicModel(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
input = 1 - F.relu(1 - input)
return input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
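# Worked example (a sketch, not from the original repo): 1 - relu(1 - x) is an
# upper clamp at 1 -- the identity for x <= 1, constant 1 above it.
if __name__ == '__main__':
    _y = 1 - torch.relu(1 - torch.tensor([-0.5, 0.5, 2.0]))
    assert torch.equal(_y, torch.tensor([-0.5, 0.5, 1.0]))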
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 - tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_rsub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class BasicModelNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ngduduong/captum | BasicModel | false | 4,064 | ["BSD-3-Clause"] | 0 | 6fe5f0f23ea975e73e0c0dee79bdc01b4223d283 | https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283 |
Perceptron
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Perceptron(nn.Module):
"""Implements a 1-layer perceptron."""
def __init__(self, input_dimension, hidden_dimension, output_dimension):
super(Perceptron, self).__init__()
self._layer1 = nn.Linear(input_dimension, hidden_dimension)
self._layer2 = nn.Linear(hidden_dimension, output_dimension, bias=False
)
def forward(self, inp):
return F.sigmoid(self._layer2(F.relu(self._layer1(inp))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dimension': 4, 'hidden_dimension': 4,
'output_dimension': 4}]
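# Hedged usage sketch (an assumption, not part of the original repo): both
# Linear layers act on the last dimension, so any leading batch shape works.
if __name__ == '__main__':
    _out = Perceptron(4, 4, 4)(torch.rand(4, 4, 4, 4))
    assert _out.shape == (4, 4, 4, 4)  # sigmoid keeps values in (0, 1)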
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_sigmoid_1[grid(256)](buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_4, buf4
class PerceptronNew(nn.Module):
"""Implements a 1-layer perceptron."""
def __init__(self, input_dimension, hidden_dimension, output_dimension):
super(PerceptronNew, self).__init__()
self._layer1 = nn.Linear(input_dimension, hidden_dimension)
self._layer2 = nn.Linear(hidden_dimension, output_dimension, bias=False
)
def forward(self, input_0):
primals_1 = self._layer1.weight
primals_2 = self._layer1.bias
primals_4 = self._layer2.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| negotiatorvivian/PDP-SP | Perceptron | false | 4,065 | ["MIT"] | 0 | 0fa4c1145c2b881c1fde4ed8d9f0845b7967f857 | https://github.com/negotiatorvivian/PDP-SP/tree/0fa4c1145c2b881c1fde4ed8d9f0845b7967f857 |
Conv5x5
|
import torch
import torch.nn as nn
class Conv5x5(nn.Module):
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv5x5, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(2)
else:
self.pad = nn.ZeroPad2d(2)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 5)
def forward(self, x):
out = self.pad(x)
out = self.conv(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
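# Shape note as a minimal sketch (an assumption, not from the original repo):
# ReflectionPad2d(2) followed by an unpadded 5x5 conv is size-preserving,
# since H + 4 - (5 - 1) = H, as long as the input is larger than the pad.
if __name__ == '__main__':
    assert Conv5x5(4, 4)(torch.rand(4, 4, 4, 4)).shape == (4, 4, 4, 4)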
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class Conv5x5New(nn.Module):
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv5x5New, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(2)
else:
self.pad = nn.ZeroPad2d(2)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 5)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| maxuanquang/FeatDepth | Conv5x5 | false | 4,066 | ["MIT"] | 0 | cc68d9f1f49b65ace8f2918af5b9d552ecd80ba4 | https://github.com/maxuanquang/FeatDepth/tree/cc68d9f1f49b65ace8f2918af5b9d552ecd80ba4 |
BasicModel4_MultiArgs
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicModel4_MultiArgs(nn.Module):
"""
Slightly modified example model from the paper
https://arxiv.org/pdf/1703.01365.pdf
    f(x1, x2, x3) = ReLU(ReLU(x1 - 1) - ReLU(x2) / x3)
"""
def __init__(self):
super().__init__()
def forward(self, input1, input2, additional_input1, additional_input2=0):
relu_out1 = F.relu(input1 - 1)
relu_out2 = F.relu(input2)
relu_out2 = relu_out2.div(additional_input1)
return F.relu(relu_out1 - relu_out2)[:, additional_input2]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
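# Worked scalar example (a sketch, not from the original repo): with x1 = 3,
# x2 = 2, x3 = 4 the output is relu(relu(3 - 1) - relu(2) / 4) = relu(1.5) = 1.5.
if __name__ == '__main__':
    _m = BasicModel4_MultiArgs()
    _out = _m(torch.full((1, 2), 3.0), torch.full((1, 2), 2.0),
        torch.full((1, 2), 4.0))
    assert torch.allclose(_out, torch.tensor(1.5))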
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_relu_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp4 - tmp8
tmp10 = triton_helpers.maximum(tmp3, tmp9)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_relu_sub_0[grid(256)](arg0_1, arg1_1, arg2_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return reinterpret_tensor(buf0, (4, 4, 4), (64, 4, 1), 0),
class BasicModel4_MultiArgsNew(nn.Module):
"""
Slightly modified example model from the paper
https://arxiv.org/pdf/1703.01365.pdf
    f(x1, x2, x3) = ReLU(ReLU(x1 - 1) - ReLU(x2) / x3)
"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
ngduduong/captum
|
BasicModel4_MultiArgs
| false
| 4,067
|
[
"BSD-3-Clause"
] | 0
|
6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
MultiRelu
|
import torch
import torch.nn as nn
class MultiRelu(nn.Module):
def __init__(self, inplace=False):
super().__init__()
self.relu1 = nn.ReLU(inplace=inplace)
self.relu2 = nn.ReLU(inplace=inplace)
def forward(self, arg1, arg2):
return self.relu1(arg1), self.relu2(arg2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
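# Minimal sketch (an assumption, not from the original repo): the module just
# applies two independent ReLUs and returns both results as a tuple.
if __name__ == '__main__':
    _a, _b = MultiRelu()(torch.tensor([-1.0, 1.0]), torch.tensor([2.0, -2.0]))
    assert torch.equal(_a, torch.tensor([0.0, 1.0]))
    assert torch.equal(_b, torch.tensor([2.0, 0.0]))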
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_relu_0[grid(256)](arg1_1, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg1_1
return buf0, buf1
class MultiReluNew(nn.Module):
def __init__(self, inplace=False):
super().__init__()
self.relu1 = nn.ReLU(inplace=inplace)
self.relu2 = nn.ReLU(inplace=inplace)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
|
ngduduong/captum
|
MultiRelu
| false
| 4,068
|
[
"BSD-3-Clause"
] | 0
|
6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
AlbertAttention
|
from _paritybench_helpers import _mock_config
import math
import torch
from typing import List
from typing import Tuple
from torch import nn
from typing import Set
import torch.utils.checkpoint
def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int',
head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int],
torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads
for head in heads:
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long()
return heads, index
def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim:
'int'=0) ->nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None
)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
class AlbertAttention(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
                f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = (config.hidden_size // config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob
)
self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.pruned_heads = set()
self.position_embedding_type = getattr(config,
'position_embedding_type', 'absolute')
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.
max_position_embeddings - 1, self.attention_head_size)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.
num_attention_heads, self.attention_head_size, self.pruned_heads)
self.query = prune_linear_layer(self.query, index)
self.key = prune_linear_layer(self.key, index)
self.value = prune_linear_layer(self.value, index)
self.dense = prune_linear_layer(self.dense, index, dim=1)
self.num_attention_heads = self.num_attention_heads - len(heads)
self.all_head_size = (self.attention_head_size * self.
num_attention_heads)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, attention_mask=None, head_mask=None,
output_attentions=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long,
device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long,
device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.
max_position_embeddings - 1)
positional_embedding = positional_embedding
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr',
query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr',
query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr',
key_layer, positional_embedding)
attention_scores = (attention_scores +
relative_position_scores_query +
relative_position_scores_key)
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.attention_dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.transpose(2, 1).flatten(2)
projected_context_layer = self.dense(context_layer)
projected_context_layer_dropout = self.output_dropout(
projected_context_layer)
layernormed_context_layer = self.LayerNorm(hidden_states +
projected_context_layer_dropout)
return (layernormed_context_layer, attention_probs
) if output_attentions else (layernormed_context_layer,)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5,
layer_norm_eps=1, position_embedding_type=4)}]
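# Hedged usage sketch (assumptions: _mock_config is a plain attribute
# container, as in get_init_inputs above, and dropout is set to 0 so the check
# is deterministic). With hidden_size=4 and 4 heads the head size is 1, so the
# 1/sqrt(d) score scaling is exactly 1.0 -- which is why the fused Triton
# kernel below scales the biased query by a literal 1.0.
if __name__ == '__main__':
    _cfg = _mock_config(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.0, hidden_dropout_prob=0.0,
        layer_norm_eps=1, position_embedding_type='absolute')
    _out, = AlbertAttention(_cfg)(torch.rand(4, 4, 4))
    assert _out.shape == (4, 4, 4)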
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from typing import List
from typing import Tuple
from torch import nn
from typing import Set
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_3, buf11,
primals_9, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_3, buf11,
primals_9, buf12, buf13, primals_10, primals_11, buf14, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf12
del buf13
del primals_11
return buf14, primals_3, primals_9, primals_10, buf7, reinterpret_tensor(
buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4
), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, primals_8
def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int',
head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int],
torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads
for head in heads:
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long()
return heads, index
def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim:
'int'=0) ->nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None
)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
class AlbertAttentionNew(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
                f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = (config.hidden_size // config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob
)
self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.pruned_heads = set()
self.position_embedding_type = getattr(config,
'position_embedding_type', 'absolute')
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.
max_position_embeddings - 1, self.attention_head_size)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.
num_attention_heads, self.attention_head_size, self.pruned_heads)
self.query = prune_linear_layer(self.query, index)
self.key = prune_linear_layer(self.key, index)
self.value = prune_linear_layer(self.value, index)
self.dense = prune_linear_layer(self.dense, index, dim=1)
self.num_attention_heads = self.num_attention_heads - len(heads)
self.all_head_size = (self.attention_head_size * self.
num_attention_heads)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input_0):
primals_1 = self.query.weight
primals_2 = self.query.bias
primals_4 = self.key.weight
primals_5 = self.key.bias
primals_6 = self.value.weight
primals_7 = self.value.bias
primals_8 = self.dense.weight
primals_9 = self.dense.bias
primals_10 = self.LayerNorm.weight
primals_11 = self.LayerNorm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| ncoop57/transformers | AlbertAttention | false | 4,069 | ["Apache-2.0"] | 0 | d7e156bd1ae2467e9ea1dbc44f31da0ed2296aee | https://github.com/ncoop57/transformers/tree/d7e156bd1ae2467e9ea1dbc44f31da0ed2296aee |
BasicModel3
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicModel3(nn.Module):
"""
Example model two from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2))
"""
def __init__(self):
super().__init__()
def forward(self, input1, input2):
relu_out1 = F.relu(input1 - 1)
relu_out2 = F.relu(input2)
return F.relu(relu_out1 - relu_out2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
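# Worked scalar example (a sketch, not from the original repo): with x1 = 3 and
# x2 = 1 the output is relu(relu(3 - 1) - relu(1)) = relu(1) = 1.
if __name__ == '__main__':
    _out = BasicModel3()(torch.tensor([3.0]), torch.tensor([1.0]))
    assert torch.equal(_out, torch.tensor([1.0]))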
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp7 = tmp4 - tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BasicModel3New(nn.Module):
"""
Example model two from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) - ReLU(x2))
"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ngduduong/captum | BasicModel3 | false | 4,070 | ["BSD-3-Clause"] | 0 | 6fe5f0f23ea975e73e0c0dee79bdc01b4223d283 | https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283 |
BasicModel5_MultiArgs
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicModel5_MultiArgs(nn.Module):
"""
Slightly modified example model from the paper
https://arxiv.org/pdf/1703.01365.pdf
    f(x1, x2, x3) = ReLU(ReLU(x1 - 1) * x3[0] - ReLU(x2) * x3[1])
"""
def __init__(self):
super().__init__()
def forward(self, input1, input2, additional_input1, additional_input2=0):
relu_out1 = F.relu(input1 - 1) * additional_input1[0]
relu_out2 = F.relu(input2)
relu_out2 = relu_out2 * additional_input1[1]
return F.relu(relu_out1 - relu_out2)[:, additional_input2]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_relu_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp9 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 * tmp5
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp10 = tmp8 * tmp9
tmp11 = tmp6 - tmp10
tmp12 = triton_helpers.maximum(tmp3, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_relu_sub_0[grid(256)](arg0_1, arg1_1, arg2_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return reinterpret_tensor(buf0, (4, 4, 4), (64, 4, 1), 0),
class BasicModel5_MultiArgsNew(nn.Module):
"""
Slightly modified example model from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1 - 1) * x3[0] - ReLU(x2) * x3[1])
"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
ngduduong/captum
|
BasicModel5_MultiArgs
| false
| 4,071
|
[
"BSD-3-Clause"
] | 0
|
6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
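A hedged sketch (added for illustration) of the indexing that shapes this record's output: additional_input1[0] and additional_input1[1] are the two leading (4, 4, 4) slices the kernel loads at offsets x0 and 64 + x0, and the compile-time slice additional_input2 = 0 is why call() returns a reinterpreted (4, 4, 4) view of the full buffer.

import torch
import torch.nn.functional as F

# Eager reference; x3[0] / x3[1] are the slices read from in_ptr1,
# and [:, 0] is the static slice behind the (4, 4, 4) output.
x1, x2, x3 = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
full = F.relu(F.relu(x1 - 1) * x3[0] - F.relu(x2) * x3[1])
out = full[:, 0]
assert out.shape == (4, 4, 4)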
|
BasicModel6_MultiTensor
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicModel6_MultiTensor(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input1, input2):
input = input1 + input2
return 1 - F.relu(1 - input)[:, 1]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp2
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp3 - tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_rsub_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BasicModel6_MultiTensorNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ngduduong/captum
|
BasicModel6_MultiTensor
| false
| 4,072
|
[
"BSD-3-Clause"
] | 0
|
6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
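A short sketch (added for illustration) of why the kernel above touches only 64 elements per input: the static [:, 1] slice lets the compiler restrict every load to channel 1 (offset 16 + x0 + 64 * x1), and since all the ops are elementwise, slicing before or after computing gives identical results.

import torch
import torch.nn.functional as F

# Slicing commutes with the elementwise ops, which is exactly what the
# restricted load offsets in the fused kernel exploit.
a = torch.rand(4, 4, 4, 4)
b = torch.rand(4, 4, 4, 4)
out = 1 - F.relu(1 - (a + b))[:, 1]
assert torch.equal(out, 1 - F.relu(1 - (a[:, 1] + b[:, 1])))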
|
T5DenseReluDense
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
class T5DenseReluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = F.relu(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(d_model=4, d_ff=4, dropout_rate=0.5)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, buf3,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_3, buf3
class T5DenseReluDenseNew(nn.Module):
def __init__(self, config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, input_0):
primals_1 = self.wi.weight
primals_3 = self.wo.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Hzfinfdu/Black-Box-Tuning
|
T5DenseReluDense
| false
| 4,073
|
[
"MIT"
] | 0
|
64eb5505875dc1b242c6f0a2a2f07e4000c24cb4
|
https://github.com/Hzfinfdu/Black-Box-Tuning/tree/64eb5505875dc1b242c6f0a2a2f07e4000c24cb4
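A small usage sketch; SimpleNamespace is a stand-in assumption for the _mock_config helper used above. It also shows why no dropout appears in call(): traced for inference, nn.Dropout is the identity, so only the two matmuls and the fused ReLU survive in the compiled graph.

import torch
import torch.nn as nn
import torch.nn.functional as F
from types import SimpleNamespace

config = SimpleNamespace(d_model=4, d_ff=4, dropout_rate=0.5)
wi = nn.Linear(config.d_model, config.d_ff, bias=False)
wo = nn.Linear(config.d_ff, config.d_model, bias=False)
x = torch.rand(4, 4, 4, 4)
y = wo(F.relu(wi(x)))  # dropout omitted: identity in eval mode
assert y.shape == (4, 4, 4, 4)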
|
STFullyConnected
|
import os
import time
import torch
import numpy as np
from torch import nn
from torch import optim
from torch.nn import functional as F
class Base(nn.Module):
""" This class is the base structure for all of classification/regression DNN models.
Mainly, it provides the general methods for training, evaluating model and predcting the given data.
"""
def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001):
"""Training the DNN model, similar to the scikit-learn or Keras style.
In the end, the optimal value of parameters will also be persisted on the hard drive.
Arguments:
            train_loader (DataLoader): Data loader for training set,
                including m X n target FloatTensor and m X l label FloatTensor
                (m is the No. of samples, n is the No. of features, l is the No. of classes or tasks)
            valid_loader (DataLoader): Data loader for validation set.
                The data structure is the same as for train_loader.
            out (str): the file path for the model file (suffixed with '.pkg')
                and log file (suffixed with '.log').
            epochs (int, optional): The maximum number of training epochs (default: 100)
lr (float, optional): learning rate (default: 1e-4)
"""
if 'optim' in self.__dict__:
optimizer = self.optim
else:
optimizer = optim.Adam(self.parameters(), lr=lr)
best_loss = np.inf
last_save = 0
if not os.path.exists(out):
try:
os.makedirs(out)
            except PermissionError:
                pass
log = open(file=out + '.log', mode='w+')
for epoch in range(epochs):
time.time()
for param_group in optimizer.param_groups:
param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10)
for i, (Xb, yb) in enumerate(train_loader):
Xb, yb = Xb, yb
optimizer.zero_grad()
y_ = self(Xb, istrain=True)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
wb = torch.Tensor(yb.size())
wb[yb == 3.99] = 0.1
wb[yb != 3.99] = 1
loss = self.criterion(y_ * wb, yb * wb)
loss.backward()
optimizer.step()
            loss_valid = self.evaluate(valid_loader)
            if loss_valid < best_loss:
                torch.save(self.state_dict(), out + '.pkg')
                best_loss = loss_valid
                last_save = epoch
            elif epoch - last_save > 100:
                break
log.close()
self.load_state_dict(torch.load(out + '.pkg'))
def evaluate(self, loader):
"""Evaluating the performance of the DNN model.
Arguments:
            loader (torch.utils.data.DataLoader): data loader for the test set,
                including m X n target FloatTensor and m X l label FloatTensor
                (m is the No. of samples, n is the No. of features, l is the No. of classes or tasks)
        Return:
            loss (float): the average loss value based on the calculation of the loss function with the given test set.
"""
loss = 0
for Xb, yb in loader:
Xb, yb = Xb, yb
y_ = self.forward(Xb)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
wb = torch.Tensor(yb.size())
wb[yb == 3.99] = 0.1
wb[yb != 3.99] = 1
loss += self.criterion(y_ * wb, yb * wb).item()
loss = loss / len(loader)
return loss
def predict(self, loader):
"""Predicting the probability of each sample in the given dataset.
Arguments:
            loader (torch.utils.data.DataLoader): data loader for the test set,
                only including m X n target FloatTensor
                (m is the No. of samples, n is the No. of features)
        Return:
            score (ndarray): probability of each sample in the given dataset,
                it is an m X l FloatTensor (m is the No. of samples, l is the No. of classes or tasks).
"""
score = []
for Xb, yb in loader:
Xb = Xb
y_ = self.forward(Xb)
score.append(y_.detach().cpu())
score = torch.cat(score, dim=0).numpy()
return score
class STFullyConnected(Base):
"""Single task DNN classification/regression model. It contains four fully connected layers between which
are dropout layer for robustness.
Arguments:
n_dim (int): the No. of columns (features) for input tensor
n_class (int): the No. of columns (classes) for output tensor.
is_reg (bool, optional): Regression model (True) or Classification model (False)
"""
def __init__(self, n_dim, n_class, is_reg=False):
super(STFullyConnected, self).__init__()
self.dropout = nn.Dropout(0.25)
self.fc0 = nn.Linear(n_dim, 4000)
self.fc1 = nn.Linear(4000, 1000)
self.fc3 = nn.Linear(1000, n_class)
self.is_reg = is_reg
if is_reg:
self.criterion = nn.MSELoss()
elif n_class == 1:
self.criterion = nn.BCELoss()
self.activation = nn.Sigmoid()
else:
self.criterion = nn.CrossEntropyLoss()
self.activation = nn.Softmax()
def forward(self, X, istrain=False):
"""Invoke the class directly as a function
Arguments:
X (FloatTensor): m X n FloatTensor, m is the No. of samples, n is the No. of features.
istrain (bool, optional): is it invoked during training process (True) or just for prediction (False)
Return:
            y (FloatTensor): m X l FloatTensor, m is the No. of samples, l is the No. of classes
"""
y = F.relu(self.fc0(X))
if istrain:
y = self.dropout(y)
y = F.relu(self.fc1(y))
if istrain:
y = self.dropout(y)
if self.is_reg:
y = self.fc3(y)
else:
y = self.activation(self.fc3(y))
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_dim': 4, 'n_class': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import os
import time
import numpy as np
from torch import nn
from torch import optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4000
x1 = xindex // 4000
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + (x0 + 4096 * x1), tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 1000
x2 = xindex % 4000
x3 = xindex // 4000
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 4096 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4000, 4), (4, 1))
assert_size_stride(primals_2, (4000,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1000, 4000), (4000, 1))
assert_size_stride(primals_5, (1000,), (1,))
assert_size_stride(primals_6, (4, 1000), (1000, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4000), (4000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4000), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4000), (64000, 16000,
4000, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 4000), (65536, 16384, 4096, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256000)](buf1,
primals_2, buf8, 256000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 1000), (1000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4000), (4000, 1), 0
), reinterpret_tensor(primals_4, (4000, 1000), (1, 4000), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1000), (16000, 4000, 1000,
1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 1000), (16384, 4096, 1000, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64000)](buf3,
primals_5, buf7, 64000, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 1000),
(1000, 1), 0), reinterpret_tensor(primals_6, (1000, 4), (1,
1000), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_3[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4000), (4000, 1), 0
), reinterpret_tensor(buf3, (64, 1000), (1000, 1), 0
), buf6, primals_6, buf7, primals_4, buf8
class Base(nn.Module):
""" This class is the base structure for all of classification/regression DNN models.
Mainly, it provides the general methods for training, evaluating model and predcting the given data.
"""
def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001):
"""Training the DNN model, similar to the scikit-learn or Keras style.
In the end, the optimal value of parameters will also be persisted on the hard drive.
Arguments:
            train_loader (DataLoader): Data loader for training set,
                including m X n target FloatTensor and m X l label FloatTensor
                (m is the No. of samples, n is the No. of features, l is the No. of classes or tasks)
            valid_loader (DataLoader): Data loader for validation set.
                The data structure is the same as for train_loader.
            out (str): the file path for the model file (suffixed with '.pkg')
                and log file (suffixed with '.log').
            epochs (int, optional): The maximum number of training epochs (default: 100)
lr (float, optional): learning rate (default: 1e-4)
"""
if 'optim' in self.__dict__:
optimizer = self.optim
else:
optimizer = optim.Adam(self.parameters(), lr=lr)
best_loss = np.inf
last_save = 0
if not os.path.exists(out):
try:
os.makedirs(out)
            except PermissionError:
                pass
log = open(file=out + '.log', mode='w+')
for epoch in range(epochs):
time.time()
for param_group in optimizer.param_groups:
param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10)
for i, (Xb, yb) in enumerate(train_loader):
Xb, yb = Xb, yb
optimizer.zero_grad()
y_ = self(Xb, istrain=True)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
wb = torch.Tensor(yb.size())
wb[yb == 3.99] = 0.1
wb[yb != 3.99] = 1
loss = self.criterion(y_ * wb, yb * wb)
loss.backward()
optimizer.step()
            loss_valid = self.evaluate(valid_loader)
            if loss_valid < best_loss:
                torch.save(self.state_dict(), out + '.pkg')
                best_loss = loss_valid
                last_save = epoch
            elif epoch - last_save > 100:
                break
log.close()
self.load_state_dict(torch.load(out + '.pkg'))
def evaluate(self, loader):
"""Evaluating the performance of the DNN model.
Arguments:
            loader (torch.utils.data.DataLoader): data loader for the test set,
                including m X n target FloatTensor and m X l label FloatTensor
                (m is the No. of samples, n is the No. of features, l is the No. of classes or tasks)
        Return:
            loss (float): the average loss value based on the calculation of the loss function with the given test set.
"""
loss = 0
for Xb, yb in loader:
Xb, yb = Xb, yb
y_ = self.forward(Xb)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
wb = torch.Tensor(yb.size())
wb[yb == 3.99] = 0.1
wb[yb != 3.99] = 1
loss += self.criterion(y_ * wb, yb * wb).item()
loss = loss / len(loader)
return loss
def predict(self, loader):
"""Predicting the probability of each sample in the given dataset.
Arguments:
            loader (torch.utils.data.DataLoader): data loader for the test set,
                only including m X n target FloatTensor
                (m is the No. of samples, n is the No. of features)
        Return:
            score (ndarray): probability of each sample in the given dataset,
                it is an m X l FloatTensor (m is the No. of samples, l is the No. of classes or tasks).
"""
score = []
for Xb, yb in loader:
Xb = Xb
y_ = self.forward(Xb)
score.append(y_.detach().cpu())
score = torch.cat(score, dim=0).numpy()
return score
class STFullyConnectedNew(Base):
"""Single task DNN classification/regression model. It contains four fully connected layers between which
are dropout layer for robustness.
Arguments:
n_dim (int): the No. of columns (features) for input tensor
n_class (int): the No. of columns (classes) for output tensor.
is_reg (bool, optional): Regression model (True) or Classification model (False)
"""
def __init__(self, n_dim, n_class, is_reg=False):
super(STFullyConnectedNew, self).__init__()
self.dropout = nn.Dropout(0.25)
self.fc0 = nn.Linear(n_dim, 4000)
self.fc1 = nn.Linear(4000, 1000)
self.fc3 = nn.Linear(1000, n_class)
self.is_reg = is_reg
if is_reg:
self.criterion = nn.MSELoss()
elif n_class == 1:
self.criterion = nn.BCELoss()
self.activation = nn.Sigmoid()
else:
self.criterion = nn.CrossEntropyLoss()
self.activation = nn.Softmax()
def forward(self, input_0):
primals_1 = self.fc0.weight
primals_2 = self.fc0.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
naisuu/DrugEx
|
STFullyConnected
| false
| 4,074
|
[
"MIT"
] | 0
|
8708c98a137473f11990d70e43a46018806b6f39
|
https://github.com/naisuu/DrugEx/tree/8708c98a137473f11990d70e43a46018806b6f39
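A brief usage sketch, assuming the STFullyConnected class above is in scope; is_reg=True is an assumption chosen to show the branch the compiled trace does not take. The trace above corresponds to the default classification path, whose nn.Softmax() is built without an explicit dim and therefore normalizes over dim 1 of the 4-D input, as the two-pass softmax kernels show.

import torch

net = STFullyConnected(n_dim=4, n_class=1, is_reg=True)
y = net(torch.rand(8, 4))  # istrain defaults to False, so dropout is skipped
assert y.shape == (8, 1)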
|
BasicModel2
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicModel2(nn.Module):
"""
Example model one from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1) - 1 - ReLU(x2))
"""
def __init__(self):
super().__init__()
def forward(self, input1, input2):
relu_out1 = F.relu(input1)
relu_out2 = F.relu(input2)
return F.relu(relu_out1 - 1 - relu_out2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 1.0
tmp4 = tmp2 - tmp3
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = tmp4 - tmp6
tmp8 = triton_helpers.maximum(tmp1, tmp7)
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BasicModel2New(nn.Module):
"""
Example model one from the paper
https://arxiv.org/pdf/1703.01365.pdf
f(x1, x2) = RELU(ReLU(x1) - 1 - ReLU(x2))
"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ngduduong/captum
|
BasicModel2
| false
| 4,075
|
[
"BSD-3-Clause"
] | 0
|
6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
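A quick eager check (added for illustration): the fused kernel applies the ops in exactly the docstring's order, and because torch.rand draws from [0, 1), ReLU(x1) - 1 is always negative, so the output is identically zero on these sample inputs.

import torch
import torch.nn.functional as F

x1 = torch.rand(4, 4, 4, 4)
x2 = torch.rand(4, 4, 4, 4)
out = F.relu(F.relu(x1) - 1 - F.relu(x2))
assert (out == 0).all()  # ReLU(x1) < 1 on [0, 1) inputs, so the outer ReLU clamps to 0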
|
ReLUDeepLiftModel
|
import torch
import torch.nn as nn
class ReLUDeepLiftModel(nn.Module):
"""
https://www.youtube.com/watch?v=f_iAM0NPwnM
"""
def __init__(self):
super().__init__()
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
def forward(self, x1, x2):
return 2 * self.relu1(x1) + 2 * self.relu2(x2 - 1.5)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp6 = 1.5
tmp7 = tmp5 - tmp6
tmp8 = triton_helpers.maximum(tmp1, tmp7)
tmp9 = tmp8 * tmp3
tmp10 = tmp4 + tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_relu_sub_0[grid(256)](arg0_1, arg1_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class ReLUDeepLiftModelNew(nn.Module):
"""
https://www.youtube.com/watch?v=f_iAM0NPwnM
"""
def __init__(self):
super().__init__()
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ngduduong/captum
|
ReLUDeepLiftModel
| false
| 4,076
|
[
"BSD-3-Clause"
] | 0
|
6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
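A sketch of the arithmetic the single fused kernel performs (an illustrative addition): with inputs from [0, 1), x2 - 1.5 is always negative, so the second branch vanishes and the model reduces to 2 * x1 on these samples.

import torch
import torch.nn.functional as F

x1 = torch.rand(4, 4, 4, 4)
x2 = torch.rand(4, 4, 4, 4)
out = 2 * F.relu(x1) + 2 * F.relu(x2 - 1.5)
assert torch.equal(out, 2 * x1)  # the second term is zero for x2 < 1.5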
|
FeatureModel
|
import torch
import torch.nn as nn
class FeatureModel(nn.Module):
def __init__(self, num_features_in, num_anchors=9, feature_size_out=64,
prior=0.01, feature_size=256):
super(FeatureModel, self).__init__()
self.feature_size_out = feature_size_out
self.num_anchors = num_anchors
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * self.
feature_size_out, kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.conv4(out)
out = self.act4(out)
out = self.output(out)
out = self.output_act(out)
out1 = out.permute(0, 2, 3, 1)
batch_size, width, height, _channels = out1.shape
out2 = out1.view(batch_size, width, height, self.num_anchors, self.
feature_size_out)
return out2.contiguous().view(x.shape[0], -1, self.feature_size_out)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features_in': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_clone_convolution_5(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 576
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp3, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (576, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (576,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 4, 3, 3), (36, 1, 12, 4), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(1024, 9)](primals_1, buf0, 1024, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_4, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_6, buf3, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_8, buf4, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((576, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_3[grid(147456, 9)](primals_10, buf5, 147456, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_4[grid(16384)](buf7, primals_2,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf8 = extern_kernels.convolution(buf7, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_5,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf10 = extern_kernels.convolution(buf9, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_4[grid(16384)](buf11, primals_7,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf12 = extern_kernels.convolution(buf11, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(16384)](buf13, primals_9,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf14 = extern_kernels.convolution(buf13, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 576, 4, 4), (9216, 1, 2304, 576))
buf15 = buf14
del buf14
buf16 = empty_strided_cuda((4, 4, 4, 9, 64), (9216, 2304, 576, 64,
1), torch.float32)
triton_poi_fused_clone_convolution_5[grid(36864)](buf15, primals_11,
buf16, 36864, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
return reinterpret_tensor(buf16, (4, 144, 64), (9216, 64, 1), 0
), buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf11, buf13, buf15
class FeatureModelNew(nn.Module):
def __init__(self, num_features_in, num_anchors=9, feature_size_out=64,
prior=0.01, feature_size=256):
super(FeatureModelNew, self).__init__()
self.feature_size_out = feature_size_out
self.num_anchors = num_anchors
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * self.
feature_size_out, kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.output.weight
primals_11 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
nassarofficial/pytorch-retina
|
FeatureModel
| false
| 4,077
|
[
"Apache-2.0"
] | 0
|
b2f10ffa7617797280c1f44d562c455b996254af
|
https://github.com/nassarofficial/pytorch-retina/tree/b2f10ffa7617797280c1f44d562c455b996254af
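A sketch of just the output reshuffle, since it is the least obvious part of this record: the head's (B, A*F, H, W) map is permuted channels-last, its channel dim split into (anchors, features), and flattened so each of the H * W * A anchor slots owns a contiguous feature vector. With the 4x4 input above, 4 * 4 * 9 = 144 rows of 64 features, matching the (4, 144, 64) tensor returned by call().

import torch

B, A, Fs, H, W = 4, 9, 64, 4, 4
out = torch.rand(B, A * Fs, H, W)
out1 = out.permute(0, 2, 3, 1)            # (B, H, W, A*Fs), channels-last
out2 = out1.view(B, H, W, A, Fs)          # split channels into anchors x features
flat = out2.contiguous().view(B, -1, Fs)  # one row per (position, anchor) slot
assert flat.shape == (4, 144, 64)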
|
TanhDeepLiftModel
|
import torch
import torch.nn as nn
class TanhDeepLiftModel(nn.Module):
"""
Same as the ReLUDeepLiftModel, but with activations
that can have negative outputs
"""
def __init__(self):
super().__init__()
self.tanh1 = nn.Tanh()
self.tanh2 = nn.Tanh()
def forward(self, x1, x2):
return 2 * self.tanh1(x1) + 2 * self.tanh2(x2 - 1.5)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sub_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp5 = 1.5
tmp6 = tmp4 - tmp5
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp7 * tmp2
tmp9 = tmp3 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sub_tanh_0[grid(256)](arg0_1, arg1_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class TanhDeepLiftModelNew(nn.Module):
"""
Same as the ReLUDeepLiftModel, but with activations
that can have negative outputs
"""
def __init__(self):
super().__init__()
self.tanh1 = nn.Tanh()
self.tanh2 = nn.Tanh()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ngduduong/captum
|
TanhDeepLiftModel
| false
| 4,078
|
[
"BSD-3-Clause"
] | 0
|
6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
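A short eager reference (an illustrative addition): the same two-branch structure as the ReLU variant, but routed through libdevice.tanh in the fused kernel; since tanh preserves sign, the x2 - 1.5 branch now contributes strictly negative values instead of vanishing.

import torch

x1 = torch.rand(4, 4, 4, 4)
x2 = torch.rand(4, 4, 4, 4)
out = 2 * torch.tanh(x1) + 2 * torch.tanh(x2 - 1.5)
assert (2 * torch.tanh(x2 - 1.5) < 0).all()  # tanh keeps the sign of x2 - 1.5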
|
SigmoidDeepLiftModel
|
import torch
import torch.nn as nn
class SigmoidDeepLiftModel(nn.Module):
"""
Model architecture from:
    https://medium.com/coinmonks/create-a-neural-network-in-pytorch-and-make-your-life-simpler-ec5367895199
"""
def __init__(self, num_in, num_hidden, num_out):
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden, bias=False)
self.lin2 = nn.Linear(num_hidden, num_out, bias=False)
self.lin1.weight = nn.Parameter(torch.ones(num_hidden, num_in))
self.lin2.weight = nn.Parameter(torch.ones(num_out, num_hidden))
self.relu1 = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input):
lin1 = self.lin1(input)
lin2 = self.lin2(self.relu1(lin1))
return self.sigmoid(lin2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_in': 4, 'num_hidden': 4, 'num_out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, buf4,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_sigmoid_1[grid(256)](buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_3, buf4
class SigmoidDeepLiftModelNew(nn.Module):
"""
Model architecture from:
    https://medium.com/coinmonks/create-a-neural-network-in-pytorch-and-make-your-life-simpler-ec5367895199
"""
def __init__(self, num_in, num_hidden, num_out):
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden, bias=False)
self.lin2 = nn.Linear(num_hidden, num_out, bias=False)
self.lin1.weight = nn.Parameter(torch.ones(num_hidden, num_in))
self.lin2.weight = nn.Parameter(torch.ones(num_out, num_hidden))
self.relu1 = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.lin1.weight
primals_3 = self.lin2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ngduduong/captum
|
SigmoidDeepLiftModel
| false
| 4,079
|
[
"BSD-3-Clause"
] | 0
|
6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
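A sketch of the closed form this particular parameterization admits (added for illustration): with both weight matrices frozen to ones and no biases, every hidden unit equals the input's row sum, so the network collapses to sigmoid(num_hidden * ReLU(sum(x, dim=-1))) broadcast across the num_out outputs.

import torch

x = torch.rand(4, 4, 4, 4)
s = x.sum(dim=-1, keepdim=True)                     # each lin1 unit = row sum
expected = torch.sigmoid(4 * torch.relu(s)).expand(4, 4, 4, 4)  # 4 = num_hidden
assert expected.shape == (4, 4, 4, 4)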
|
BasicModel_ConvNet_One_Conv
|
import torch
import torch.nn as nn
class BasicModel_ConvNet_One_Conv(nn.Module):
def __init__(self, inplace=False):
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, 1)
self.relu1 = nn.ReLU(inplace=inplace)
self.fc1 = nn.Linear(8, 4)
self.conv1.weight = nn.Parameter(torch.ones(2, 1, 3, 3))
self.conv1.bias = nn.Parameter(torch.tensor([-50.0, -75.0]))
self.fc1.weight = nn.Parameter(torch.cat([torch.ones(4, 5), -1 *
torch.ones(4, 3)], dim=1))
self.fc1.bias = nn.Parameter(torch.zeros(4))
self.relu2 = nn.ReLU(inplace=inplace)
def forward(self, x, x2=None):
if x2 is not None:
x = x + x2
x = self.relu1(self.conv1(x))
x = x.view(-1, 8)
return self.relu2(self.fc1(x))
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 30752
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 3844 % 2
x0 = xindex % 3844
x3 = xindex // 3844
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x0 + 3872 * x3), tmp4, xmask)
tl.store(out_ptr1 + (x0 + 3968 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_view_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 30752
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3872 * (x0 // 3844) + x0 % 3844), xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 15376
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 62, 62), (7688, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 2, 62, 62), (7744, 3872, 62, 1),
torch.float32)
buf6 = empty_strided_cuda((4, 2, 62, 62), (7936, 3968, 62, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(30752)](
buf0, primals_2, buf1, buf6, 30752, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
buf2 = reinterpret_tensor(buf0, (3844, 8), (8, 1), 0)
del buf0
triton_poi_fused_convolution_relu_view_1[grid(30752)](buf1, buf2,
30752, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((3844, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (8, 4), (1, 8
), 0), out=buf3)
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((3844, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(15376)](buf4,
primals_5, buf5, 15376, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf4, primals_1, primals_3, buf2, buf5, primals_4, buf6
class BasicModel_ConvNet_One_ConvNew(nn.Module):
def __init__(self, inplace=False):
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, 1)
self.relu1 = nn.ReLU(inplace=inplace)
self.fc1 = nn.Linear(8, 4)
self.conv1.weight = nn.Parameter(torch.ones(2, 1, 3, 3))
self.conv1.bias = nn.Parameter(torch.tensor([-50.0, -75.0]))
self.fc1.weight = nn.Parameter(torch.cat([torch.ones(4, 5), -1 *
torch.ones(4, 3)], dim=1))
self.fc1.bias = nn.Parameter(torch.zeros(4))
self.relu2 = nn.ReLU(inplace=inplace)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
ngduduong/captum
|
BasicModel_ConvNet_One_Conv
| false
| 4,080
|
[
"BSD-3-Clause"
] | 0
|
6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
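The shape bookkeeping behind the kernel sizes above, sketched in eager mode: a 3x3 valid conv takes 64x64 to 62x62, giving 4 * 2 * 62 * 62 = 30752 activations, which view(-1, 8) packs into 3844 rows; and with all-ones weights on [0, 1) inputs, each pre-bias response stays below 9, so the -50 / -75 biases zero the entire forward pass.

import torch

x = torch.rand(4, 1, 64, 64)
conv = torch.nn.Conv2d(1, 2, 3, 1)
conv.weight.data.fill_(1.0)
conv.bias.data = torch.tensor([-50.0, -75.0])
h = torch.relu(conv(x)).view(-1, 8)
assert h.shape == (3844, 8)   # 30752 activations / 8 features per row
assert (h == 0).all()         # 9-pixel sums < 9 never overcome the biases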
|
Binarizer
|
import torch
from abc import ABC
class BaseOperator(ABC):
"""
Abstract class defining the basic structure for operator implementations in Hummingbird.
"""
def __init__(self, regression=False, classification=False, transformer=
False, anomaly_detection=False, **kwargs):
"""
Args:
regression: Whether operator is a regression model.
classification: Whether the operator is a classification model.
transformer: Whether the operator is a feature transformer.
anomaly_detection: Whether the operator is an anomaly detection model.
kwargs: Other keyword arguments.
"""
super().__init__()
self.regression = regression
self.classification = classification
self.transformer = transformer
self.anomaly_detection = anomaly_detection
class Binarizer(BaseOperator, torch.nn.Module):
"""
Class implementing Binarizer operators in PyTorch.
"""
def __init__(self, threshold, device):
super(Binarizer, self).__init__()
self.transformer = True
self.threshold = torch.nn.Parameter(torch.FloatTensor([threshold]),
requires_grad=False)
def forward(self, x):
return torch.gt(x, self.threshold).float()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'threshold': 4, 'device': 0}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from abc import ABC
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_gt_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 > tmp2
tmp4 = tmp3.to(tl.float32)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (1,), (1,))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_gt_0[grid(256)](arg1_1, arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BaseOperator(ABC):
"""
Abstract class defining the basic structure for operator implementations in Hummingbird.
"""
def __init__(self, regression=False, classification=False, transformer=
False, anomaly_detection=False, **kwargs):
"""
Args:
regression: Whether operator is a regression model.
classification: Whether the operator is a classification model.
transformer: Whether the operator is a feature transformer.
anomaly_detection: Whether the operator is an anomaly detection model.
kwargs: Other keyword arguments.
"""
super().__init__()
self.regression = regression
self.classification = classification
self.transformer = transformer
self.anomaly_detection = anomaly_detection
class BinarizerNew(BaseOperator, torch.nn.Module):
"""
Class implementing Binarizer operators in PyTorch.
"""
def __init__(self, threshold, device):
super(BinarizerNew, self).__init__()
self.transformer = True
self.threshold = torch.nn.Parameter(torch.FloatTensor([threshold]),
requires_grad=False)
def forward(self, input_0):
arg0_1 = self.threshold
arg1_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
|
kvenkman/hummingbird
|
Binarizer
| false
| 4,081
|
[
"MIT"
] | 0
|
dac08f4ff4a4103df4a8e83329a02f2d804bf34d
|
https://github.com/kvenkman/hummingbird/tree/dac08f4ff4a4103df4a8e83329a02f2d804bf34d
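A one-line eager equivalent of the fused gt-and-cast kernel; threshold 0.5 is chosen here (an assumption for illustration) because the record's threshold=4 maps every [0, 1) input to zero.

import torch

threshold = torch.nn.Parameter(torch.FloatTensor([0.5]), requires_grad=False)
x = torch.rand(4, 4, 4, 4)
out = torch.gt(x, threshold).float()  # broadcast compare, then bool -> float32
assert set(out.unique().tolist()) <= {0.0, 1.0}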
|
DeiTAttention
|
from _paritybench_helpers import _mock_config
import math
import torch
from typing import List
from typing import Tuple
from torch import nn
from typing import Set
import torch.utils.checkpoint
def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int',
head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int],
torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads
for head in heads:
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long()
return heads, index
def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim:
'int'=0) ->nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None
)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
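def _demo_prune_linear_layer():
    # Editor's illustrative sketch, not part of the original source: exercises
    # the two helpers above by pruning head 0 of a 4-head layer with
    # head_size 1. The function name is hypothetical.
    layer = nn.Linear(4, 4)
    heads, index = find_pruneable_heads_and_indices(heads=[0], n_heads=4,
        head_size=1, already_pruned_heads=set())
    pruned = prune_linear_layer(layer, index)  # keeps output rows 1..3
    assert pruned.weight.shape == (4 - len(heads), 4)  # torch.Size([3, 4])
    return pruned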
class DeiTSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
            f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, head_mask=None, output_attentions=False):
mixed_query_layer = self.query(hidden_states)
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (
context_layer,)
return outputs
class DeiTSelfOutput(nn.Module):
"""
The residual connection is defined in DeiTLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class DeiTAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = DeiTSelfAttention(config)
self.output = DeiTSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.
attention.num_attention_heads, self.attention.
attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = (self.attention.
num_attention_heads - len(heads))
self.attention.all_head_size = (self.attention.attention_head_size *
self.attention.num_attention_heads)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, head_mask=None, output_attentions=False):
self_outputs = self.attention(hidden_states, head_mask,
output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
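# Editor's illustrative sketch, not part of the original source: running the
# attention block under the same mock config the harness uses (`_mock_config`
# comes from the import above). Dropout is active in train mode, so .eval()
# makes the output deterministic.
if __name__ == '__main__':
    _init_args, _init_kwargs = get_init_inputs()
    _attn = DeiTAttention(**_init_kwargs).eval()
    _out = _attn(*get_inputs())
    print(_out[0].shape)  # torch.Size([4, 4, 4])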
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from typing import List
from typing import Tuple
from torch import nn
from typing import Set
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_9
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_8
def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int',
head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int],
torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads
for head in heads:
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long()
return heads, index
def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim:
'int'=0) ->nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None
)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
class DeiTSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
            f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, head_mask=None, output_attentions=False):
mixed_query_layer = self.query(hidden_states)
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (
context_layer,)
return outputs
class DeiTSelfOutput(nn.Module):
"""
The residual connection is defined in DeiTLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class DeiTAttentionNew(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = DeiTSelfAttention(config)
self.output = DeiTSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.
attention.num_attention_heads, self.attention.
attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = (self.attention.
num_attention_heads - len(heads))
self.attention.all_head_size = (self.attention.attention_head_size *
self.attention.num_attention_heads)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input_0):
primals_1 = self.attention.query.weight
primals_2 = self.attention.query.bias
primals_4 = self.attention.key.weight
primals_5 = self.attention.key.bias
primals_6 = self.attention.value.weight
primals_7 = self.attention.value.bias
primals_8 = self.output.dense.weight
primals_9 = self.output.dense.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| ncoop57/transformers | DeiTAttention | false | 4,082 | ["Apache-2.0"] | 0 | d7e156bd1ae2467e9ea1dbc44f31da0ed2296aee | https://github.com/ncoop57/transformers/tree/d7e156bd1ae2467e9ea1dbc44f31da0ed2296aee |
SoftmaxModel
|
import torch
import torch.nn as nn
class SoftmaxModel(nn.Module):
"""
Model architecture from:
https://adventuresinmachinelearning.com/pytorch-tutorial-deep-learning/
"""
def __init__(self, num_in, num_hidden, num_out, inplace=False):
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden)
self.lin2 = nn.Linear(num_hidden, num_hidden)
self.lin3 = nn.Linear(num_hidden, num_out)
self.relu1 = nn.ReLU(inplace=inplace)
self.relu2 = nn.ReLU(inplace=inplace)
self.softmax = nn.Softmax(dim=1)
def forward(self, input):
lin1 = self.relu1(self.lin1(input))
lin2 = self.relu2(self.lin2(lin1))
lin3 = self.lin3(lin2)
return self.softmax(lin3)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_in': 4, 'num_hidden': 4, 'num_out': 4}]
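# Editor's illustrative sketch, not part of the original source: the softmax
# is over dim=1, so for a (4, 4, 4, 4) input each slice along dim 1 sums to 1.
if __name__ == '__main__':
    _init_args, _init_kwargs = get_init_inputs()
    _model = SoftmaxModel(*_init_args, **_init_kwargs)
    _out = _model(*get_inputs())
    print(_out.shape, _out.sum(dim=1)[0, 0, 0].item())  # (4, 4, 4, 4), ~1.0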
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class SoftmaxModelNew(nn.Module):
"""
Model architecture from:
https://adventuresinmachinelearning.com/pytorch-tutorial-deep-learning/
"""
def __init__(self, num_in, num_hidden, num_out, inplace=False):
super().__init__()
self.num_in = num_in
self.num_hidden = num_hidden
self.num_out = num_out
self.lin1 = nn.Linear(num_in, num_hidden)
self.lin2 = nn.Linear(num_hidden, num_hidden)
self.lin3 = nn.Linear(num_hidden, num_out)
self.relu1 = nn.ReLU(inplace=inplace)
self.relu2 = nn.ReLU(inplace=inplace)
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0):
primals_1 = self.lin1.weight
primals_2 = self.lin1.bias
primals_4 = self.lin2.weight
primals_5 = self.lin2.bias
primals_6 = self.lin3.weight
primals_7 = self.lin3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| ngduduong/captum | SoftmaxModel | false | 4,083 | ["BSD-3-Clause"] | 0 | 6fe5f0f23ea975e73e0c0dee79bdc01b4223d283 | https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283 |
TinyCnn
|
import torch
import torch.nn as nn
class TinyCnn(nn.Module):
def __init__(self, feature_extraction=False):
super().__init__()
self.feature_extraction = feature_extraction
self.conv1 = nn.Conv2d(3, 3, 5)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2, 2)
if not self.feature_extraction:
self.conv2 = nn.Conv2d(3, 10, 2)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
if not self.feature_extraction:
x = self.conv2(x)
x = x.view(-1, 10)
else:
x = x.view(-1, 12)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
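# Editor's illustrative sketch, not part of the original source: with
# feature_extraction=False (the default), the 64x64 input is reduced to
# (4, 10, 29, 29) and flattened to 4 * 10 * 29 * 29 / 10 = 3364 rows.
if __name__ == '__main__':
    _model = TinyCnn()
    _out = _model(*get_inputs())
    print(_out.shape)  # torch.Size([3364, 10])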
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 43200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 3
x0 = xindex % 3600
x4 = xindex // 3600
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 10800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x4 = xindex // 900
x3 = xindex // 2700
x5 = xindex % 2700
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + 2720 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x5 + 2816 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 33640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 841 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (3, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (3,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (10, 3, 2, 2), (12, 4, 2, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 60, 60), (10800, 3600, 60, 1))
buf1 = empty_strided_cuda((4, 3, 60, 60), (10848, 3616, 60, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(43200)](buf0, primals_2,
buf1, 43200, XBLOCK=512, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 3, 30, 30), (2720, 900, 30, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 3, 30, 30), (2816, 900, 30, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(10800)](buf1, buf2,
buf3, 10800, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 10, 29, 29), (8410, 841, 29, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(33640)](buf5, primals_5, 33640,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
return reinterpret_tensor(buf5, (3364, 10), (10, 1), 0
), primals_1, primals_3, primals_4, buf1, buf2, buf3
class TinyCnnNew(nn.Module):
def __init__(self, feature_extraction=False):
super().__init__()
self.feature_extraction = feature_extraction
self.conv1 = nn.Conv2d(3, 3, 5)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2, 2)
if not self.feature_extraction:
self.conv2 = nn.Conv2d(3, 10, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ngduduong/captum | TinyCnn | false | 4,084 | ["BSD-3-Clause"] | 0 | 6fe5f0f23ea975e73e0c0dee79bdc01b4223d283 | https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283 |
MLPNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLPNet(nn.Module):
def __init__(self):
super(MLPNet, self).__init__()
self.fc1 = nn.Linear(28 * 28, 500)
self.fc2 = nn.Linear(500, 256)
self.fc3 = nn.Linear(256, 10)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def name(self):
return 'MLP'
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
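# Editor's illustrative sketch, not part of the original source: the model
# views its input as (-1, 784), so any batch of 784-feature rows works.
if __name__ == '__main__':
    _model = MLPNet()
    _out = _model(*get_inputs())
    print(_model.name(), _out.shape)  # MLP torch.Size([4, 10])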
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 2000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 500
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (500, 784), (784, 1))
assert_size_stride(primals_3, (500,), (1,))
assert_size_stride(primals_4, (256, 500), (500, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (10, 256), (256, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
500), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(2000)](buf1, primals_3, 2000, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (500, 256), (
1, 500), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(1024)](buf3, primals_5, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(256, 10), (1, 256), 0), alpha=1, beta=1, out=buf4)
del primals_7
return buf4, primals_1, buf1, buf3, primals_6, primals_4
class MLPNetNew(nn.Module):
def __init__(self):
super(MLPNetNew, self).__init__()
self.fc1 = nn.Linear(28 * 28, 500)
self.fc2 = nn.Linear(500, 256)
self.fc3 = nn.Linear(256, 10)
def name(self):
return 'MLP'
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| ngtrunghuan/50.021-ArtificialIntelligence | MLPNet | false | 4,085 | ["MIT"] | 0 | b0c3d9f8cc70312ea1298818482a4b25d4ddbded | https://github.com/ngtrunghuan/50.021-ArtificialIntelligence/tree/b0c3d9f8cc70312ea1298818482a4b25d4ddbded |
ResNNFlow
|
import torch
import torch.utils.data
class ResNNFlow(torch.nn.Sequential):
def __init__(self, *args, **kwargs):
super(ResNNFlow, self).__init__(*args, **kwargs)
self.gate = torch.nn.Parameter(torch.nn.init.normal_(torch.Tensor(1)))
def forward(self, inputs):
or_inputs = inputs
for module in self._modules.values():
inputs = module(inputs)
return self.gate.sigmoid() * inputs + (1 - self.gate.sigmoid()
) * or_inputs
def logdetj(self, inputs=None):
for module in self._modules.values():
inputs = module.log_diag_jacobian(inputs)
            inputs = inputs if len(inputs.shape) == 4 else inputs.view(
                list(inputs.shape) + [1, 1])
        return (torch.nn.functional.softplus(inputs.squeeze() + self.gate) -
            torch.nn.functional.softplus(self.gate)).sum(-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
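# Editor's illustrative sketch, not part of the original source: with no
# submodules (the harness default), sigmoid(gate) * x + (1 - sigmoid(gate)) * x
# collapses to x, so the flow acts as the identity map.
if __name__ == '__main__':
    _flow = ResNNFlow()
    _x = get_inputs()[0]
    print(torch.allclose(_flow(_x), _x))  # True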
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
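    # in_ptr0 holds the scalar gate parameter and in_ptr1 the input tensor;
    # this kernel fuses sigmoid(gate) * x + (1 - sigmoid(gate)) * x into a
    # single elementwise pass.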
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp2
tmp7 = tmp6 * tmp3
tmp8 = tmp4 + tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_0[grid(256)](primals_2,
primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2
class ResNNFlowNew(torch.nn.Sequential):
def __init__(self, *args, **kwargs):
super(ResNNFlowNew, self).__init__(*args, **kwargs)
self.gate = torch.nn.Parameter(torch.nn.init.normal_(torch.Tensor(1)))
def logdetj(self, inputs=None):
for module in self._modules.values():
inputs = module.log_diag_jacobian(inputs)
            inputs = inputs if len(inputs.shape) == 4 else inputs.view(
                list(inputs.shape) + [1, 1])
        return (torch.nn.functional.softplus(inputs.squeeze() + self.gate) -
            torch.nn.functional.softplus(self.gate)).sum(-1)
def forward(self, input_0):
primals_2 = self.gate
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| nicola-decao/M-NAF-experiments-VAE | ResNNFlow | false | 4,086 | ["MIT"] | 0 | b8e127205e84d94ae50618e95734f20d259f7934 | https://github.com/nicola-decao/M-NAF-experiments-VAE/tree/b8e127205e84d94ae50618e95734f20d259f7934 |
GatedConv2d
|
import torch
import torch.utils.data
import torch.nn as nn
class GatedConv2d(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride,
padding, dilation=1, activation=None):
super(GatedConv2d, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
self.g = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
def forward(self, x):
if self.activation is None:
h = self.h(x)
else:
h = self.activation(self.h(x))
g = self.sigmoid(self.g(x))
return h * g
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'output_channels': 4, 'kernel_size':
4, 'stride': 1, 'padding': 4}]
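# Editor's illustrative sketch, not part of the original source: with
# kernel_size=4, stride=1, padding=4, a 4x4 input grows to 9x9
# ((4 + 2*4 - 4) / 1 + 1 = 9); activation=None leaves the h branch linear.
if __name__ == '__main__':
    _init_args, _init_kwargs = get_init_inputs()
    _gconv = GatedConv2d(*_init_args, **_init_kwargs)
    print(_gconv(*get_inputs()).shape)  # torch.Size([4, 4, 9, 9])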
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
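    # Fuses the per-channel bias additions for both convolution branches with
    # the gating: the biased h and g are written back in place, and
    # h * sigmoid(g) is stored to out_ptr0.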
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(in_out_ptr1 + x3, tmp5, xmask)
tl.store(out_ptr0 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = buf0
del buf0
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0[grid(1296)](buf1, buf3,
primals_2, primals_5, buf4, 1296, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_5
return buf4, primals_1, primals_3, primals_4, buf1, buf3
class GatedConv2dNew(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride,
padding, dilation=1, activation=None):
super(GatedConv2dNew, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
self.g = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
def forward(self, input_0):
primals_1 = self.h.weight
primals_2 = self.h.bias
primals_3 = self.g.weight
primals_5 = self.g.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| nicola-decao/M-NAF-experiments-VAE | GatedConv2d | false | 4,087 | ["MIT"] | 0 | b8e127205e84d94ae50618e95734f20d259f7934 | https://github.com/nicola-decao/M-NAF-experiments-VAE/tree/b8e127205e84d94ae50618e95734f20d259f7934 |
NPIArg
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class NPIArg(nn.Module):
def __init__(self, input_dim: 'int', arg_dim: 'int'):
super(NPIArg, self).__init__()
self.f_arg = nn.Linear(input_dim, arg_dim)
def forward(self, x):
x = self.f_arg(x)
x = F.log_softmax(x.view(1, -1), dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'arg_dim': 4}]
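# Editor's illustrative sketch, not part of the original source: the
# log-softmax is taken over the flattened (1, -1) view, so a (4, 4, 4, 4)
# input yields a single (1, 256) distribution in log space.
if __name__ == '__main__':
    _arg_head = NPIArg(input_dim=4, arg_dim=4)
    _out = _arg_head(*get_inputs())
    print(_out.shape, _out.exp().sum().item())  # torch.Size([1, 256]), ~1.0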
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__log_softmax_0(in_ptr0, out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0))
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl_math.log(tmp8)
tmp10 = tmp4 - tmp9
tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp10, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf3 = empty_strided_cuda((1, 256), (256, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__log_softmax_0[grid(1)](buf0, buf3, 1, 256,
num_warps=2, num_stages=1)
del buf0
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3
class NPIArgNew(nn.Module):
def __init__(self, input_dim: 'int', arg_dim: 'int'):
super(NPIArgNew, self).__init__()
self.f_arg = nn.Linear(input_dim, arg_dim)
def forward(self, input_0):
primals_1 = self.f_arg.weight
primals_2 = self.f_arg.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| nienjiuntai/pytorch-npi | NPIArg | false | 4,088 | ["MIT"] | 0 | 16b413c152dfb7f1506a85997adc10ddc2d9af35 | https://github.com/nienjiuntai/pytorch-npi/tree/16b413c152dfb7f1506a85997adc10ddc2d9af35 |
NPIProg
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class NPIProg(nn.Module):
def __init__(self, input_dim: 'int', prog_key_dim: 'int', prog_num: 'int'):
super(NPIProg, self).__init__()
self._fcn1 = nn.Linear(in_features=input_dim, out_features=prog_key_dim
)
self._fcn2 = nn.Linear(in_features=prog_key_dim, out_features=prog_num)
def forward(self, x):
x = self._fcn1(x)
x = self._fcn2(F.relu_(x))
x = F.log_softmax(x.view(1, -1), dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'prog_key_dim': 4, 'prog_num': 4}]
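# Editor's illustrative sketch, not part of the original source: same
# flattened log-softmax as NPIArg, preceded by an in-place ReLU between the
# two linear layers.
if __name__ == '__main__':
    _prog_head = NPIProg(input_dim=4, prog_key_dim=4, prog_num=4)
    _out = _prog_head(*get_inputs())
    print(_out.shape)  # torch.Size([1, 256])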
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_per_fused__log_softmax_2(in_ptr0, out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0))
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl_math.log(tmp8)
tmp10 = tmp4 - tmp9
tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp10, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf6 = empty_strided_cuda((1, 256), (256, 1), torch.float32)
triton_per_fused__log_softmax_2[grid(1)](buf3, buf6, 1, 256,
num_warps=2, num_stages=1)
del buf3
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, buf6, primals_4, buf7
class NPIProgNew(nn.Module):
def __init__(self, input_dim: 'int', prog_key_dim: 'int', prog_num: 'int'):
super(NPIProgNew, self).__init__()
self._fcn1 = nn.Linear(in_features=input_dim, out_features=prog_key_dim
)
self._fcn2 = nn.Linear(in_features=prog_key_dim, out_features=prog_num)
def forward(self, input_0):
primals_1 = self._fcn1.weight
primals_2 = self._fcn1.bias
primals_4 = self._fcn2.weight
primals_5 = self._fcn2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| nienjiuntai/pytorch-npi | NPIProg | false | 4,089 | ["MIT"] | 0 | 16b413c152dfb7f1506a85997adc10ddc2d9af35 | https://github.com/nienjiuntai/pytorch-npi/tree/16b413c152dfb7f1506a85997adc10ddc2d9af35 |
BasicModel_ConvNet
|
import torch
import torch.nn as nn
class BasicModel_ConvNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, 1)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(2, 4, 3, 1)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
def forward(self, x):
x = self.relu1(self.conv1(x))
x = self.pool1(x)
x = self.relu2(self.conv2(x))
x = self.pool2(x)
x = x.view(-1, 4)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
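# Editor's illustrative sketch, not part of the original source: two
# conv/pool stages take the 64x64 input down to 14x14, giving
# 4 * 4 * 14 * 14 / 4 = 784 flattened rows of 4 features each.
if __name__ == '__main__':
    _model = BasicModel_ConvNet()
    _out = _model(*get_inputs())
    print(_out.shape)  # torch.Size([784, 10]); each row sums to 1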
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 30752
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 2
x0 = xindex % 3844
x4 = xindex // 3844
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 7688
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x1 = xindex // 31 % 31
x4 = xindex // 961
x3 = xindex // 1922
x5 = xindex % 1922
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + 1952 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x5 + 2048 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 13456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 841 % 4
x2 = xindex // 3364
x4 = xindex % 3364
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + 3392 * x2), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = xindex // 14 % 14
x2 = xindex // 196 % 4
x3 = xindex // 784
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3),
xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3),
xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x2 + 3392 * x3
), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x4, tmp15, xmask)
tl.store(out_ptr1 + x4, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 6272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_5(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 784
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 10 * x0), tmp11, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (2, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (4, 2, 3, 3), (18, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (8, 4), (4, 1))
assert_size_stride(primals_7, (8,), (1,))
assert_size_stride(primals_8, (10, 8), (8, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 62, 62), (7688, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 2, 62, 62), (7744, 3872, 62, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(30752)](buf0, primals_2,
buf1, 30752, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 2, 31, 31), (1952, 961, 31, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 2, 31, 31), (2048, 961, 31, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(7688)](buf1, buf2,
buf3, 7688, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 29, 29), (3364, 841, 29, 1))
buf5 = empty_strided_cuda((4, 4, 29, 29), (3392, 841, 29, 1), torch
.float32)
triton_poi_fused_convolution_relu_2[grid(13456)](buf4, primals_5,
buf5, 13456, XBLOCK=256, num_warps=4, num_stages=1)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 4, 14, 14), (784, 196, 14, 1), torch.int8
)
buf7 = empty_strided_cuda((4, 4, 14, 14), (784, 196, 14, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(3136)](buf5, buf6,
buf7, 3136, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((784, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (784, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(6272)](buf9, primals_7, 6272, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(8, 10), (1, 8), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf13 = empty_strided_cuda((784, 10), (10, 1), torch.float32)
triton_per_fused__softmax_5[grid(784)](buf10, buf13, 784, 10,
XBLOCK=8, num_warps=2, num_stages=1)
del buf10
return (buf13, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (784, 4), (4, 1), 0), buf9, buf13,
primals_8, primals_6)
class BasicModel_ConvNetNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 3, 1)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(2, 4, 3, 1)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
ngduduong/captum
|
BasicModel_ConvNet
| false
| 4,090
|
[
"BSD-3-Clause"
] | 0
|
6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
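A minimal smoke test for the record above, as a hedged sketch: it assumes a CUDA device is available (the generated `call` hard-codes `torch.cuda.set_device(0)`) and reuses the `BasicModel_ConvNetNew` class defined in this record. The row count follows from the shapes asserted in `call`: a (4, 1, 64, 64) input shrinks to (4, 4, 14, 14) after the two conv/pool stages, and `view(-1, 4)` yields 784 rows.

import torch

if torch.cuda.is_available():
    model = BasicModel_ConvNetNew().cuda()
    x = torch.rand(4, 1, 64, 64, device='cuda')  # matches assert_size_stride(primals_3, ...)
    probs = model(x)
    assert probs.shape == (784, 10)               # xnumel = 784 in the fused softmax kernel
    assert torch.allclose(probs.sum(dim=1), torch.ones(784, device='cuda'))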
GammaLoss
|
import torch
import torch.nn
class GammaLoss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y, y_hat):
p = 2
loss = -y * torch.pow(y_hat, 1 - p) / (1 - p) + torch.pow(y_hat, 2 - p
) / (2 - p)
return torch.mean(loss)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_mul_neg_pow_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = -tmp0
tmp3 = tl.full([1], 1, tl.int32)
tmp4 = tmp3 / tmp2
tmp5 = tmp1 * tmp4
tmp6 = -1.0
tmp7 = tmp5 * tmp6
tmp8 = float('inf')
tmp9 = tmp7 + tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 256.0
tmp14 = tmp12 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_neg_pow_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class GammaLossNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
nizamphoenix/kaggle
|
GammaLoss
| false
| 4,091
|
[
"MIT"
] | 0
|
a9c993d0441a6d9260d605a630f95d938e6329db
|
https://github.com/nizamphoenix/kaggle/tree/a9c993d0441a6d9260d605a630f95d938e6329db
|
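Note on the record above: with `p` hard-coded to 2, the term `torch.pow(y_hat, 2 - p) / (2 - p)` divides by zero, and the fused kernel faithfully reproduces this by adding the constant `float('inf')` (`tmp8`), so both versions return `inf` for any input. The Tweedie power form is only valid for p outside {1, 2}; at p = 2 its limit is the Gamma negative log-likelihood, which, up to terms independent of `y_hat`, is `y / y_hat + log(y_hat)`. A hedged sketch of that limit, using the hypothetical name `GammaLossFixed`:

import torch

class GammaLossFixed(torch.nn.Module):
    """Sketch of the p = 2 (Gamma) limit of the Tweedie deviance loss.
    The power form -y*mu^(1-p)/(1-p) + mu^(2-p)/(2-p) degenerates at p = 2;
    its limit, dropping terms that do not depend on mu, is y/mu + log(mu).
    """
    def forward(self, y, y_hat):
        return torch.mean(y / y_hat + torch.log(y_hat))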
BasicModel_ConvNet_MaxPool1d
|
import torch
import torch.nn as nn
class BasicModel_ConvNet_MaxPool1d(nn.Module):
"""Same as above, but with the MaxPool2d replaced
with a MaxPool1d. This is useful because the MaxPool modules
behave differently to other modules from the perspective
of the DeepLift Attributions
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv1d(1, 2, 3)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool1d(2)
self.conv2 = nn.Conv1d(2, 4, 3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool1d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
def forward(self, x):
x = self.relu1(self.conv1(x))
x = self.pool1(x)
x = self.relu2(self.conv2(x))
x = self.pool2(x)
x = x.view(-1, 4)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
def get_inputs():
return [torch.rand([4, 1, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 496
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 62 % 2
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 248
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr1 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 464
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 29 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 224
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = xindex // 14
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 29 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 29 * x1), xmask, eviction_policy
='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 448
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_5(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 56
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 10 * x0), tmp11, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (2, 1, 3), (3, 3, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 1, 64), (64, 64, 1))
assert_size_stride(primals_4, (4, 2, 3), (6, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (8, 4), (4, 1))
assert_size_stride(primals_7, (8,), (1,))
assert_size_stride(primals_8, (10, 8), (8, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 62), (124, 62, 1))
buf1 = buf0
del buf0
buf15 = empty_strided_cuda((4, 2, 62), (124, 62, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(496)](buf1,
primals_2, buf15, 496, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 2, 1, 31), (62, 31, 31, 1), torch.int8)
buf3 = empty_strided_cuda((4, 2, 1, 31), (62, 31, 31, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_1[grid(248)](buf1, buf2,
buf3, 248, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (4, 2,
31), (62, 31, 1), 0), primals_4, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf4, (4, 4, 29), (116, 29, 1))
buf5 = buf4
del buf4
buf14 = empty_strided_cuda((4, 4, 29), (116, 29, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_2[grid(464)](buf5,
primals_5, buf14, 464, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 1, 14), (56, 14, 14, 1), torch.int8)
buf7 = empty_strided_cuda((4, 4, 1, 14), (56, 14, 14, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(224)](buf5, buf6,
buf7, 224, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((56, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (56, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(448)](buf9, primals_7, 448, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((56, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(8, 10), (1, 8), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf13 = empty_strided_cuda((56, 10), (10, 1), torch.float32)
triton_per_fused__softmax_5[grid(56)](buf10, buf13, 56, 10, XBLOCK=
32, num_warps=4, num_stages=1)
del buf10
return buf13, primals_1, primals_3, primals_4, reinterpret_tensor(buf1,
(4, 2, 1, 62), (124, 62, 62, 1), 0), buf2, reinterpret_tensor(buf3,
(4, 2, 31), (62, 31, 1), 0), reinterpret_tensor(buf5, (4, 4, 1, 29),
(116, 29, 29, 1), 0), buf6, reinterpret_tensor(buf7, (56, 4), (4, 1), 0
), buf9, buf13, primals_8, primals_6, buf14, buf15
class BasicModel_ConvNet_MaxPool1dNew(nn.Module):
"""Same as above, but with the MaxPool2d replaced
with a MaxPool1d. This is useful because the MaxPool modules
behave differently to other modules from the perspective
of the DeepLift Attributions
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv1d(1, 2, 3)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool1d(2)
self.conv2 = nn.Conv1d(2, 4, 3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool1d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
ngduduong/captum
|
BasicModel_ConvNet_MaxPool1d
| false
| 4,092
|
[
"BSD-3-Clause"
] | 0
|
6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
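The generated code above reuses 2d max-pool kernels for the 1d pools by reinterpreting the (N, C, L) buffers as (N, C, 1, L) via `reinterpret_tensor`; no data moves, only the stride metadata changes. A quick CPU sanity check against the eager module from this record (a sketch; the 56 rows match `xnumel = 56` in the softmax kernel):

import torch

model = BasicModel_ConvNet_MaxPool1d()  # eager reference from this record
x = torch.rand(4, 1, 64)
out = model(x)
# 64 -> conv(3): 62 -> pool(2): 31 -> conv(3): 29 -> pool(2): 14; 4*4*14 / 4 = 56 rows
assert out.shape == (56, 10)
assert torch.allclose(out.sum(dim=1), torch.ones(56))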
LogCoshLoss
|
import torch
import torch.nn
class LogCoshLoss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y_t, y_prime_t):
ey_t = torch.abs(y_t - y_prime_t)
return torch.mean(torch.log(torch.cosh(ey_t + 1e-16)))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_cosh_log_mean_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1e-16
tmp5 = tmp3 + tmp4
tmp6 = libdevice.cosh(tmp5)
tmp7 = tl_math.log(tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_cosh_log_mean_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class LogCoshLossNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
nizamphoenix/kaggle
|
LogCoshLoss
| false
| 4,093
|
[
"MIT"
] | 0
|
a9c993d0441a6d9260d605a630f95d938e6329db
|
https://github.com/nizamphoenix/kaggle/tree/a9c993d0441a6d9260d605a630f95d938e6329db
|
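A numerical note on the record above: `torch.cosh` overflows for moderately large arguments (around 89 in float32), after which `log(cosh(x))` returns `inf`. A standard stable rewrite uses the identity log(cosh(x)) = |x| + log1p(exp(-2|x|)) - log(2). The sketch below checks it against the eager module from this record; `log_cosh_stable` is a hypothetical helper name.

import torch

def log_cosh_stable(x: torch.Tensor) -> torch.Tensor:
    # log(cosh(x)) = |x| + log(1 + exp(-2|x|)) - log(2); never overflows.
    a = torch.abs(x)
    return a + torch.log1p(torch.exp(-2.0 * a)) - 0.6931471805599453

y, y_hat = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
ref = LogCoshLoss()(y, y_hat)  # eager module from this record
alt = torch.mean(log_cosh_stable(torch.abs(y - y_hat) + 1e-16))
assert torch.allclose(ref, alt, atol=1e-6)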
AbsModel
|
from torch.nn import Module
import torch
from torch import Tensor
from torch.nn import Identity
from torch.nn.modules import Module
import torch.optim.lr_scheduler
class AbsLayer(Module):
def forward(self, x: 'Tensor') ->Tensor:
return torch.abs(x).reshape((-1, 1))
class AbsModel(Module):
"""Fake model, that simply compute the absolute value of the inputs"""
def __init__(self):
super().__init__()
self.features = AbsLayer()
self.classifier = Identity()
def forward(self, x: 'Tensor') ->Tensor:
x = self.features(x)
x = self.classifier(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
from torch import Tensor
from torch.nn import Identity
from torch.nn.modules import Module
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_abs_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.abs(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (256, 1), (1, 1), 0),
class AbsLayer(Module):
def forward(self, x: 'Tensor') ->Tensor:
return torch.abs(x).reshape((-1, 1))
class AbsModelNew(Module):
"""Fake model, that simply compute the absolute value of the inputs"""
def __init__(self):
super().__init__()
self.features = AbsLayer()
self.classifier = Identity()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
nuwangunasekara/avalanche
|
AbsModel
| false
| 4,094
|
[
"MIT"
] | 0
|
1f4d5b3e559552394cce573a85b1c9af26a544fb
|
https://github.com/nuwangunasekara/avalanche/tree/1f4d5b3e559552394cce573a85b1c9af26a544fb
|
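In the port above the reshape to (-1, 1) costs nothing: `reinterpret_tensor` only rewrites the size/stride metadata of the buffer produced by the fused abs kernel. A CPU usage sketch with the eager class from this record:

import torch

model = AbsModel()                # eager reference from this record
x = torch.rand(4, 4, 4, 4) - 0.5  # include negative values
out = model(x)
assert out.shape == (256, 1)      # 4*4*4*4 elements, one per row
assert torch.equal(out, x.abs().reshape(-1, 1))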
OcclusionAwareSimilarity
|
import torch
import torch.nn as nn
class OcclusionAwareSimilarity(nn.Module):
def __init__(self, threshold):
super(OcclusionAwareSimilarity, self).__init__()
self.threshold = threshold
def forward(self, similarity_matrix):
indicator_zero = similarity_matrix <= self.threshold
similarity_matrix[indicator_zero] = 0
return similarity_matrix
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'threshold': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp0 <= tmp1
tmp3 = 0.0
tmp4 = tl.where(tmp2, tmp3, tmp0)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_0[grid(256)](arg0_1, arg0_1,
256, XBLOCK=256, num_warps=4, num_stages=1)
return arg0_1,
class OcclusionAwareSimilarityNew(nn.Module):
def __init__(self, threshold):
super(OcclusionAwareSimilarityNew, self).__init__()
self.threshold = threshold
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
nv-nguyen/template-pose
|
OcclusionAwareSimilarity
| false
| 4,095
|
[
"MIT"
] | 0
|
ce1ffead1887b54efc8031e8e2442ba884e512ec
|
https://github.com/nv-nguyen/template-pose/tree/ce1ffead1887b54efc8031e8e2442ba884e512ec
|
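Note that the kernel above is launched with `arg0_1` as both input and output pointer and `call` returns `arg0_1` itself, so the port mutates its argument in place, exactly like the eager `similarity_matrix[indicator_zero] = 0`. Callers who still need the unthresholded matrix should pass a clone, as in this sketch:

import torch

module = OcclusionAwareSimilarity(threshold=4)  # eager reference from this record
sim = torch.rand(4, 4, 4, 4) * 8.0
out = module(sim.clone())  # clone() because forward writes into its argument
assert (out[sim <= 4] == 0).all()
assert torch.equal(out[sim > 4], sim[sim > 4])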
SpatialGatingUnit
|
import torch
import torch.nn as nn
class SpatialGatingUnit(nn.Module):
def __init__(self, dim_seq, dim_ff):
super().__init__()
self.proj = nn.Linear(dim_seq, dim_seq)
nn.init.zeros_(self.proj.weight)
nn.init.ones_(self.proj.bias)
self.norm = nn.LayerNorm(normalized_shape=dim_ff // 2, eps=1e-06)
self.dim_ff = dim_ff
self.activation = nn.GELU()
def forward(self, x):
res, gate = torch.split(tensor=x, split_size_or_sections=self.
dim_ff // 2, dim=2)
gate = self.norm(gate)
gate = torch.transpose(gate, 1, 2)
gate = self.proj(gate)
gate = torch.transpose(gate, 1, 2)
return gate * res
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim_seq': 4, 'dim_ff': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp3 - tmp1
tmp5 = tmp4 * tmp4
tmp7 = tmp6 - tmp1
tmp8 = tmp7 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = 2.0
tmp11 = tmp9 / tmp10
tmp12 = 1e-06
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp2 * tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 8
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 2
y1 = yindex // 2
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 8 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_3(in_out_ptr0, in_ptr0, in_ptr1, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 8
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 2
y1 = yindex // 2
tmp0 = tl.load(in_out_ptr0 + (x2 + 4 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(32)](primals_1, buf0,
buf1, 32, XBLOCK=32, num_warps=1, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(8, 4)](buf1, primals_2, primals_3,
buf2, 8, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (8, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 2), (8, 1, 4), 0)
del buf3
triton_poi_fused_mul_3[grid(8, 4)](buf4, primals_5, primals_1, 8, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
return buf4, reinterpret_tensor(primals_1, (4, 4, 2), (16, 4, 1), 0
), buf1, reinterpret_tensor(buf2, (8, 4), (4, 1), 0), primals_4
class SpatialGatingUnitNew(nn.Module):
def __init__(self, dim_seq, dim_ff):
super().__init__()
self.proj = nn.Linear(dim_seq, dim_seq)
nn.init.zeros_(self.proj.weight)
nn.init.ones_(self.proj.bias)
self.norm = nn.LayerNorm(normalized_shape=dim_ff // 2, eps=1e-06)
self.dim_ff = dim_ff
self.activation = nn.GELU()
def forward(self, input_0):
primals_4 = self.proj.weight
primals_5 = self.proj.bias
primals_2 = self.norm.weight
primals_3 = self.norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
nima1999nikkhah/SimCLR_gMLP
|
SpatialGatingUnit
| false
| 4,096
|
[
"MIT"
] | 0
|
32cca4764d4266493cb7d141eb9ef01a91f63996
|
https://github.com/nima1999nikkhah/SimCLR_gMLP/tree/32cca4764d4266493cb7d141eb9ef01a91f63996
|
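The gMLP-style initialization in the record above (`proj.weight` zeroed, `proj.bias` set to ones) makes the unit start out as an identity on the residual half: the zero-weight projection maps the normalized gate to all ones, so the output is just the first `dim_ff // 2` channels of the input. A sketch verifying this with the eager class:

import torch

sgu = SpatialGatingUnit(dim_seq=4, dim_ff=4)  # eager reference from this record
x = torch.rand(4, 4, 4)                       # (batch, seq_len, dim_ff)
out = sgu(x)
assert out.shape == (4, 4, 2)                 # channels halved to dim_ff // 2
# At init, gate == 1 everywhere, so the unit passes the residual half through:
assert torch.allclose(out, x[..., :2])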
BasicModel_ConvNet_MaxPool3d
|
import torch
import torch.nn as nn
class BasicModel_ConvNet_MaxPool3d(nn.Module):
"""Same as above, but with the MaxPool1d replaced
with a MaxPool3d. This is useful because the MaxPool modules
behave differently to other modules from the perspective
of the DeepLift Attributions
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv3d(1, 2, 3)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool3d(2)
self.conv2 = nn.Conv3d(2, 4, 3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool3d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
def forward(self, x):
x = self.relu1(self.conv1(x))
x = self.pool1(x)
x = self.relu2(self.conv2(x))
x = self.pool2(x)
x = x.view(-1, 4)
x = self.relu3(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
def get_inputs():
return [torch.rand([4, 1, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1906624
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = xindex // 238328 % 2
x0 = xindex % 3844
x5 = xindex // 3844
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3872 * x5), tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 390224
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 24389 % 4
x0 = xindex % 24389
x4 = xindex // 24389
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 24416 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 87808
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 10976
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 10 * x0), tmp11, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (2, 1, 3, 3, 3), (27, 27, 9, 3, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096,
64, 1))
assert_size_stride(primals_4, (4, 2, 3, 3, 3), (54, 27, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (8, 4), (4, 1))
assert_size_stride(primals_7, (8,), (1,))
assert_size_stride(primals_8, (10, 8), (8, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 62, 62, 62), (476656, 238328, 3844,
62, 1))
buf1 = empty_strided_cuda((4, 2, 62, 62, 62), (480128, 240064, 3872,
62, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1906624)](buf0, primals_2,
buf1, 1906624, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [2, 2,
2], [2, 2, 2])
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 29, 29, 29), (97556, 24389, 841, 29, 1)
)
buf6 = empty_strided_cuda((4, 4, 29, 29, 29), (97664, 24416, 841,
29, 1), torch.float32)
triton_poi_fused_convolution_relu_1[grid(390224)](buf5, primals_5,
buf6, 390224, XBLOCK=1024, num_warps=4, num_stages=1)
del buf5
del primals_5
buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [2, 2,
2], [2, 2, 2])
buf8 = buf7[0]
buf9 = buf7[1]
del buf7
buf10 = empty_strided_cuda((10976, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (10976, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_2[grid(87808)](buf11, primals_7, 87808,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf12 = empty_strided_cuda((10976, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf11, reinterpret_tensor(primals_8,
(8, 10), (1, 8), 0), alpha=1, beta=1, out=buf12)
del primals_9
buf15 = empty_strided_cuda((10976, 10), (10, 1), torch.float32)
triton_per_fused__softmax_3[grid(10976)](buf12, buf15, 10976, 10,
XBLOCK=8, num_warps=2, num_stages=1)
del buf12
return (buf15, primals_1, primals_3, primals_4, buf1, buf3, buf4, buf6,
buf9, reinterpret_tensor(buf8, (10976, 4), (4, 1), 0), buf11, buf15,
primals_8, primals_6)
class BasicModel_ConvNet_MaxPool3dNew(nn.Module):
"""Same as above, but with the MaxPool1d replaced
with a MaxPool3d. This is useful because the MaxPool modules
behave differently to other modules from the perspective
of the DeepLift Attributions
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv3d(1, 2, 3)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool3d(2)
self.conv2 = nn.Conv3d(2, 4, 3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool3d(2)
self.fc1 = nn.Linear(4, 8)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(8, 10)
self.softmax = nn.Softmax(dim=1)
self.fc1.weight = nn.Parameter(torch.ones(8, 4))
self.fc2.weight = nn.Parameter(torch.ones(10, 8))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
ngduduong/captum
|
BasicModel_ConvNet_MaxPool3d
| false
| 4,097
|
[
"BSD-3-Clause"
] | 0
|
6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
https://github.com/ngduduong/captum/tree/6fe5f0f23ea975e73e0c0dee79bdc01b4223d283
|
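Unlike the 1d and 2d variants, the compiled graph above falls back to `torch.ops.aten.max_pool3d_with_indices` instead of a generated Triton kernel; only the conv+bias+ReLU epilogues and the softmax are fused. A CPU shape check with the eager module from this record (a sketch; 10976 matches `xnumel` in the softmax kernel):

import torch

model = BasicModel_ConvNet_MaxPool3d()  # eager reference from this record
x = torch.rand(4, 1, 64, 64, 64)
out = model(x)
# per axis: 64 -> conv(3): 62 -> pool(2): 31 -> conv(3): 29 -> pool(2): 14
# rows: 4 * 4 * 14**3 / 4 = 10976
assert out.shape == (10976, 10)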
SelfMatch2
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def masked_softmax(logits, mask, dim=-1, log_softmax=False):
"""Take the softmax of `logits` over given dimension, and set
entries to 0 wherever `mask` is 0.
Args:
logits (torch.Tensor): Inputs to the softmax function.
mask (torch.Tensor): Same shape as `logits`, with 0 indicating
positions that should be assigned 0 probability in the output.
dim (int): Dimension over which to take softmax.
log_softmax (bool): Take log-softmax rather than regular softmax.
E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax.
Returns:
probs (torch.Tensor): Result of taking masked softmax over the logits.
"""
mask = mask.type(torch.float32)
masked_logits = mask * logits + (1 - mask) * -1e+30
softmax_fn = F.log_softmax if log_softmax else F.softmax
probs = softmax_fn(masked_logits, dim)
return probs
class SelfMatch2(nn.Module):
"""General-purpose layer for encoding a sequence using a bidirectional RNN.
Encoded output is the RNN's hidden state at each position, which
has shape `(batch_size, seq_len, hidden_size * 2)`.
Args:
input_size (int): Size of a single timestep in the input.
hidden_size (int): Size of the RNN hidden state.
num_layers (int): Number of layers of RNN cells to use.
drop_prob (float): Probability of zero-ing out activations.
"""
def __init__(self, hidden_size, drop_prob=0.1):
super(SelfMatch2, self).__init__()
self.drop_prob = drop_prob
self.q_weight = nn.Parameter(torch.zeros(hidden_size, 1))
self.c_weight = nn.Parameter(torch.zeros(hidden_size, 1))
self.cq_weight = nn.Parameter(torch.zeros(1, 1, hidden_size))
for weight in (self.c_weight, self.q_weight, self.cq_weight):
nn.init.xavier_uniform_(weight)
self.bias = nn.Parameter(torch.zeros(1))
def forward(self, c, q, c_mask, q_mask):
batch_size, c_len, _ = c.size()
q_len = q.size(1)
s = self.get_similarity_matrix(c, q)
c_mask = c_mask.view(batch_size, c_len, 1)
q_mask = q_mask.view(batch_size, 1, q_len)
s1 = masked_softmax(s, q_mask, dim=2)
a = torch.bmm(s1, q)
return torch.cat([c, a, c * a], dim=2)
def get_similarity_matrix(self, c, q):
"""Get the "similarity matrix" between context and query (using the
terminology of the BiDAF paper).
A naive implementation as described in BiDAF would concatenate the
three vectors then project the result with a single weight matrix. This
method is a more memory-efficient implementation of the same operation.
See Also:
Equation 1 in https://arxiv.org/abs/1611.01603
"""
c_len, q_len = c.size(1), q.size(1)
c = F.dropout(c, self.drop_prob, self.training)
q = F.dropout(q, self.drop_prob, self.training)
s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len])
s1 = torch.matmul(q, self.q_weight).transpose(1, 2).expand([-1,
c_len, -1])
s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2))
s = s0 + s1 + s2 + self.bias
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
1]), torch.rand([4, 1, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_out_ptr0 + x4, xmask)
tmp6 = tl.load(in_ptr3 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp8 = tmp5 + tmp7
tmp9 = tmp0 * tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp0
tmp12 = -1e+30
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tl.store(in_out_ptr0 + x4, tmp14, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 * tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp11, tmp16, tmp17)
tmp19 = tl.where(tmp9, tmp10, tmp18)
tmp20 = tl.where(tmp4, tmp5, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_8, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_3, out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
primals_4, out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](primals_1, primals_5, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(primals_2, (4, 4, 4), (
16, 1, 4), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_add_mul_rsub_1[grid(64)](buf4, primals_8, buf0,
buf1, primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del buf1
del primals_6
buf5 = buf2
del buf2
triton_poi_fused__softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_3[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = buf5
del buf5
extern_kernels.bmm(buf6, primals_2, out=buf7)
buf8 = empty_strided_cuda((4, 4, 12), (48, 12, 1), torch.float32)
triton_poi_fused_cat_4[grid(192)](primals_1, buf7, buf8, 192,
XBLOCK=256, num_warps=4, num_stages=1)
del buf7
return buf8, primals_1, primals_2, primals_8, buf6
def masked_softmax(logits, mask, dim=-1, log_softmax=False):
"""Take the softmax of `logits` over given dimension, and set
entries to 0 wherever `mask` is 0.
Args:
logits (torch.Tensor): Inputs to the softmax function.
mask (torch.Tensor): Same shape as `logits`, with 0 indicating
positions that should be assigned 0 probability in the output.
dim (int): Dimension over which to take softmax.
log_softmax (bool): Take log-softmax rather than regular softmax.
E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax.
Returns:
probs (torch.Tensor): Result of taking masked softmax over the logits.
"""
mask = mask.type(torch.float32)
masked_logits = mask * logits + (1 - mask) * -1e+30
softmax_fn = F.log_softmax if log_softmax else F.softmax
probs = softmax_fn(masked_logits, dim)
return probs
class SelfMatch2New(nn.Module):
"""General-purpose layer for encoding a sequence using a bidirectional RNN.
Encoded output is the RNN's hidden state at each position, which
has shape `(batch_size, seq_len, hidden_size * 2)`.
Args:
input_size (int): Size of a single timestep in the input.
hidden_size (int): Size of the RNN hidden state.
num_layers (int): Number of layers of RNN cells to use.
drop_prob (float): Probability of zero-ing out activations.
"""
def __init__(self, hidden_size, drop_prob=0.1):
super(SelfMatch2New, self).__init__()
self.drop_prob = drop_prob
self.q_weight = nn.Parameter(torch.zeros(hidden_size, 1))
self.c_weight = nn.Parameter(torch.zeros(hidden_size, 1))
self.cq_weight = nn.Parameter(torch.zeros(1, 1, hidden_size))
for weight in (self.c_weight, self.q_weight, self.cq_weight):
nn.init.xavier_uniform_(weight)
self.bias = nn.Parameter(torch.zeros(1))
def get_similarity_matrix(self, c, q):
"""Get the "similarity matrix" between context and query (using the
terminology of the BiDAF paper).
A naive implementation as described in BiDAF would concatenate the
three vectors then project the result with a single weight matrix. This
method is a more memory-efficient implementation of the same operation.
See Also:
Equation 1 in https://arxiv.org/abs/1611.01603
"""
c_len, q_len = c.size(1), q.size(1)
c = F.dropout(c, self.drop_prob, self.training)
q = F.dropout(q, self.drop_prob, self.training)
s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len])
s1 = torch.matmul(q, self.q_weight).transpose(1, 2).expand([-1,
c_len, -1])
s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2))
s = s0 + s1 + s2 + self.bias
return s
def forward(self, input_0, input_1, input_2, input_3):
primals_3 = self.q_weight
primals_4 = self.c_weight
primals_5 = self.cq_weight
primals_6 = self.bias
primals_1 = input_0
primals_2 = input_1
primals_7 = input_2
primals_8 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
nikcaryo/cs224n-squad
|
SelfMatch2
| false
| 4,098
|
[
"MIT"
] | 0
|
4bebca38f3cbaab8c80cd306863d6dca1d9cdf76
|
https://github.com/nikcaryo/cs224n-squad/tree/4bebca38f3cbaab8c80cd306863d6dca1d9cdf76
|
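A CPU usage sketch for the record above, using the eager class. `eval()` is called so the dropout inside `get_similarity_matrix` is disabled and the pass is deterministic; the output width is `3 * hidden_size` because the layer returns `torch.cat([c, a, c * a], dim=2)`.

import torch

layer = SelfMatch2(hidden_size=4).eval()  # eager reference; eval() disables dropout
c = torch.rand(4, 4, 4)                   # context: (batch, c_len, hidden)
q = torch.rand(4, 4, 4)                   # query:   (batch, q_len, hidden)
c_mask = torch.ones(4, 4, 1)              # 1 = keep position
q_mask = torch.ones(4, 1, 4)
out = layer(c, q, c_mask, q_mask)
assert out.shape == (4, 4, 12)            # cat([c, a, c * a]) -> 3 * hidden_size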
VAE
|
import torch
import torch.nn as nn
import torch.utils.data
from math import *
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.fc1 = nn.Linear(784, 400)
self.fc2 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 2)
self.fc4 = nn.Linear(2, 20)
self.fc5 = nn.Linear(20, 400)
self.fc6 = nn.Linear(400, 784)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def encode(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
z = self.fc3(x)
return z
def decode(self, z):
z = self.relu(self.fc4(z))
z = self.relu(self.fc5(z))
return self.sigmoid(self.fc6(z))
def forward(self, x):
z = self.encode(x.view(-1, 784))
x = self.decode(z)
return x, z
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
from math import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 784
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (400, 784), (784, 1))
assert_size_stride(primals_3, (400,), (1,))
assert_size_stride(primals_4, (20, 400), (400, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (2, 20), (20, 1))
assert_size_stride(primals_7, (2,), (1,))
assert_size_stride(primals_8, (20, 2), (2, 1))
assert_size_stride(primals_9, (20,), (1,))
assert_size_stride(primals_10, (400, 20), (20, 1))
assert_size_stride(primals_11, (400,), (1,))
assert_size_stride(primals_12, (784, 400), (400, 1))
assert_size_stride(primals_13, (784,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
400), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1600)](buf1, primals_3, 1600, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (400, 20), (1,
400), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(80)](buf3, primals_5, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(20, 2), (1, 20), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_8, (2, 20), (1,
2), 0), out=buf5)
buf6 = buf5
del buf5
triton_poi_fused_relu_1[grid(80)](buf6, primals_9, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(buf6, reinterpret_tensor(primals_10, (20, 400), (
1, 20), 0), out=buf7)
buf8 = buf7
del buf7
triton_poi_fused_relu_0[grid(1600)](buf8, primals_11, 1600, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_11
buf9 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_12, (400, 784),
(1, 400), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_sigmoid_2[grid(3136)](buf10, primals_13, 3136,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
return (buf10, buf4, primals_1, buf1, buf3, buf4, buf6, buf8, buf10,
primals_12, primals_10, primals_8, primals_6, primals_4)
class VAENew(nn.Module):
def __init__(self):
super(VAENew, self).__init__()
self.fc1 = nn.Linear(784, 400)
self.fc2 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 2)
self.fc4 = nn.Linear(2, 20)
self.fc5 = nn.Linear(20, 400)
self.fc6 = nn.Linear(400, 784)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def encode(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
z = self.fc3(x)
return z
def decode(self, z):
z = self.relu(self.fc4(z))
z = self.relu(self.fc5(z))
return self.sigmoid(self.fc6(z))
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_12 = self.fc6.weight
primals_13 = self.fc6.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
|
niujinshuchong/stochastic_processes
|
VAE
| false
| 4,099
|
[
"MIT"
] | 0
|
ea2538d2f09c39bec1834df5addd37e0699a88bf
|
https://github.com/niujinshuchong/stochastic_processes/tree/ea2538d2f09c39bec1834df5addd37e0699a88bf
|
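Despite the name, the record above implements a deterministic autoencoder: `encode` returns a 2-d code directly, with no mu/logvar split or reparameterization, and `forward` returns both the sigmoid reconstruction and the code. A CPU usage sketch with the eager class:

import torch

vae = VAE()  # eager reference from this record
x = torch.rand(4, 784)
recon, z = vae(x)
assert recon.shape == (4, 784) and z.shape == (4, 2)
assert (recon >= 0).all() and (recon <= 1).all()  # sigmoid output range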
ScaleNorm
|
import torch
import torch.nn as nn
class ScaleNorm(nn.Module):
"""ScaleNorm"""
def __init__(self, scale, eps=1e-05):
super(ScaleNorm, self).__init__()
self.scale = scale
self.eps = eps
def forward(self, x):
norm = self.scale / torch.norm(x, dim=1, keepdim=True).clamp(min=
self.eps)
return x * norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale': 1.0}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_linalg_vector_norm_mul_reciprocal_0(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-05
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tl.full([1], 1, tl.int32)
tmp16 = tmp15 / tmp14
tmp17 = 1.0
tmp18 = tmp16 * tmp17
tmp19 = tmp0 * tmp18
tl.store(out_ptr0 + x3, tmp19, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_linalg_vector_norm_mul_reciprocal_0[grid(256)](
arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ScaleNormNew(nn.Module):
"""ScaleNorm"""
def __init__(self, scale, eps=1e-05):
super(ScaleNormNew, self).__init__()
self.scale = scale
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
nvski/ST-TR
|
ScaleNorm
| false
| 4,100
|
[
"MIT"
] | 0
|
75aa9fb872af217f8616c01cee7ca6548846260b
|
https://github.com/nvski/ST-TR/tree/75aa9fb872af217f8616c01cee7ca6548846260b
|
MTFullyConnected
|
import os
import time
import torch
import numpy as np
from torch import nn
from torch import optim
from torch.nn import functional as F
class Base(nn.Module):
""" This class is the base structure for all of classification/regression DNN models.
Mainly, it provides the general methods for training, evaluating the model and predicting the given data.
"""
def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001):
"""Training the DNN model, similar to the scikit-learn or Keras style.
In the end, the optimal value of parameters will also be persisted on the hard drive.
Arguments:
train_loader (DataLoader): Data loader for training set,
including m X n target FloatTensor and m X l label FloatTensor
(m is the No. of sample, n is the No. of features, l is the No. of classes or tasks)
valid_loader (DataLoader): Data loader for validation set.
The data structure is the same as loader_train.
out (str): the file path for the model file (suffix with '.pkg')
and log file (suffix with '.log').
epochs (int, optional): The maximum number of training epochs (default: 100)
lr (float, optional): learning rate (default: 1e-4)
"""
if 'optim' in self.__dict__:
optimizer = self.optim
else:
optimizer = optim.Adam(self.parameters(), lr=lr)
best_loss = np.inf
last_save = 0
if not os.path.exists(out):
try:
os.makedirs(out)
except PermissionError:
None
log = open(file=out + '.log', mode='w+')
for epoch in range(epochs):
time.time()
for param_group in optimizer.param_groups:
param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10)
for i, (Xb, yb) in enumerate(train_loader):
Xb, yb = Xb, yb
optimizer.zero_grad()
y_ = self(Xb, istrain=True)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
wb = torch.Tensor(yb.size())
wb[yb == 3.99] = 0.1
wb[yb != 3.99] = 1
loss = self.criterion(y_ * wb, yb * wb)
loss.backward()
optimizer.step()
loss_valid = self.evaluate(valid_loader)
None
if loss_valid < best_loss:
torch.save(self.state_dict(), out + '.pkg')
None
best_loss = loss_valid
last_save = epoch
else:
None
if epoch - last_save > 100:
break
log.close()
self.load_state_dict(torch.load(out + '.pkg'))
def evaluate(self, loader):
"""Evaluating the performance of the DNN model.
Arguments:
loader (torch.util.data.DataLoader): data loader for test set,
including m X n target FloatTensor and m X l label FloatTensor
(m is the No. of sample, n is the No. of features, l is the No. of classes or tasks)
Return:
loss (float): the average loss value based on the calculation of loss function with given test set.
"""
loss = 0
for Xb, yb in loader:
Xb, yb = Xb, yb
y_ = self.forward(Xb)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
wb = torch.Tensor(yb.size())
wb[yb == 3.99] = 0.1
wb[yb != 3.99] = 1
loss += self.criterion(y_ * wb, yb * wb).item()
loss = loss / len(loader)
return loss
def predict(self, loader):
"""Predicting the probability of each sample in the given dataset.
Arguments:
loader (torch.util.data.DataLoader): data loader for test set,
only including m X n target FloatTensor
(m is the No. of sample, n is the No. of features)
Return:
score (ndarray): probability of each sample in the given dataset,
it is an m X l FloatTensor (m is the No. of sample, l is the No. of classes or tasks.)
"""
score = []
for Xb, yb in loader:
Xb = Xb
y_ = self.forward(Xb)
score.append(y_.detach().cpu())
score = torch.cat(score, dim=0).numpy()
return score
class MTFullyConnected(Base):
"""Multi-task DNN classification/regression model. It contains four fully connected layers
between which are dropout layers for robustness.
Arguments:
n_dim (int): the No. of columns (features) for input tensor
n_task (int): the No. of columns (tasks) for output tensor.
is_reg (bool, optional): Regression model (True) or Classification model (False)
"""
def __init__(self, n_dim, n_task, is_reg=False):
super(MTFullyConnected, self).__init__()
self.n_task = n_task
self.dropout = nn.Dropout(0.25)
self.fc0 = nn.Linear(n_dim, 4000)
self.fc1 = nn.Linear(4000, 2000)
self.fc2 = nn.Linear(2000, 1000)
self.output = nn.Linear(1000, n_task)
self.is_reg = is_reg
if is_reg:
self.criterion = nn.MSELoss()
else:
self.criterion = nn.BCELoss()
self.activation = nn.Sigmoid()
self
def forward(self, X, istrain=False):
"""Invoke the class directly as a function
Arguments:
X (FloatTensor): m X n FloatTensor, m is the No. of samples, n is the No. of features.
istrain (bool, optional): is it invoked during training process (True)
or just for prediction (False)
Return:
y (FloatTensor): m X l FloatTensor, m is the No. of samples, n is the No. of tasks
"""
y = F.relu(self.fc0(X))
if istrain:
y = self.dropout(y)
y = F.relu(self.fc1(y))
if istrain:
y = self.dropout(y)
y = F.relu(self.fc2(y))
if istrain:
y = self.dropout(y)
if self.is_reg:
y = self.output(y)
else:
y = self.activation(self.output(y))
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_dim': 4, 'n_task': 4}]
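# Usage sketch (added for illustration, not part of the original source):
# with is_reg=False (the default) the head is a sigmoid, so outputs lie in
# (0, 1); dropout is skipped because istrain defaults to False.
_net = MTFullyConnected(n_dim=4, n_task=4)
_y = _net(torch.rand([4, 4, 4, 4]))
assert _y.shape == (4, 4, 4, 4)
assert _y.min().item() >= 0.0 and _y.max().item() <= 1.0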
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import os
import time
import numpy as np
from torch import nn
from torch import optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4000
x1 = xindex // 4000
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + (x0 + 4096 * x1), tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = xindex // 2000
tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask)
tl.store(out_ptr0 + (x0 + 2048 * x1), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 1000
x2 = xindex % 4000
x3 = xindex // 4000
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 4096 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4000, 4), (4, 1))
assert_size_stride(primals_2, (4000,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2000, 4000), (4000, 1))
assert_size_stride(primals_5, (2000,), (1,))
assert_size_stride(primals_6, (1000, 2000), (2000, 1))
assert_size_stride(primals_7, (1000,), (1,))
assert_size_stride(primals_8, (4, 1000), (1000, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4000), (4000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4000), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4000), (64000, 16000,
4000, 1), 0)
del buf0
buf10 = empty_strided_cuda((4, 4, 4, 4000), (65536, 16384, 4096, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256000)](buf1,
primals_2, buf10, 256000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4000), (4000, 1), 0
), reinterpret_tensor(primals_4, (4000, 2000), (1, 4000), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf2
buf9 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(128000)](buf3,
primals_5, buf9, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 1000), (1000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 2000), (2016, 1), 0
), reinterpret_tensor(primals_6, (2000, 1000), (1, 2000), 0),
out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1000), (16000, 4000, 1000,
1), 0)
del buf4
buf8 = empty_strided_cuda((4, 4, 4, 1000), (16384, 4096, 1000, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(64000)](buf5,
primals_7, buf8, 64000, XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 1000), (1000, 1), 0
), reinterpret_tensor(primals_8, (1000, 4), (1, 1000), 0), out=buf6
)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_sigmoid_3[grid(256)](buf7, primals_9, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4000), (4000, 1), 0
), reinterpret_tensor(buf3, (64, 2000), (2016, 1), 0
), reinterpret_tensor(buf5, (64, 1000), (1000, 1), 0
), buf7, primals_8, buf8, primals_6, buf9, primals_4, buf10
class Base(nn.Module):
""" This class is the base structure for all of classification/regression DNN models.
Mainly, it provides the general methods for training, evaluating the model and predicting the given data.
"""
def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001):
"""Training the DNN model, similar to the scikit-learn or Keras style.
In the end, the optimal value of parameters will also be persisted on the hard drive.
Arguments:
train_loader (DataLoader): Data loader for training set,
including m X n target FloatTensor and m X l label FloatTensor
(m is the No. of sample, n is the No. of features, l is the No. of classes or tasks)
valid_loader (DataLoader): Data loader for validation set.
The data structure is the same as loader_train.
out (str): the file path for the model file (suffix with '.pkg')
and log file (suffix with '.log').
epochs (int, optional): The maximum number of training epochs (default: 100)
lr (float, optional): learning rate (default: 1e-4)
"""
if 'optim' in self.__dict__:
optimizer = self.optim
else:
optimizer = optim.Adam(self.parameters(), lr=lr)
best_loss = np.inf
last_save = 0
if not os.path.exists(out):
try:
os.makedirs(out)
except PermissionError:
None
log = open(file=out + '.log', mode='w+')
for epoch in range(epochs):
time.time()
for param_group in optimizer.param_groups:
param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10)
for i, (Xb, yb) in enumerate(train_loader):
Xb, yb = Xb, yb
optimizer.zero_grad()
y_ = self(Xb, istrain=True)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
wb = torch.Tensor(yb.size())
wb[yb == 3.99] = 0.1
wb[yb != 3.99] = 1
loss = self.criterion(y_ * wb, yb * wb)
loss.backward()
optimizer.step()
loss_valid = self.evaluate(valid_loader)
None
if loss_valid < best_loss:
torch.save(self.state_dict(), out + '.pkg')
None
best_loss = loss_valid
last_save = epoch
else:
None
if epoch - last_save > 100:
break
log.close()
self.load_state_dict(torch.load(out + '.pkg'))
def evaluate(self, loader):
"""Evaluating the performance of the DNN model.
Arguments:
loader (torch.util.data.DataLoader): data loader for test set,
including m X n target FloatTensor and m X l label FloatTensor
(m is the No. of sample, n is the No. of features, l is the No. of classes or tasks)
Return:
loss (float): the average loss value based on the calculation of loss function with given test set.
"""
loss = 0
for Xb, yb in loader:
Xb, yb = Xb, yb
y_ = self.forward(Xb)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
wb = torch.Tensor(yb.size())
wb[yb == 3.99] = 0.1
wb[yb != 3.99] = 1
loss += self.criterion(y_ * wb, yb * wb).item()
loss = loss / len(loader)
return loss
def predict(self, loader):
"""Predicting the probability of each sample in the given dataset.
Arguments:
loader (torch.util.data.DataLoader): data loader for test set,
only including m X n target FloatTensor
(m is the No. of sample, n is the No. of features)
Return:
score (ndarray): probability of each sample in the given dataset,
it is an m X l FloatTensor (m is the No. of sample, l is the No. of classes or tasks.)
"""
score = []
for Xb, yb in loader:
Xb = Xb
y_ = self.forward(Xb)
score.append(y_.detach().cpu())
score = torch.cat(score, dim=0).numpy()
return score
class MTFullyConnectedNew(Base):
"""Multi-task DNN classification/regression model. It contains four fully connected layers
between which are dropout layers for robustness.
Arguments:
n_dim (int): the No. of columns (features) for input tensor
n_task (int): the No. of columns (tasks) for output tensor.
is_reg (bool, optional): Regression model (True) or Classification model (False)
"""
def __init__(self, n_dim, n_task, is_reg=False):
super(MTFullyConnectedNew, self).__init__()
self.n_task = n_task
self.dropout = nn.Dropout(0.25)
self.fc0 = nn.Linear(n_dim, 4000)
self.fc1 = nn.Linear(4000, 2000)
self.fc2 = nn.Linear(2000, 1000)
self.output = nn.Linear(1000, n_task)
self.is_reg = is_reg
if is_reg:
self.criterion = nn.MSELoss()
else:
self.criterion = nn.BCELoss()
self.activation = nn.Sigmoid()
self
def forward(self, input_0):
primals_1 = self.fc0.weight
primals_2 = self.fc0.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_8 = self.output.weight
primals_9 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
naisuu/DrugEx
|
MTFullyConnected
| false
| 4,101
|
[
"MIT"
] | 0
|
8708c98a137473f11990d70e43a46018806b6f39
|
https://github.com/naisuu/DrugEx/tree/8708c98a137473f11990d70e43a46018806b6f39
|
ModuloMapIDList
|
import abc
import torch
import torch.nn
import torch.optim
class MapIDList(torch.nn.Module):
@abc.abstractmethod
def forward(self, raw_values: 'torch.Tensor') ->torch.Tensor:
pass
class ModuloMapIDList(MapIDList):
def __init__(self, modulo: 'int'):
super().__init__()
self.modulo = modulo
def forward(self, raw_values: 'torch.Tensor') ->torch.Tensor:
return torch.remainder(raw_values, self.modulo)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'modulo': 4}]
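# Usage sketch (added for illustration, not part of the original source):
# torch.remainder follows the divisor's sign, so IDs land in [0, modulo).
_m = ModuloMapIDList(modulo=4)
assert _m(torch.tensor([3, 4, 5, -1])).tolist() == [3, 0, 1, 3]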
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import abc
import torch.nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_remainder_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp0 % tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = tmp2 != tmp3
tmp5 = libdevice.signbit(tmp2) if tmp2.dtype is tl.float32 else tmp2 < 0
tmp6 = libdevice.signbit(tmp1) if tmp1.dtype is tl.float32 else tmp1 < 0
tmp7 = tmp5 != tmp6
tmp8 = tmp4 & tmp7
tmp9 = tmp2 + tmp1
tmp10 = tl.where(tmp8, tmp9, tmp2)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_remainder_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MapIDList(torch.nn.Module):
@abc.abstractmethod
def forward(self, raw_values: 'torch.Tensor') ->torch.Tensor:
pass
class ModuloMapIDListNew(MapIDList):
def __init__(self, modulo: 'int'):
super().__init__()
self.modulo = modulo
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
mcx/ReAgent
|
ModuloMapIDList
| false
| 4,102
|
[
"BSD-3-Clause"
] | 0
|
57b58a8b3a6b74bb87a197b73a6cd108ddad895e
|
https://github.com/mcx/ReAgent/tree/57b58a8b3a6b74bb87a197b73a6cd108ddad895e
|
gMLPBlock
|
import torch
import torch.nn as nn
class SpatialGatingUnit(nn.Module):
def __init__(self, dim_seq, dim_ff):
super().__init__()
self.proj = nn.Linear(dim_seq, dim_seq)
nn.init.zeros_(self.proj.weight)
nn.init.ones_(self.proj.bias)
self.norm = nn.LayerNorm(normalized_shape=dim_ff // 2, eps=1e-06)
self.dim_ff = dim_ff
self.activation = nn.GELU()
def forward(self, x):
res, gate = torch.split(tensor=x, split_size_or_sections=self.
dim_ff // 2, dim=2)
gate = self.norm(gate)
gate = torch.transpose(gate, 1, 2)
gate = self.proj(gate)
gate = torch.transpose(gate, 1, 2)
return gate * res
class gMLPBlock(nn.Module):
def __init__(self, dim, dim_ff, seq_len):
super().__init__()
self.proj_in = nn.Linear(dim, dim_ff)
self.activation = nn.GELU()
self.sgu = SpatialGatingUnit(seq_len, dim_ff)
self.proj_out = nn.Linear(dim_ff // 2, dim)
def forward(self, x):
x = self.proj_in(x)
x = self.activation(x)
x = self.sgu(x)
x = self.proj_out(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'dim_ff': 4, 'seq_len': 4}]
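# Usage sketch (added for illustration, not part of the original source):
# the block is shape-preserving on (batch, seq_len, dim); dim_ff must be even
# so the spatial gating unit can split it in half.
_blk = gMLPBlock(dim=4, dim_ff=4, seq_len=4)
assert _blk(torch.rand([4, 4, 4])).shape == (4, 4, 4)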
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 8
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 2
y1 = yindex // 2
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 8 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4 * y1), xmask & ymask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (2 * x2 + 8 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 2 * x2 + 8 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp3 - tmp1
tmp5 = tmp4 * tmp4
tmp7 = tmp6 - tmp1
tmp8 = tmp7 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = 2.0
tmp11 = tmp9 / tmp10
tmp12 = 1e-06
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp2 * tmp14
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr0 + (x2 + 4 * y3), tmp19, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 2
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 8 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 2 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (2,), (1,))
assert_size_stride(primals_5, (2,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 2), (2, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(32)](buf1, buf2, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(16)](buf2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(8, 4)](buf2, buf3, primals_4,
primals_5, buf4, 8, 4, XBLOCK=4, YBLOCK=8, num_warps=1,
num_stages=1)
del buf3
del primals_5
buf5 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (8, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused_clone_mul_4[grid(16, 2)](buf5, primals_7, buf1,
buf6, 16, 2, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (16, 2), (2, 1), 0),
reinterpret_tensor(primals_8, (2, 4), (1, 2), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0)
del buf7
triton_poi_fused_add_5[grid(64)](buf8, primals_9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_9
return buf8, primals_4, primals_7, reinterpret_tensor(primals_3, (16, 4
), (4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 4, 2), (16, 4, 1), 0
), buf2, reinterpret_tensor(buf4, (8, 4), (4, 1), 0
), buf5, reinterpret_tensor(buf6, (16, 2), (2, 1), 0
), primals_8, primals_6
class SpatialGatingUnit(nn.Module):
def __init__(self, dim_seq, dim_ff):
super().__init__()
self.proj = nn.Linear(dim_seq, dim_seq)
nn.init.zeros_(self.proj.weight)
nn.init.ones_(self.proj.bias)
self.norm = nn.LayerNorm(normalized_shape=dim_ff // 2, eps=1e-06)
self.dim_ff = dim_ff
self.activation = nn.GELU()
def forward(self, x):
res, gate = torch.split(tensor=x, split_size_or_sections=self.
dim_ff // 2, dim=2)
gate = self.norm(gate)
gate = torch.transpose(gate, 1, 2)
gate = self.proj(gate)
gate = torch.transpose(gate, 1, 2)
return gate * res
class gMLPBlockNew(nn.Module):
def __init__(self, dim, dim_ff, seq_len):
super().__init__()
self.proj_in = nn.Linear(dim, dim_ff)
self.activation = nn.GELU()
self.sgu = SpatialGatingUnit(seq_len, dim_ff)
self.proj_out = nn.Linear(dim_ff // 2, dim)
def forward(self, input_0):
primals_1 = self.proj_in.weight
primals_2 = self.proj_in.bias
primals_6 = self.sgu.proj.weight
primals_7 = self.sgu.proj.bias
primals_4 = self.sgu.norm.weight
primals_5 = self.sgu.norm.bias
primals_8 = self.proj_out.weight
primals_9 = self.proj_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
nima1999nikkhah/SimSiam_gMLP
|
gMLPBlock
| false
| 4,103
|
[
"MIT"
] | 0
|
9cccd1092c02267951d39ae77c0fe5a91d735903
|
https://github.com/nima1999nikkhah/SimSiam_gMLP/tree/9cccd1092c02267951d39ae77c0fe5a91d735903
|
GlobalConvBlock
|
import torch
from torch import nn
from math import sqrt
class GlobalConvBlock(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size):
super(GlobalConvBlock, self).__init__()
pad0 = int((kernel_size[0] - 1) / 2)
pad1 = int((kernel_size[1] - 1) / 2)
self.conv_l1 = nn.Conv2d(in_dim, out_dim, kernel_size=(kernel_size[
0], 1), padding=(pad0, 0))
self.conv_l2 = nn.Conv2d(out_dim, out_dim, kernel_size=(1,
kernel_size[1]), padding=(0, pad1))
self.conv_r1 = nn.Conv2d(in_dim, out_dim, kernel_size=(1,
kernel_size[1]), padding=(0, pad1))
self.conv_r2 = nn.Conv2d(out_dim, out_dim, kernel_size=(kernel_size
[0], 1), padding=(pad0, 0))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def forward(self, x):
x_l = self.conv_l1(x)
x_l = self.conv_l2(x_l)
x_r = self.conv_r1(x)
x_r = self.conv_r2(x_r)
x = x_l + x_r
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4, 'kernel_size': [4, 4]}]
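# Usage sketch (added for illustration, not part of the original source):
# with the even kernel_size [4, 4] the integer padding is asymmetric, so each
# spatial dim shrinks from 4 to 3 (matching the (4, 4, 3, 3) buffers below).
_blk = GlobalConvBlock(in_dim=4, out_dim=4, kernel_size=[4, 4])
assert _blk(torch.rand([4, 4, 4, 4])).shape == (4, 4, 3, 3)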
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 12 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 9 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 4), (48, 12, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(192)](buf1, primals_2, 192,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 3, 3), (36, 9, 3, 1))
buf3 = extern_kernels.convolution(primals_3, primals_6, stride=(1,
1), padding=(0, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 3), (48, 12, 3, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_0[grid(192)](buf4, primals_7, 192,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf5 = extern_kernels.convolution(buf4, primals_8, stride=(1, 1),
padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 3, 3), (36, 9, 3, 1))
buf6 = buf2
del buf2
triton_poi_fused_add_convolution_1[grid(144)](buf6, primals_5, buf5,
primals_9, 144, XBLOCK=256, num_warps=4, num_stages=1)
del buf5
del primals_5
del primals_9
return (buf6, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf4)
class GlobalConvBlockNew(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size):
super(GlobalConvBlockNew, self).__init__()
pad0 = int((kernel_size[0] - 1) / 2)
pad1 = int((kernel_size[1] - 1) / 2)
self.conv_l1 = nn.Conv2d(in_dim, out_dim, kernel_size=(kernel_size[
0], 1), padding=(pad0, 0))
self.conv_l2 = nn.Conv2d(out_dim, out_dim, kernel_size=(1,
kernel_size[1]), padding=(0, pad1))
self.conv_r1 = nn.Conv2d(in_dim, out_dim, kernel_size=(1,
kernel_size[1]), padding=(0, pad1))
self.conv_r2 = nn.Conv2d(out_dim, out_dim, kernel_size=(kernel_size
[0], 1), padding=(pad0, 0))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def forward(self, input_0):
primals_1 = self.conv_l1.weight
primals_2 = self.conv_l1.bias
primals_4 = self.conv_l2.weight
primals_5 = self.conv_l2.bias
primals_6 = self.conv_r1.weight
primals_7 = self.conv_r1.bias
primals_8 = self.conv_r2.weight
primals_9 = self.conv_r2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
odgiv/SegAN
|
GlobalConvBlock
| false
| 4,104
|
[
"MIT"
] | 0
|
d7a91fbc10139dc81c61737326649a3a758cdf94
|
https://github.com/odgiv/SegAN/tree/d7a91fbc10139dc81c61737326649a3a758cdf94
|
EdgeFeaturesLayer
|
import torch
import torch.nn as nn
class EdgeFeaturesLayer(nn.Module):
def __init__(self, d_model, d_edge, h, dropout):
super(EdgeFeaturesLayer, self).__init__()
assert d_model % h == 0
d_model // h
self.linear = nn.Linear(d_edge, 1, bias=False)
with torch.no_grad():
self.linear.weight.fill_(0.25)
def forward(self, x):
p_edge = x.permute(0, 2, 3, 1)
p_edge = self.linear(p_edge).permute(0, 3, 1, 2)
return torch.relu(p_edge)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_edge': 4, 'h': 4, 'dropout': 0.5}]
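# Usage sketch (added for illustration, not part of the original source):
# the d_edge channels are projected down to one ReLU-gated edge channel.
_layer = EdgeFeaturesLayer(d_model=4, d_edge=4, h=4, dropout=0.5)
_y = _layer(torch.rand([4, 4, 4, 4]))
assert _y.shape == (4, 1, 4, 4) and _y.min().item() >= 0.0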
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf3
class EdgeFeaturesLayerNew(nn.Module):
def __init__(self, d_model, d_edge, h, dropout):
super(EdgeFeaturesLayerNew, self).__init__()
assert d_model % h == 0
d_model // h
self.linear = nn.Linear(d_edge, 1, bias=False)
with torch.no_grad():
self.linear.weight.fill_(0.25)
def forward(self, input_0):
primals_2 = self.linear.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
odb9402/MAT
|
EdgeFeaturesLayer
| false
| 4,106
|
[
"MIT"
] | 0
|
95d8083170da2c8ce1f5898b3a556bcf54eac8cc
|
https://github.com/odb9402/MAT/tree/95d8083170da2c8ce1f5898b3a556bcf54eac8cc
|
Generator
|
import math
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class ScaleNorm(nn.Module):
"""ScaleNorm"""
"""All g’s in SCALE NORM are initialized to sqrt(d)"""
def __init__(self, scale, eps=1e-05):
super(ScaleNorm, self).__init__()
self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
self.eps = eps
def forward(self, x):
norm = self.scale / torch.norm(x, dim=-1, keepdim=True).clamp(min=
self.eps)
return x * norm
class Generator(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, aggregation_type='mean', n_output=1,
n_layers=1, leaky_relu_slope=0.01, dropout=0.0, scale_norm=False):
super(Generator, self).__init__()
if n_layers == 1:
self.proj = nn.Linear(d_model, n_output)
else:
self.proj = []
for i in range(n_layers - 1):
self.proj.append(nn.Linear(d_model, d_model))
self.proj.append(nn.LeakyReLU(leaky_relu_slope))
self.proj.append(ScaleNorm(d_model) if scale_norm else
LayerNorm(d_model))
self.proj.append(nn.Dropout(dropout))
self.proj.append(nn.Linear(d_model, n_output))
self.proj = torch.nn.Sequential(*self.proj)
self.aggregation_type = aggregation_type
def forward(self, x, mask):
mask = mask.unsqueeze(-1).float()
out_masked = x * mask
if self.aggregation_type == 'mean':
out_sum = out_masked.sum(dim=1)
mask_sum = mask.sum(dim=1)
out_avg_pooling = out_sum / mask_sum
elif self.aggregation_type == 'sum':
out_sum = out_masked.sum(dim=1)
out_avg_pooling = out_sum
elif self.aggregation_type == 'dummy_node':
out_avg_pooling = out_masked[:, 0]
projected = self.proj(out_avg_pooling)
return projected
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
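# Usage sketch (added for illustration, not part of the original source):
# with 'mean' aggregation the masked sum is divided by the mask sum before the
# final projection; an all-ones mask keeps the division well defined here.
_gen = Generator(d_model=4)
_out = _gen(torch.rand([4, 4, 4, 4]), torch.ones([4, 4, 4, 4]))
assert _out.shape == (4, 4, 4, 1)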
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x1 = xindex // 4 % 16
x2 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tmp1 + tmp4
tmp16 = tmp15 + tmp8
tmp17 = tmp16 + tmp12
tmp18 = tmp14 / tmp17
tl.store(out_ptr0 + x4, tmp18, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_sum_0[grid(256)](primals_2, primals_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_3, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_3
del primals_4
return reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class ScaleNorm(nn.Module):
"""ScaleNorm"""
"""All g’s in SCALE NORM are initialized to sqrt(d)"""
def __init__(self, scale, eps=1e-05):
super(ScaleNorm, self).__init__()
self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
self.eps = eps
def forward(self, x):
norm = self.scale / torch.norm(x, dim=-1, keepdim=True).clamp(min=
self.eps)
return x * norm
class GeneratorNew(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, aggregation_type='mean', n_output=1,
n_layers=1, leaky_relu_slope=0.01, dropout=0.0, scale_norm=False):
super(GeneratorNew, self).__init__()
if n_layers == 1:
self.proj = nn.Linear(d_model, n_output)
else:
self.proj = []
for i in range(n_layers - 1):
self.proj.append(nn.Linear(d_model, d_model))
self.proj.append(nn.LeakyReLU(leaky_relu_slope))
self.proj.append(ScaleNorm(d_model) if scale_norm else
LayerNorm(d_model))
self.proj.append(nn.Dropout(dropout))
self.proj.append(nn.Linear(d_model, n_output))
self.proj = torch.nn.Sequential(*self.proj)
self.aggregation_type = aggregation_type
def forward(self, input_0, input_1):
primals_3 = self.proj.weight
primals_4 = self.proj.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
odb9402/MAT
|
Generator
| false
| 4,107
|
[
"MIT"
] | 0
|
95d8083170da2c8ce1f5898b3a556bcf54eac8cc
|
https://github.com/odb9402/MAT/tree/95d8083170da2c8ce1f5898b3a556bcf54eac8cc
|
Concat
|
import torch
from torch import nn
import torch.nn
import torch.optim
class Concat(nn.Module):
def forward(self, state: 'torch.Tensor', action: 'torch.Tensor'):
return torch.cat((state, action), dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
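# Usage sketch (added for illustration, not part of the original source):
# state and action are joined along the last axis, 4 + 4 -> 8 here.
_sa = Concat()(torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]))
assert _sa.shape == (4, 4, 4, 8)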
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, arg1_1, buf0, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class ConcatNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
mcx/ReAgent
|
Concat
| false
| 4,108
|
[
"BSD-3-Clause"
] | 0
|
57b58a8b3a6b74bb87a197b73a6cd108ddad895e
|
https://github.com/mcx/ReAgent/tree/57b58a8b3a6b74bb87a197b73a6cd108ddad895e
|
Quantization
|
import torch
import torch.utils.data
import torch.nn as nn
class Quant(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
input = torch.clamp(input, 0, 1)
output = (input * 255.0).round() / 255.0
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output
class Quantization(nn.Module):
def __init__(self):
super(Quantization, self).__init__()
def forward(self, input):
return Quant.apply(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
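# Usage sketch (added for illustration, not part of the original source):
# values are clamped to [0, 1] and snapped to the 256-level grid, and the
# straight-through backward passes gradients through unchanged.
_y = Quantization()(torch.rand([4, 4, 4, 4]) * 2 - 0.5)
assert torch.equal(_y, (_y * 255.0).round() / 255.0)
assert _y.min().item() >= 0.0 and _y.max().item() <= 1.0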
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_mul_round_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 1.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = 255.0
tmp6 = tmp4 * tmp5
tmp7 = libdevice.nearbyint(tmp6)
tmp8 = 0.00392156862745098
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_mul_round_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class Quant(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
input = torch.clamp(input, 0, 1)
output = (input * 255.0).round() / 255.0
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output
class QuantizationNew(nn.Module):
def __init__(self):
super(QuantizationNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
peterhan91/Invertible-Image-Rescaling
|
Quantization
| false
| 4,109
|
[
"Apache-2.0"
] | 0
|
b92162f5e9be2cff2f5dba379914fcded4e04f4c
|
https://github.com/peterhan91/Invertible-Image-Rescaling/tree/b92162f5e9be2cff2f5dba379914fcded4e04f4c
|
SpatialMeanAndStd
|
import torch
import torch.nn.functional
import torch.nn as nn
import torch.nn.init
import torch.onnx
class SpatialMeanAndStd(nn.Module):
def __init__(self, shape, eps=0.0001, half_size=1.0):
super(SpatialMeanAndStd, self).__init__()
p = torch.empty((2, shape[0], shape[1]), dtype=torch.float32)
p[0, ...] = torch.linspace(-half_size, half_size, shape[1])[None, :]
p[1, ...] = torch.linspace(-half_size, half_size, shape[0])[:, None]
self.position_code = nn.Parameter(p)
self.position_code.requires_grad = False
self._shape = shape
self._eps = eps
def forward(self, x):
assert x.shape[1] == self._shape[0
], f'input shape {x.shape} vs expected {self._shape}'
assert x.shape[2] == self._shape[1
], f'input shape {x.shape} vs expected {self._shape}'
mean = torch.sum(x[:, None, :, :] * self.position_code[None, ...],
dim=[2, 3])
diff = self.position_code[None, ...] - mean[..., None, None]
diff_squared = diff * diff
std = torch.sqrt(torch.sum(x[:, None, :, :] * diff_squared, dim=[2,
3]) + self._eps)
return mean, std
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'shape': [4, 4]}]
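# Usage sketch (added for illustration, not part of the original source):
# the input acts as a per-pixel weight map; mean and std come back per sample
# for each of the two position-code channels.
_m = SpatialMeanAndStd(shape=[4, 4])
_mean, _std = _m(torch.rand([4, 4, 4]))
assert _mean.shape == (4, 2) and _std.shape == (4, 2)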
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional
import torch.nn as nn
import torch.nn.init
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mul_sqrt_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 8
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x1 = xindex // 2
x0 = xindex % 2
x3 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r2 + 16 * x0), xmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp1 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tmp0 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = 0.0001
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp16, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (2, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_mul_sqrt_sub_sum_0[grid(8)](buf2, arg0_1,
arg1_1, buf0, 8, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0, buf2
class SpatialMeanAndStdNew(nn.Module):
def __init__(self, shape, eps=0.0001, half_size=1.0):
super(SpatialMeanAndStdNew, self).__init__()
p = torch.empty((2, shape[0], shape[1]), dtype=torch.float32)
p[0, ...] = torch.linspace(-half_size, half_size, shape[1])[None, :]
p[1, ...] = torch.linspace(-half_size, half_size, shape[0])[:, None]
self.position_code = nn.Parameter(p)
self.position_code.requires_grad = False
self._shape = shape
self._eps = eps
def forward(self, input_0):
arg1_1 = self.position_code
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0], output[1]
|
opentrack/neuralnet-tracker-traincode
|
SpatialMeanAndStd
| false
| 4,110
|
[
"ISC",
"CC0-1.0",
"Unlicense"
] | 0
|
688ada0f46cb407d1809b50c11a136a239290123
|
https://github.com/opentrack/neuralnet-tracker-traincode/tree/688ada0f46cb407d1809b50c11a136a239290123
|
PositionGenerator
|
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class PositionGenerator(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model):
super(PositionGenerator, self).__init__()
self.norm = LayerNorm(d_model)
self.proj = nn.Linear(d_model, 3)
def forward(self, x, mask):
mask = mask.unsqueeze(-1).float()
out_masked = self.norm(x) * mask
projected = self.proj(out_masked)
return projected
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
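# Usage sketch (added for illustration, not part of the original source):
# LayerNorm runs before masking, and the mask's trailing unsqueeze broadcasts
# an extra axis, so d_model=4 features map to 3 coordinates per position.
_pg = PositionGenerator(d_model=4)
_xyz = _pg(torch.rand([4, 4, 4, 4]), torch.ones([4, 4, 4, 4]))
assert _xyz.shape == (4, 4, 4, 4, 3)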
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 256
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x5, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (3, 4), (4, 1))
assert_size_stride(primals_6, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_3,
primals_2, primals_4, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_3
del primals_4
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_mul_1[grid(1024)](buf0, primals_1, buf1, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((256, 3), (3, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf1, (256, 4),
(4, 1), 0), reinterpret_tensor(primals_5, (4, 3), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_6
return reinterpret_tensor(buf2, (4, 4, 4, 4, 3), (192, 48, 12, 3, 1), 0
), primals_1, primals_2, reinterpret_tensor(buf1, (256, 4), (4, 1), 0
), primals_5
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class PositionGeneratorNew(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model):
super(PositionGeneratorNew, self).__init__()
self.norm = LayerNorm(d_model)
self.proj = nn.Linear(d_model, 3)
def forward(self, input_0, input_1):
primals_3 = self.norm.a_2
primals_4 = self.norm.b_2
primals_5 = self.proj.weight
primals_6 = self.proj.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
odb9402/MAT
|
PositionGenerator
| false
| 4,111
|
[
"MIT"
] | 0
|
95d8083170da2c8ce1f5898b3a556bcf54eac8cc
|
https://github.com/odb9402/MAT/tree/95d8083170da2c8ce1f5898b3a556bcf54eac8cc
|
SoftmaxOutputLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class OutputLayer(nn.Module):
"""
Abstract base class for output layer.
Handles projection to output labels
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
        raise NotImplementedError('Must implement {}.loss'.format(self.__class__.__name__))
class SoftmaxOutputLayer(OutputLayer):
"""
Implements a softmax based output layer
"""
def forward(self, hidden):
logits = self.output_projection(hidden)
probs = F.softmax(logits, -1)
_, predictions = torch.max(probs, dim=-1)
return predictions
def loss(self, hidden, labels):
logits = self.output_projection(hidden)
log_probs = F.log_softmax(logits, -1)
        return F.nll_loss(log_probs.view(-1, self.output_size), labels.view(-1))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'output_size': 4}]
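# Hedged usage sketch (added for illustration, not from the original repo):
# forward returns hard argmax predictions, i.e. int64 class indices with
# the trailing hidden dimension reduced away.
if __name__ == '__main__':
    layer = SoftmaxOutputLayer(hidden_size=4, output_size=4)
    hidden, = get_inputs()
    preds = layer(hidden)
    print(preds.shape, preds.dtype)  # torch.Size([4, 4, 4]) torch.int64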
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_max_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp0 / tmp6
tmp8 = tmp1 / tmp6
tmp9 = tmp7 > tmp8
tmp10 = tmp7 == tmp8
tmp11 = tmp7 != tmp7
tmp12 = tmp8 != tmp8
tmp13 = tmp11 > tmp12
tmp14 = tmp9 | tmp13
tmp15 = tmp11 & tmp12
tmp16 = tmp10 | tmp15
tmp17 = tl.full([1], 0, tl.int64)
tmp18 = tl.full([1], 1, tl.int64)
tmp19 = tmp17 < tmp18
tmp20 = tmp16 & tmp19
tmp21 = tmp14 | tmp20
tmp22 = tl.where(tmp21, tmp7, tmp8)
tmp23 = tl.where(tmp21, tmp17, tmp18)
tmp24 = tmp3 / tmp6
tmp25 = tmp22 > tmp24
tmp26 = tmp22 == tmp24
tmp27 = tmp22 != tmp22
tmp28 = tmp24 != tmp24
tmp29 = tmp27 > tmp28
tmp30 = tmp25 | tmp29
tmp31 = tmp27 & tmp28
tmp32 = tmp26 | tmp31
tmp33 = tl.full([1], 2, tl.int64)
tmp34 = tmp23 < tmp33
tmp35 = tmp32 & tmp34
tmp36 = tmp30 | tmp35
tmp37 = tl.where(tmp36, tmp22, tmp24)
tmp38 = tl.where(tmp36, tmp23, tmp33)
tmp39 = tmp5 / tmp6
tmp40 = tmp37 > tmp39
tmp41 = tmp37 == tmp39
tmp42 = tmp37 != tmp37
tmp43 = tmp39 != tmp39
tmp44 = tmp42 > tmp43
tmp45 = tmp40 | tmp44
tmp46 = tmp42 & tmp43
tmp47 = tmp41 | tmp46
tmp48 = tl.full([1], 3, tl.int64)
tmp49 = tmp38 < tmp48
tmp50 = tmp47 & tmp49
tmp51 = tmp45 | tmp50
tl.where(tmp51, tmp37, tmp39)
tmp53 = tl.where(tmp51, tmp38, tmp48)
tl.store(out_ptr0 + x0, tmp53, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4,), (1,))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(arg1_1, reinterpret_tensor(arg2_1, (64, 4), (4,
1), 0), reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf0)
del arg0_1
del arg1_1
del arg2_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
triton_poi_fused_max_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf1
return buf2,
class OutputLayer(nn.Module):
"""
Abstract base class for output layer.
Handles projection to output labels
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
        raise NotImplementedError('Must implement {}.loss'.format(self.__class__.__name__))
class SoftmaxOutputLayerNew(OutputLayer):
"""
Implements a softmax based output layer
"""
def loss(self, hidden, labels):
logits = self.output_projection(hidden)
log_probs = F.log_softmax(logits, -1)
        return F.nll_loss(log_probs.view(-1, self.output_size), labels.view(-1))
def forward(self, input_0):
arg0_1 = self.output_projection.weight
arg1_1 = self.output_projection.bias
arg2_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
oya163/torchnlp
|
SoftmaxOutputLayer
| false
| 4,112
|
[
"Apache-2.0"
] | 0
|
361caa24d741e47b8bd92af122ae281d6ad72d9d
|
https://github.com/oya163/torchnlp/tree/361caa24d741e47b8bd92af122ae281d6ad72d9d
|
ScoreCap
|
import torch
from torch import nn
import torch.nn
import torch.optim
class ScoreCap(nn.Module):
def __init__(self, cap: 'float'):
super().__init__()
self.cap = cap
def forward(self, input):
return torch.clip(input, max=self.cap)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'cap': 4}]
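# Hedged sketch (added for illustration): the cap clips only from above,
# so values at or below the cap pass through unchanged.
if __name__ == '__main__':
    cap = ScoreCap(cap=0.5)
    print(cap(torch.tensor([0.1, 0.5, 0.9])))  # tensor([0.1000, 0.5000, 0.5000])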
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ScoreCapNew(nn.Module):
def __init__(self, cap: 'float'):
super().__init__()
self.cap = cap
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
mcx/ReAgent
|
ScoreCap
| false
| 4,113
|
[
"BSD-3-Clause"
] | 0
|
57b58a8b3a6b74bb87a197b73a6cd108ddad895e
|
https://github.com/mcx/ReAgent/tree/57b58a8b3a6b74bb87a197b73a6cd108ddad895e
|
SelfGating
|
import torch
import torch.nn as nn
class SelfGating(nn.Module):
def __init__(self, input_dim):
super(SelfGating, self).__init__()
self.fc = nn.Linear(input_dim, input_dim)
def forward(self, input_tensor):
"""Feature gating as used in S3D-G"""
spatiotemporal_average = torch.mean(input_tensor, dim=[2, 3, 4])
weights = self.fc(spatiotemporal_average)
weights = torch.sigmoid(weights)
return weights[:, :, None, None, None] * input_tensor
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
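# Hedged usage sketch (added for illustration): the gate is one sigmoid
# weight per (batch, channel), computed from the spatiotemporal mean and
# broadcast over the T/H/W dimensions, so the output shape matches the input.
if __name__ == '__main__':
    gate = SelfGating(input_dim=4)
    video, = get_inputs()  # [B, C, T, H, W]
    print(gate(video).shape)  # torch.Size([4, 4, 4, 4, 4])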
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 64, XBLOCK=8,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, buf1, reinterpret_tensor(primals_2,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_mul_1[grid(1024)](buf2, primals_1, buf3, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
return buf3, primals_1, buf1, buf2
class SelfGatingNew(nn.Module):
def __init__(self, input_dim):
super(SelfGatingNew, self).__init__()
self.fc = nn.Linear(input_dim, input_dim)
def forward(self, input_0):
primals_2 = self.fc.weight
primals_3 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
necla-ml/CPR
|
SelfGating
| false
| 4,114
|
[
"BSD-3-Clause"
] | 0
|
101023c587a35b254ea640b4501167a6830856af
|
https://github.com/necla-ml/CPR/tree/101023c587a35b254ea640b4501167a6830856af
|
SharpenedCosineSimilarity
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SharpenedCosineSimilarity(nn.Conv2d):
def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size,
stride=1, padding=None, dilation=1, groups: 'int'=1, bias: 'bool'=
False, q_init: 'float'=10, p_init: 'float'=1.0, q_scale: 'float'=
0.3, p_scale: 'float'=5, eps: 'float'=1e-06):
if padding is None:
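            # note: this check parses only the minor version, so torch 2.x
            # (e.g. '2.0' -> minor 0) takes the integer-padding branch below.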
if int(torch.__version__.split('.')[1]) >= 10:
padding = 'same'
else:
padding = (stride - 1 + dilation * (kernel_size - 1)) // 2
if isinstance(kernel_size, int):
kernel_size = kernel_size, kernel_size
bias = False
assert dilation == 1, 'Dilation has to be 1 to use AvgPool2d as L2-Norm backend.'
assert groups == in_channels or groups == 1, 'Either depthwise or full convolution. Grouped not supported'
super(SharpenedCosineSimilarity, self).__init__(in_channels,
out_channels, kernel_size, stride, padding, dilation, groups, bias)
self.p_scale = p_scale
self.q_scale = q_scale
self.p = torch.nn.Parameter(torch.full((out_channels,), float(
p_init * self.p_scale)))
self.q = torch.nn.Parameter(torch.full((1,), float(q_init * self.
q_scale)))
self.eps = eps
def forward(self, inp: 'torch.Tensor') ->torch.Tensor:
out = inp.square()
if self.groups == 1:
out = out.sum(1, keepdim=True)
q = torch.exp(-self.q / self.q_scale)
norm = F.conv2d(out, torch.ones_like(self.weight[:1, :1]), None,
self.stride, self.padding, self.dilation)
norm = norm + (q + self.eps)
weight = self.weight / self.weight.square().sum(dim=(1, 2, 3),
keepdim=True).sqrt()
out = F.conv2d(inp, weight, self.bias, self.stride, self.padding,
self.dilation, self.groups) / norm.sqrt()
magnitude = out.abs() + self.eps
sign = out.sign()
p = torch.exp(self.p / self.p_scale)
out = magnitude.pow(p.view(1, -1, 1, 1))
out = out * sign
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
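# Hedged usage sketch (added for illustration): an odd kernel_size keeps the
# spatial size via the 'same'/computed padding; p and q are the learned
# sharpening parameters applied after the cosine-similarity convolution.
if __name__ == '__main__':
    scs = SharpenedCosineSimilarity(in_channels=4, out_channels=4, kernel_size=3)
    x = torch.rand(4, 4, 8, 8)
    print(scs(x).shape)  # torch.Size([4, 4, 8, 8])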
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_ones_like_pow_sum_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_ones_like_pow_sum_1(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 1.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_per_fused_div_pow_sqrt_sum_2(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tmp0 / tmp6
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp7, xmask)
@triton.jit
def triton_poi_fused_abs_add_div_exp_mul_neg_pow_sign_sqrt_3(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 9
x2 = xindex // 36
x1 = xindex // 9 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 9 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp15 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp4 = -tmp3
tmp5 = 3.3333333333333335
tmp6 = tmp4 * tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = tmp1 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = tmp0 / tmp11
tmp13 = tl_math.abs(tmp12)
tmp14 = tmp13 + tmp8
tmp16 = 0.2
tmp17 = tmp15 * tmp16
tmp18 = tl_math.exp(tmp17)
tmp19 = libdevice.pow(tmp14, tmp18)
tmp20 = tl.full([1], 0, tl.int32)
tmp21 = tmp20 < tmp12
tmp22 = tmp21.to(tl.int8)
tmp23 = tmp12 < tmp20
tmp24 = tmp23.to(tl.int8)
tmp25 = tmp22 - tmp24
tmp26 = tmp25.to(tmp12.dtype)
tmp27 = tmp19 * tmp26
tl.store(out_ptr0 + x3, tmp27, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_ones_like_pow_sum_0[grid(64)](primals_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_ones_like_pow_sum_1[grid(16)](buf1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 3, 3), (9, 9, 3, 1))
del buf0
del buf1
buf3 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf4 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_div_pow_sqrt_sum_2[grid(4)](buf4, primals_3, buf5,
4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf6 = extern_kernels.convolution(primals_1, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 3, 3), (36, 9, 3, 1))
buf7 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused_abs_add_div_exp_mul_neg_pow_sign_sqrt_3[grid(144)](
buf6, buf2, primals_2, primals_4, buf7, 144, XBLOCK=128,
num_warps=4, num_stages=1)
return (buf7, primals_1, primals_2, primals_3, primals_4, buf2, buf4,
buf5, buf6)
class SharpenedCosineSimilarityNew(nn.Conv2d):
def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size,
stride=1, padding=None, dilation=1, groups: 'int'=1, bias: 'bool'=
False, q_init: 'float'=10, p_init: 'float'=1.0, q_scale: 'float'=
0.3, p_scale: 'float'=5, eps: 'float'=1e-06):
if padding is None:
if int(torch.__version__.split('.')[1]) >= 10:
padding = 'same'
else:
padding = (stride - 1 + dilation * (kernel_size - 1)) // 2
if isinstance(kernel_size, int):
kernel_size = kernel_size, kernel_size
bias = False
assert dilation == 1, 'Dilation has to be 1 to use AvgPool2d as L2-Norm backend.'
assert groups == in_channels or groups == 1, 'Either depthwise or full convolution. Grouped not supported'
super(SharpenedCosineSimilarityNew, self).__init__(in_channels,
out_channels, kernel_size, stride, padding, dilation, groups, bias)
self.p_scale = p_scale
self.q_scale = q_scale
self.p = torch.nn.Parameter(torch.full((out_channels,), float(
p_init * self.p_scale)))
self.q = torch.nn.Parameter(torch.full((1,), float(q_init * self.
q_scale)))
self.eps = eps
def forward(self, input_0):
primals_1 = self.weight
primals_4 = self.p
primals_2 = self.q
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
p-sodmann/sharpened_cosine_similarity_torch
|
SharpenedCosineSimilarity
| false
| 4,115
|
[
"MIT"
] | 0
|
0562e54f6494f365e321da9ae91edaba8595e3aa
|
https://github.com/p-sodmann/sharpened_cosine_similarity_torch/tree/0562e54f6494f365e321da9ae91edaba8595e3aa
|
GaussianParamNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class GaussianParamNet(nn.Module):
"""
    Parameterise a Gaussian distribution.
"""
def __init__(self, input_dim, output_dim):
super(GaussianParamNet, self).__init__()
self.fc1 = nn.Linear(input_dim, input_dim, bias=False)
self.layer_nml = nn.LayerNorm(input_dim, elementwise_affine=False)
self.fc2 = nn.Linear(input_dim, output_dim)
def forward(self, x):
"""
        x: input features with shape [B, K, 2*D]
        returns: (mu, sigma), each of shape [..., output_dim // 2]
"""
x = self.fc2(F.relu(self.layer_nml(self.fc1(x))))
mu, sigma = x.chunk(2, dim=-1)
sigma = F.softplus(sigma + 0.5) + 1e-08
return mu, sigma
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
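# Hedged usage sketch (added for illustration): the last dimension of the
# projection is chunked in half, so mu and sigma each carry output_dim // 2
# features, and softplus keeps every sigma entry strictly positive.
if __name__ == '__main__':
    net = GaussianParamNet(input_dim=4, output_dim=4)
    x, = get_inputs()
    mu, sigma = net(x)
    print(mu.shape, sigma.shape, bool((sigma > 0).all()))
    # torch.Size([4, 4, 4, 2]) torch.Size([4, 4, 4, 2]) True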
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_softplus_softplus_backward_2(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp1 = 0.5
tmp2 = tmp0 + tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = 1e-08
tmp9 = tmp7 + tmp8
tmp10 = 1.0
tmp11 = tmp2 * tmp10
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](buf0, buf1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_native_layer_norm_relu_threshold_backward_1[grid(256)
](buf0, buf1, buf2, buf3, buf7, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del buf2
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_4
buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused_add_softplus_softplus_backward_2[grid(128)](buf4,
buf5, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1)
return reinterpret_tensor(buf4, (4, 4, 4, 2), (64, 16, 4, 1), 0
), buf5, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
), buf6, primals_3, buf7
class GaussianParamNetNew(nn.Module):
"""
    Parameterise a Gaussian distribution.
"""
def __init__(self, input_dim, output_dim):
super(GaussianParamNetNew, self).__init__()
self.fc1 = nn.Linear(input_dim, input_dim, bias=False)
self.layer_nml = nn.LayerNorm(input_dim, elementwise_affine=False)
self.fc2 = nn.Linear(input_dim, output_dim)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_3 = self.fc2.weight
primals_4 = self.fc2.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
|
pemami4911/MulMON
|
GaussianParamNet
| false
| 4,116
|
[
"MIT"
] | 0
|
e01438e7a9a1259dc473e7ffd20a005eeaea87cb
|
https://github.com/pemami4911/MulMON/tree/e01438e7a9a1259dc473e7ffd20a005eeaea87cb
|
VectorQuantizer
|
import torch
import torch.utils.data
from torch import nn
from torch.nn import functional as F
class VectorQuantizer(nn.Module):
"""
Tensorflow original: https://github.com/deepmind/sonnet/blob/v2/sonnet/src/nets/vqvae.py
Based on: https://github.com/AntixK/PyTorch-VAE/blob/master/models/vq_vae.py
"""
def __init__(self, num_embeddings: 'int', embedding_dim: 'int', beta:
'float'=0.25):
super(VectorQuantizer, self).__init__()
self.K = num_embeddings
self.D = embedding_dim
self.beta = beta
self.embedding = nn.Embedding(self.K, self.D)
self.embedding.weight.data.uniform_(-1 / self.K, 1 / self.K)
def forward(self, latents):
flat_latents = latents.view(-1, self.D)
dist = torch.sum(flat_latents ** 2, dim=1, keepdim=True) + torch.sum(
self.embedding.weight ** 2, dim=1) - 2 * torch.matmul(flat_latents,
self.embedding.weight.t())
encoding_inds = torch.argmin(dist, dim=1).unsqueeze(1)
device = latents.device
encoding_one_hot = torch.zeros(encoding_inds.size(0), self.K,
device=device)
encoding_one_hot.scatter_(1, encoding_inds, 1)
quantized_latents = torch.matmul(encoding_one_hot, self.embedding.
weight)
quantized_latents = quantized_latents.view(latents.shape)
commitment_loss = F.mse_loss(quantized_latents.detach(), latents)
embedding_loss = F.mse_loss(quantized_latents, latents.detach())
vq_loss = commitment_loss * self.beta + embedding_loss
quantized_latents = latents + (quantized_latents - latents).detach()
avg_probs = torch.mean(encoding_one_hot, dim=0)
perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs +
1e-10)))
return quantized_latents.contiguous(), vq_loss, perplexity
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_embeddings': 4, 'embedding_dim': 4}]
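# Hedged usage sketch (added for illustration): the straight-through trick
# keeps quantized_latents differentiable w.r.t. the encoder, vq_loss is a
# scalar, and perplexity tracks codebook usage (at most K when uniform).
if __name__ == '__main__':
    vq = VectorQuantizer(num_embeddings=4, embedding_dim=4)
    z, = get_inputs()
    zq, vq_loss, perplexity = vq(z)
    print(zq.shape, float(vq_loss), float(perplexity))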
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 + tmp21
tmp24 = 2.0
tmp25 = tmp23 * tmp24
tmp26 = tmp22 - tmp25
tl.store(in_out_ptr0 + x2, tmp26, xmask)
@triton.jit
def triton_poi_fused_argmin_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 < tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 < tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 < tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x0, tmp46, xmask)
@triton.jit
def triton_poi_fused_scatter_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_per_fused_add_mse_loss_mse_loss_backward_mul_3(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp1 + tmp2
tmp8 = 0.0078125
tmp9 = tmp2 * tmp8
tmp10 = 256.0
tmp11 = tmp6 / tmp10
tmp12 = 0.25
tmp13 = tmp11 * tmp12
tmp14 = tmp13 + tmp11
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp7, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp9, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
@triton.jit
def triton_per_fused_mean_4(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_per_fused_add_exp_log_mean_mul_neg_sum_5(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 64.0
tmp2 = tmp0 / tmp1
tmp3 = 1e-10
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp6 = tmp2 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = -tmp9
tmp11 = tl_math.exp(tmp10)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sub_sum_0[grid(256)](buf1, primals_1,
primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_argmin_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused_scatter_2[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf2
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf3, primals_2, out=buf4)
del primals_2
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf10 = buf5
del buf5
triton_per_fused_add_mse_loss_mse_loss_backward_mul_3[grid(1)](buf10,
buf4, primals_1, buf6, buf9, 1, 256, num_warps=2, num_stages=1)
del buf4
del primals_1
buf7 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_mean_4[grid(4)](buf3, buf7, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
buf8 = empty_strided_cuda((), (), torch.float32)
buf11 = buf8
del buf8
triton_per_fused_add_exp_log_mean_mul_neg_sum_5[grid(1)](buf11,
buf7, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf7
return buf6, buf10, buf11, buf9, reinterpret_tensor(buf3, (4, 64), (1,
4), 0)
class VectorQuantizerNew(nn.Module):
"""
Tensorflow original: https://github.com/deepmind/sonnet/blob/v2/sonnet/src/nets/vqvae.py
Based on: https://github.com/AntixK/PyTorch-VAE/blob/master/models/vq_vae.py
"""
def __init__(self, num_embeddings: 'int', embedding_dim: 'int', beta:
'float'=0.25):
super(VectorQuantizerNew, self).__init__()
self.K = num_embeddings
self.D = embedding_dim
self.beta = beta
self.embedding = nn.Embedding(self.K, self.D)
self.embedding.weight.data.uniform_(-1 / self.K, 1 / self.K)
def forward(self, input_0):
primals_2 = self.embedding.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1], output[2]
|
ltschmitt/RecGen
|
VectorQuantizer
| false
| 4,117
|
[
"MIT"
] | 0
|
7f69b76b4213c823a3ff05c0e754face8b179896
|
https://github.com/ltschmitt/RecGen/tree/7f69b76b4213c823a3ff05c0e754face8b179896
|
CRFOutputLayer
|
import torch
import torch.nn as nn
class CRF(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
if len(feats.shape) != 3:
raise ValueError('feats must be 3-d got {}-d'.format(feats.shape))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
        Computes the negative log likelihood between features and tags:
        essentially the difference between individual sequence scores and
        the sum of all possible sequence scores (the partition function).
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns:
Negative log likelihood [a scalar]
"""
if len(feats.shape) != 3:
raise ValueError('feats must be 3-d got {}-d'.format(feats.shape))
if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d but got {}-d'.format(tags.shape))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns: Sequence score of shape [batch size]
"""
feats.shape[0]
        feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
        Computes the partition function for the CRF using the forward algorithm,
        i.e. it scores every possible tag sequence for the given feature
        vector sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(self.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
        Uses the Viterbi algorithm to predict the best tag sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
            raise ValueError('num_tags should be {} but got {}'.format(self.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
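# Worked check (added for illustration): for logits [1000., 1001.] a naive
# exp().sum().log() overflows, but the max-shifted form above gives
# 1001 + log(exp(-1) + exp(0)) = 1001 + log(1 + e**-1) ~ 1001.3133.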
class OutputLayer(nn.Module):
"""
Abstract base class for output layer.
Handles projection to output labels
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
        raise NotImplementedError('Must implement {}.loss'.format(self.__class__.__name__))
class CRFOutputLayer(OutputLayer):
"""
Implements a CRF based output layer
"""
def __init__(self, hidden_size, output_size):
super(CRFOutputLayer, self).__init__(hidden_size, output_size)
self.crf = CRF(output_size)
def forward(self, hidden):
feats = self.output_projection(hidden)
return self.crf(feats)
def loss(self, hidden, labels):
feats = self.output_projection(hidden)
return self.crf.loss(feats, labels)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'output_size': 4}]
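# Hedged usage sketch (added for illustration): forward runs Viterbi
# decoding and returns one int64 tag index per timestep, while loss expects
# gold tags of shape [batch, seq] with values in [0, output_size).
if __name__ == '__main__':
    layer = CRFOutputLayer(hidden_size=4, output_size=4)
    hidden, = get_inputs()                       # [batch, seq, hidden]
    gold = torch.randint(0, 4, (4, 4))           # gold tag indices
    print(layer(hidden).shape)                   # torch.Size([4, 4])
    print(float(layer.loss(hidden, gold)) >= 0)  # True: NLL is non-negative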
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr1 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr2 + 1)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp16 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + 2)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + 2)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp26 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr1 + 3)
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp33 = tl.load(in_ptr2 + 3)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK])
tmp36 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp12 = tmp9 + tmp11
tmp15 = tmp12 + tmp14
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp22 = tmp19 + tmp21
tmp25 = tmp22 + tmp24
tmp27 = tmp25 + tmp26
tmp28 = triton_helpers.maximum(tmp18, tmp27)
tmp32 = tmp29 + tmp31
tmp35 = tmp32 + tmp34
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp28, tmp37)
tmp39 = tmp8 > tmp17
tmp40 = tmp8 == tmp17
tmp41 = tmp8 != tmp8
tmp42 = tmp17 != tmp17
tmp43 = tmp41 > tmp42
tmp44 = tmp39 | tmp43
tmp45 = tmp41 & tmp42
tmp46 = tmp40 | tmp45
tmp47 = tl.full([1], 0, tl.int64)
tmp48 = tl.full([1], 1, tl.int64)
tmp49 = tmp47 < tmp48
tmp50 = tmp46 & tmp49
tmp51 = tmp44 | tmp50
tmp52 = tl.where(tmp51, tmp8, tmp17)
tmp53 = tl.where(tmp51, tmp47, tmp48)
tmp54 = tmp52 > tmp27
tmp55 = tmp52 == tmp27
tmp56 = tmp52 != tmp52
tmp57 = tmp27 != tmp27
tmp58 = tmp56 > tmp57
tmp59 = tmp54 | tmp58
tmp60 = tmp56 & tmp57
tmp61 = tmp55 | tmp60
tmp62 = tl.full([1], 2, tl.int64)
tmp63 = tmp53 < tmp62
tmp64 = tmp61 & tmp63
tmp65 = tmp59 | tmp64
tmp66 = tl.where(tmp65, tmp52, tmp27)
tmp67 = tl.where(tmp65, tmp53, tmp62)
tmp68 = tmp66 > tmp37
tmp69 = tmp66 == tmp37
tmp70 = tmp66 != tmp66
tmp71 = tmp37 != tmp37
tmp72 = tmp70 > tmp71
tmp73 = tmp68 | tmp72
tmp74 = tmp70 & tmp71
tmp75 = tmp69 | tmp74
tmp76 = tl.full([1], 3, tl.int64)
tmp77 = tmp67 < tmp76
tmp78 = tmp75 & tmp77
tmp79 = tmp73 | tmp78
tl.where(tmp79, tmp66, tmp37)
tmp81 = tl.where(tmp79, tmp67, tmp76)
tl.store(out_ptr0 + x2, tmp38, xmask)
tl.store(out_ptr1 + x2, tmp81, xmask)
@triton.jit
def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (5 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr2 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (6 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr2 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (7 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr2 + 3)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + x2, tmp34, xmask)
tl.store(out_ptr1 + x2, tmp77, xmask)
@triton.jit
def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (9 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr2 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (10 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr2 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (11 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr2 + 3)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + x2, tmp34, xmask)
tl.store(out_ptr1 + x2, tmp77, xmask)
@triton.jit
def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr2 + 1)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr3 + 1)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp33 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr2 + 2)
tmp36 = tl.broadcast_to(tmp35, [XBLOCK])
tmp39 = tl.load(in_ptr3 + 2)
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp56 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp57 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp58 = tl.load(in_ptr2 + 3)
tmp59 = tl.broadcast_to(tmp58, [XBLOCK])
tmp62 = tl.load(in_ptr3 + 3)
tmp63 = tl.broadcast_to(tmp62, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp8 = tmp5 + tmp7
tmp13 = tmp10 + tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp14 + tmp16
tmp18 = tmp8 > tmp17
tmp19 = tmp8 == tmp17
tmp20 = tmp8 != tmp8
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 0, tl.int64)
tmp27 = tl.full([1], 1, tl.int64)
tmp28 = tmp26 < tmp27
tmp29 = tmp25 & tmp28
tmp30 = tmp23 | tmp29
tmp31 = tl.where(tmp30, tmp8, tmp17)
tmp32 = tl.where(tmp30, tmp26, tmp27)
tmp37 = tmp34 + tmp36
tmp38 = tmp33 + tmp37
tmp41 = tmp38 + tmp40
tmp42 = tmp31 > tmp41
tmp43 = tmp31 == tmp41
tmp44 = tmp31 != tmp31
tmp45 = tmp41 != tmp41
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp32 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp31, tmp41)
tmp55 = tl.where(tmp53, tmp32, tmp50)
tmp60 = tmp57 + tmp59
tmp61 = tmp56 + tmp60
tmp64 = tmp61 + tmp63
tmp65 = tmp54 > tmp64
tmp66 = tmp54 == tmp64
tmp67 = tmp54 != tmp54
tmp68 = tmp64 != tmp64
tmp69 = tmp67 > tmp68
tmp70 = tmp65 | tmp69
tmp71 = tmp67 & tmp68
tmp72 = tmp66 | tmp71
tmp73 = tl.full([1], 3, tl.int64)
tmp74 = tmp55 < tmp73
tmp75 = tmp72 & tmp74
tmp76 = tmp70 | tmp75
tl.where(tmp76, tmp54, tmp64)
tmp78 = tl.where(tmp76, tmp55, tmp73)
tmp79 = tl.full([XBLOCK], 4, tl.int32)
tmp80 = tmp78 + tmp79
tmp81 = tmp78 < 0
tmp82 = tl.where(tmp81, tmp80, tmp78)
tl.device_assert((0 <= tmp82) & (tmp82 < 4) | ~xmask,
'index out of bounds: 0 <= tmp82 < 4')
tmp84 = tl.load(in_ptr4 + (tmp82 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp85 = tmp84 + tmp79
tmp86 = tmp84 < 0
tmp87 = tl.where(tmp86, tmp85, tmp84)
tl.device_assert((0 <= tmp87) & (tmp87 < 4) | ~xmask,
'index out of bounds: 0 <= tmp87 < 4')
tmp89 = tl.load(in_ptr5 + (tmp87 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp90 = tmp89 + tmp79
tmp91 = tmp89 < 0
tmp92 = tl.where(tmp91, tmp90, tmp89)
tl.device_assert((0 <= tmp92) & (tmp92 < 4) | ~xmask,
'index out of bounds: 0 <= tmp92 < 4')
tmp94 = tl.load(in_ptr6 + (tmp92 + 4 * x0), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + 4 * x0, tmp78, xmask)
tl.store(out_ptr1 + 4 * x0, tmp94, xmask)
tl.store(out_ptr2 + 4 * x0, tmp89, xmask)
tl.store(out_ptr3 + 4 * x0, tmp84, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4,), (1,))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg3_1, (4,), (1,))
assert_size_stride(arg4_1, (4, 4), (4, 1))
assert_size_stride(arg5_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg2_1, (16, 4), (4, 1), 0),
reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
del arg0_1
del arg2_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_add_max_0[grid(16)](buf0, arg1_1, arg3_1, arg4_1,
buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg3_1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_1[grid(16)](buf1, buf0, arg1_1, arg4_1,
buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = buf1
del buf1
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_2[grid(16)](buf3, buf0, arg1_1, arg4_1,
buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg4_1
del buf3
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf7 = reinterpret_tensor(buf11, (4, 1), (4, 1), 3)
buf8 = reinterpret_tensor(buf11, (4, 1), (4, 1), 0)
buf9 = reinterpret_tensor(buf11, (4, 1), (4, 1), 1)
buf10 = reinterpret_tensor(buf11, (4, 1), (4, 1), 2)
triton_poi_fused_add_gather_max_3[grid(4)](buf5, buf0, arg1_1,
arg5_1, buf6, buf4, buf2, buf7, buf8, buf9, buf10, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del arg1_1
del arg5_1
del buf0
del buf2
del buf4
del buf5
del buf6
return buf11,
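# Note (added, an interpretation of the generated code): the four Triton
# kernels above unroll the Viterbi recursion for a fixed sequence length of
# 4. Kernels 0-2 each perform one `(v.unsqueeze(-1) + transitions).max(1)`
# step of the dynamic program, and kernel 3 adds the stop transitions, takes
# the final argmax, and backtracks through the stored index buffers
# (buf2/buf4/buf6) with gathers to assemble the best tag sequence in buf11.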
class CRF(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
        if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d, got shape {}'.format(
                feats.shape))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
        Computes the negative log-likelihood of the target tags given the
        features: the difference between the individual sequence score and
        the sum of scores over all possible sequences (the partition function).
Parameters:
feats: Input features [batch size, sequence length, number of tags]
            tags: Target tag indices [batch size, sequence length]. Should be
                in the range [0, num_tags)
Returns:
Negative log likelihood [a scalar]
"""
        if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d, got shape {}'.format(
                feats.shape))
        if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d, got shape {}'.format(
                tags.shape))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
            tags: Target tag indices [batch size, sequence length]. Should be
                in the range [0, num_tags)
Returns: Sequence score of shape [batch size]
"""
        feats.shape[0]  # no-op; likely a stripped assignment in the source
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1
)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
        Computes the partition function for the CRF using the forward algorithm,
        i.e. the total score over all possible tag sequences for the given
        feature vector sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
        Uses the Viterbi algorithm to predict the best tag sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
class OutputLayer(nn.Module):
"""
    Abstract base class for an output layer.
    Handles the projection to output labels.
    """
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
raise NotImplementedError('Must implement {}.loss'.format(self.
__class__.__name__))
class CRFOutputLayerNew(OutputLayer):
"""
    Implements a CRF-based output layer
"""
def __init__(self, hidden_size, output_size):
super(CRFOutputLayerNew, self).__init__(hidden_size, output_size)
self.crf = CRF(output_size)
def loss(self, hidden, labels):
feats = self.output_projection(hidden)
return self.crf.loss(feats, labels)
def forward(self, input_0):
arg0_1 = self.output_projection.weight
arg1_1 = self.output_projection.bias
arg4_1 = self.crf.transitions
arg3_1 = self.crf.start_transitions
arg5_1 = self.crf.stop_transitions
arg2_1 = input_0
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
return output[0]
|
oya163/torchnlp
|
CRFOutputLayer
| false
| 4,118
|
[
"Apache-2.0"
] | 0
|
361caa24d741e47b8bd92af122ae281d6ad72d9d
|
https://github.com/oya163/torchnlp/tree/361caa24d741e47b8bd92af122ae281d6ad72d9d
|
SparseDownSampleClose
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class SparseDownSampleClose(nn.Module):
def __init__(self, stride):
super(SparseDownSampleClose, self).__init__()
self.pooling = nn.MaxPool2d(stride, stride)
self.large_number = 600
def forward(self, d, mask):
encode_d = -(1 - mask) * self.large_number - d
d = -self.pooling(encode_d)
mask_result = self.pooling(mask)
d_result = d - (1 - mask_result) * self.large_number
return d_result, mask_result
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'stride': 1}]
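# Note (added): the encode/decode trick above is a sparsity-aware min-pool.
# Negating depths and offsetting invalid pixels (mask == 0) by -600 keeps
# them from winning the max-pool, so `-pooling(encode_d)` recovers the
# smallest *valid* depth per window; the final subtraction undoes the offset
# for windows with no valid pixel while mask_result records validity.
# A minimal sketch, assuming a 2x2 pool:
def _sparse_downsample_example():
    m = SparseDownSampleClose(stride=2)
    d = torch.rand(1, 1, 4, 4)  # dense depth map
    mask = (torch.rand(1, 1, 4, 4) > 0.5).float()  # validity mask
    d_down, mask_down = m(d, mask)  # both [1, 1, 2, 2]
    return d_down, mask_down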
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_mul_neg_rsub_sub_0(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp6 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = -tmp2
tmp4 = 600.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp8 = -tmp7
tmp9 = tmp2 * tmp4
tmp10 = tmp8 - tmp9
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(in_out_ptr0 + x0, tmp10, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_mul_neg_rsub_sub_0[grid(256)](
buf2, arg0_1, arg1_1, buf1, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del arg0_1
del arg1_1
return buf2, buf1
class SparseDownSampleCloseNew(nn.Module):
def __init__(self, stride):
super(SparseDownSampleCloseNew, self).__init__()
self.pooling = nn.MaxPool2d(stride, stride)
self.large_number = 600
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
|
phatli/PENet_ICRA2021
|
SparseDownSampleClose
| false
| 4,119
|
[
"MIT"
] | 0
|
18594b8f11d4d99022d9c80a86a6e2d4e854404a
|
https://github.com/phatli/PENet_ICRA2021/tree/18594b8f11d4d99022d9c80a86a6e2d4e854404a
|
Allocation
|
from torch.nn import Module
import torch
from torch.nn import functional as F
from torch.nn import Linear
class Allocation(Module):
"""Determines allocation probability for each of the bidders given an input.
Args:
in_features: size of each input sample
bidders: number of bidders, which governs the size of each output sample
Shape:
- Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
additional dimensions and :math:`H_{in} = \\text{in\\_features}`
- Output: :math:`(N, *, H_{out})` where all but the last dimension
are the same shape as the input and :math:`H_{out} = \\text{bidders}`.
Examples::
>>> m = Allocation(20, 30)
>>> input = torch.randn(128, 20)
>>> allocation = m(input)
>>> print(allocation.size())
torch.Size([128, 30])
"""
__constants__ = ['in_features', 'bidders']
def __init__(self, in_features, bidders):
super(Allocation, self).__init__()
self.in_features = in_features
self.bidders = bidders
self.linear = Linear(in_features, bidders + 1)
def forward(self, x):
return F.softmax(self.linear(x), dim=1)[:, 0:self.bidders]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'bidders': 4}]
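# Note (added, an interpretation rather than documented behaviour): the
# linear layer emits bidders + 1 logits, so after the softmax the extra
# column can be read as the probability of allocating to no bidder, which
# is why it is sliced off. A minimal sketch for 2-d inputs:
def _allocation_example():
    m = Allocation(in_features=4, bidders=3)
    x = torch.randn(8, 4)
    alloc = m(x)  # [8, 3]; each row sums to at most 1
    return alloc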
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
from torch.nn import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 20
x2 = xindex // 80
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 80 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (20 + x0 + 80 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (40 + x0 + 80 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (60 + x0 + 80 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 20
x2 = xindex // 80
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 80 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (20 + x0 + 80 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (40 + x0 + 80 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (60 + x0 + 80 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (5, 4), (4, 1))
assert_size_stride(primals_2, (5,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 5), (5, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 5), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(320)](buf0, buf1, 320, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 5), (80, 20, 5, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(320)](buf1, buf2, 320, XBLOCK=128,
num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class AllocationNew(Module):
"""Determines allocation probability for each of the bidders given an input.
Args:
in_features: size of each input sample
bidders: number of bidders, which governs the size of each output sample
Shape:
- Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
additional dimensions and :math:`H_{in} = \\text{in\\_features}`
- Output: :math:`(N, *, H_{out})` where all but the last dimension
are the same shape as the input and :math:`H_{out} = \\text{bidders}`.
Examples::
>>> m = Allocation(20, 30)
>>> input = torch.randn(128, 20)
>>> allocation = m(input)
>>> print(allocation.size())
torch.Size([128, 30])
"""
__constants__ = ['in_features', 'bidders']
def __init__(self, in_features, bidders):
super(AllocationNew, self).__init__()
self.in_features = in_features
self.bidders = bidders
self.linear = Linear(in_features, bidders + 1)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
pjordan/dmch
|
Allocation
| false
| 4,120
|
[
"Apache-2.0"
] | 0
|
84e04ddb0679007b15acfdc275e0e3f51e50d9f2
|
https://github.com/pjordan/dmch/tree/84e04ddb0679007b15acfdc275e0e3f51e50d9f2
|
MinLossModule
|
import torch
import torch.nn.functional as F
class MinLossModule(torch.nn.Module):
def __init__(self):
super(MinLossModule, self).__init__()
def forward(self, predictions, targets):
y_losses = F.cross_entropy(predictions, targets, reduction='none')
y_losses = torch.sum(y_losses, dim=[1, 2])
Y_loss = torch.min(y_losses)
return Y_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
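# Note (added): with reduction='none', F.cross_entropy treats dim 1 of
# `predictions` as the class dimension, so y_losses has shape [N, H, W];
# summing over dims [1, 2] yields one loss per sample and the min keeps the
# best-fitting one. A minimal sketch with hard class-index targets:
def _min_loss_example():
    m = MinLossModule()
    preds = torch.randn(4, 3, 8, 8)  # logits over 3 classes
    targets = torch.randint(0, 3, (4, 8, 8))
    return m(preds, targets)  # scalar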
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp13 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp16 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.where(xmask, tmp28, 0)
tmp31 = tl.sum(tmp30, 1)[:, None]
tl.store(out_ptr0 + x0, tmp31, xmask)
@triton.jit
def triton_per_fused_min_2(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.min2(tmp1, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused__log_softmax_mul_neg_sum_1[grid(4)](buf0, arg0_1,
buf1, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_min_2[grid(1)](buf1, buf2, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf1
return buf2,
class MinLossModuleNew(torch.nn.Module):
def __init__(self):
super(MinLossModuleNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
pkalluri/specialized-conditional-pcnn
|
MinLossModule
| false
| 4,121
|
[
"Apache-2.0"
] | 0
|
ed94e47654ed749a7dd3492c4e074e2a8fb12df8
|
https://github.com/pkalluri/specialized-conditional-pcnn/tree/ed94e47654ed749a7dd3492c4e074e2a8fb12df8
|
SequentialAllocation
|
from torch.nn import Module
import torch
from torch.nn import functional as F
from torch.nn import Linear
def _sequential_allocation(p, weights):
_, slots, bidders_plus_one = p.shape
bidders = bidders_plus_one - 1
cumulative_total = p[:, 0, :bidders]
if weights is None:
alloc = cumulative_total
else:
alloc = cumulative_total * weights[0]
for k in range(1, slots):
slot_total = (1 - cumulative_total) * p[:, k, :bidders] * (1 - p[:,
k - 1, [bidders for _ in range(bidders)]])
if weights is None:
alloc = alloc + slot_total
else:
alloc = alloc + slot_total * weights[k]
cumulative_total = cumulative_total + slot_total
return alloc
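# Note (added, an interpretation): the loop keeps, per bidder, the cumulative
# probability of having been allocated in earlier slots; each new slot can
# only distribute the remaining (1 - cumulative) mass, gated by one minus
# the previous slot's no-allocation column (its last entry). A hedged sketch
# of the expected shapes:
def _sequential_allocation_example():
    batch, slots, bidders = 2, 3, 4
    p = torch.softmax(torch.randn(batch, slots, bidders + 1), dim=2)
    alloc = _sequential_allocation(p, weights=None)  # [batch, bidders]
    return alloc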
class SequentialAllocation(Module):
__constants__ = ['in_features', 'bidders', 'slots', 'weights']
def __init__(self, in_features, slots, bidders, weights=None):
super(SequentialAllocation, self).__init__()
self.in_features = in_features
self.slots = slots
self.bidders = bidders
self.weights = weights
self.linear = Linear(in_features, slots * (bidders + 1))
def forward(self, x):
probs = F.softmax(self.linear(x).reshape(-1, self.slots, self.
bidders + 1), dim=2)
return _sequential_allocation(probs, weights=self.weights)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'slots': 4, 'bidders': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
from torch.nn import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 5 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 5 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 5 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 5 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp1 - tmp8
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp8
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 5
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_index_mul_rsub_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 20 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (5 + x0 + 20 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (10 + x0 + 20 * x1), xmask)
tmp28 = tl.load(in_ptr0 + (15 + x0 + 20 * x1), xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = x0
tmp6 = tl.full([1], 2, tl.int64)
tmp7 = tmp5 < tmp6
tmp8 = tl.full([1], 1, tl.int64)
tmp9 = tmp5 < tmp8
tmp10 = tl.full([1], 4, tl.int64)
tmp11 = tl.where(tmp9, tmp10, tmp10)
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp5 < tmp12
tmp14 = tl.where(tmp13, tmp10, tmp10)
tmp15 = tl.where(tmp7, tmp11, tmp14)
tmp16 = tl.load(in_ptr0 + (tmp15 + 20 * x1), xmask, eviction_policy=
'evict_last')
tmp17 = tmp1 - tmp16
tmp18 = tmp4 * tmp17
tmp19 = tmp0 + tmp18
tmp20 = tmp1 - tmp19
tmp22 = tmp20 * tmp21
tmp23 = tl.load(in_ptr0 + (5 + tmp15 + 20 * x1), xmask, eviction_policy
='evict_last')
tmp24 = tmp1 - tmp23
tmp25 = tmp22 * tmp24
tmp26 = tmp19 + tmp25
tmp27 = tmp1 - tmp26
tmp29 = tmp27 * tmp28
tmp30 = tl.load(in_ptr0 + (10 + tmp15 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp31 = tmp1 - tmp30
tmp32 = tmp29 * tmp31
tmp33 = tmp26 + tmp32
tl.store(in_out_ptr0 + x2, tmp33, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (20, 4), (4, 1))
assert_size_stride(primals_2, (20,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 20), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4, 1), (4, 1, 256), torch.float32)
buf2 = empty_strided_cuda((64, 4, 1), (4, 1, 256), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, buf2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf0, (64, 4, 5), (20, 5, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(1280)](buf3, buf1, buf2, 1280,
XBLOCK=256, num_warps=4, num_stages=1)
del buf1
buf4 = reinterpret_tensor(buf2, (64, 4), (4, 1), 0)
del buf2
buf5 = buf4
del buf4
triton_poi_fused_add_index_mul_rsub_2[grid(256)](buf5, buf3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3
def _sequential_allocation(p, weights):
_, slots, bidders_plus_one = p.shape
bidders = bidders_plus_one - 1
cumulative_total = p[:, 0, :bidders]
if weights is None:
alloc = cumulative_total
else:
alloc = cumulative_total * weights[0]
for k in range(1, slots):
slot_total = (1 - cumulative_total) * p[:, k, :bidders] * (1 - p[:,
k - 1, [bidders for _ in range(bidders)]])
if weights is None:
alloc = alloc + slot_total
else:
alloc = alloc + slot_total * weights[k]
cumulative_total = cumulative_total + slot_total
return alloc
class SequentialAllocationNew(Module):
__constants__ = ['in_features', 'bidders', 'slots', 'weights']
def __init__(self, in_features, slots, bidders, weights=None):
super(SequentialAllocationNew, self).__init__()
self.in_features = in_features
self.slots = slots
self.bidders = bidders
self.weights = weights
self.linear = Linear(in_features, slots * (bidders + 1))
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
pjordan/dmch
|
SequentialAllocation
| false
| 4,122
|
[
"Apache-2.0"
] | 0
|
84e04ddb0679007b15acfdc275e0e3f51e50d9f2
|
https://github.com/pjordan/dmch/tree/84e04ddb0679007b15acfdc275e0e3f51e50d9f2
|
TextureSegmentation
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class TextureSegmentation(nn.Module):
def __init__(self):
super(TextureSegmentation, self).__init__()
self.decoder_conv1 = nn.ConvTranspose2d(16, 32, kernel_size=(8, 16),
stride=2, padding=(3, 7))
self.decoder_conv1.bias.data.zero_()
self.decoder_conv1.weight.data[:, :, :, :] = 1 / (8 * 8 * 8
) + torch.normal(mean=torch.tensor(0.0), std=torch.tensor(0.0001))
self.decoder_normalization1 = nn.GroupNorm(1, 32, eps=1e-05, affine
=True)
self.decoder_conv2 = nn.ConvTranspose2d(32, 16, kernel_size=(8, 16),
stride=2, padding=(3, 7), output_padding=0, groups=1, bias=True,
dilation=1)
self.decoder_conv2.bias.data.zero_()
self.decoder_conv2.weight.data[:, :, :, :] = 1 / (8 * 8 * 8
) + torch.normal(mean=torch.tensor(0.0), std=torch.tensor(0.0001))
self.decoder_normalization2 = nn.GroupNorm(1, 16, eps=1e-05, affine
=True)
self.decoder_conv3 = nn.ConvTranspose2d(16, 8, kernel_size=(8, 16),
stride=2, padding=(3, 7), output_padding=0, groups=1, bias=True,
dilation=1)
self.decoder_conv3.bias.data.zero_()
self.decoder_conv3.weight.data[:, :, :, :] = 1 / (4 * 8 * 8
) + torch.normal(mean=torch.tensor(0.0), std=torch.tensor(0.001))
self.decoder_normalization3 = nn.GroupNorm(1, 8, eps=1e-05, affine=True
)
self.decoder_conv5 = nn.ConvTranspose2d(8, 1, kernel_size=(8, 16),
stride=2, padding=(3, 7), output_padding=0, groups=1, bias=True,
dilation=1)
self.decoder_conv5.bias.data[:] = -(0.5 / 0.24)
self.decoder_conv5.weight.data[:, :, :, :] = 1 / (8 * 8 * 8 * 0.24
) + torch.normal(mean=torch.tensor(0.0), std=torch.tensor(0.001))
def forward(self, sample):
embeddings_dec1 = F.relu(self.decoder_conv1(sample, output_size=
torch.empty(sample.size()[0], 32, sample.size()[2] * 2, sample.
size()[3] * 2).size()))
embeddings_dec1 = self.decoder_normalization1(embeddings_dec1)
embeddings_dec2 = F.relu(self.decoder_conv2(embeddings_dec1,
output_size=torch.empty(embeddings_dec1.size()[0], 16,
embeddings_dec1.size()[2] * 2, embeddings_dec1.size()[3] * 2).
size()))
embeddings_dec2 = self.decoder_normalization2(embeddings_dec2)
embeddings_dec3 = F.relu(self.decoder_conv3(embeddings_dec2,
output_size=torch.empty(embeddings_dec2.size()[0], 8,
embeddings_dec2.size()[2] * 2, embeddings_dec2.size()[3] * 2).
size()))
embeddings_dec3 = self.decoder_normalization3(embeddings_dec3)
segment = F.sigmoid(self.decoder_conv5(embeddings_dec3, output_size
=torch.empty(embeddings_dec3.size()[0], 1, embeddings_dec3.size
()[2] * 2, embeddings_dec3.size()[3] * 2).size()))
return segment
def get_inputs():
return [torch.rand([4, 16, 4, 4])]
def get_init_inputs():
return [[], {}]
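# Hedged usage sketch (added): each stride-2 ConvTranspose2d doubles the
# spatial size, so the four decoder stages upsample 16x overall; for the
# sample input below the output is a single-channel sigmoid map.
def _texture_segmentation_example():
    m = TextureSegmentation()
    x = torch.rand(4, 16, 4, 4)
    seg = m(x)  # [4, 1, 64, 64], values in (0, 1)
    return seg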
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused_convolution_native_group_norm_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 4
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp6_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp6_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp6_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
r2 = rindex // 64
tmp0 = tl.load(in_out_ptr0 + (r3 + 2048 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp6_mean_next, tmp6_m2_next, tmp6_weight_next = (triton_helpers.
welford_reduce(tmp5, tmp6_mean, tmp6_m2, tmp6_weight, roffset == 0)
)
tmp6_mean = tl.where(rmask & xmask, tmp6_mean_next, tmp6_mean)
tmp6_m2 = tl.where(rmask & xmask, tmp6_m2_next, tmp6_m2)
tmp6_weight = tl.where(rmask & xmask, tmp6_weight_next, tmp6_weight)
tl.store(in_out_ptr0 + (r3 + 2048 * x0), tmp2, rmask & xmask)
tmp6_tmp, tmp7_tmp, tmp8_tmp = triton_helpers.welford(tmp6_mean,
tmp6_m2, tmp6_weight, 1)
tmp6 = tmp6_tmp[:, None]
tmp7 = tmp7_tmp[:, None]
tmp8_tmp[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
r2 = rindex // 64
tmp9 = tl.load(in_out_ptr0 + (r3 + 2048 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp19 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp21 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tl.full([1, 1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp11 - tmp6
tmp13 = 2048.0
tmp14 = tmp7 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.rsqrt(tmp16)
tmp18 = tmp12 * tmp17
tmp20 = tmp18 * tmp19
tmp22 = tmp20 + tmp21
tl.store(out_ptr2 + (r3 + 2048 * x0), tmp22, rmask & xmask)
tmp23 = 2048.0
tmp24 = tmp7 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.rsqrt(tmp26)
tl.store(out_ptr3 + x0, tmp27, xmask)
@triton.jit
def triton_red_fused_convolution_native_group_norm_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 4
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp6_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp6_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp6_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
r2 = rindex // 256
tmp0 = tl.load(in_out_ptr0 + (r3 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp6_mean_next, tmp6_m2_next, tmp6_weight_next = (triton_helpers.
welford_reduce(tmp5, tmp6_mean, tmp6_m2, tmp6_weight, roffset == 0)
)
tmp6_mean = tl.where(rmask & xmask, tmp6_mean_next, tmp6_mean)
tmp6_m2 = tl.where(rmask & xmask, tmp6_m2_next, tmp6_m2)
tmp6_weight = tl.where(rmask & xmask, tmp6_weight_next, tmp6_weight)
tl.store(in_out_ptr0 + (r3 + 4096 * x0), tmp2, rmask & xmask)
tmp6_tmp, tmp7_tmp, tmp8_tmp = triton_helpers.welford(tmp6_mean,
tmp6_m2, tmp6_weight, 1)
tmp6 = tmp6_tmp[:, None]
tmp7 = tmp7_tmp[:, None]
tmp8_tmp[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
r2 = rindex // 256
tmp9 = tl.load(in_out_ptr0 + (r3 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp19 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp21 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tl.full([1, 1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp11 - tmp6
tmp13 = 4096.0
tmp14 = tmp7 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.rsqrt(tmp16)
tmp18 = tmp12 * tmp17
tmp20 = tmp18 * tmp19
tmp22 = tmp20 + tmp21
tl.store(out_ptr2 + (r3 + 4096 * x0), tmp22, rmask & xmask)
tmp23 = 4096.0
tmp24 = tmp7 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.rsqrt(tmp26)
tl.store(out_ptr3 + x0, tmp27, xmask)
@triton.jit
def triton_red_fused_convolution_native_group_norm_2(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 4
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp6_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp6_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp6_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
r2 = rindex // 1024
tmp0 = tl.load(in_out_ptr0 + (r3 + 8192 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp6_mean_next, tmp6_m2_next, tmp6_weight_next = (triton_helpers.
welford_reduce(tmp5, tmp6_mean, tmp6_m2, tmp6_weight, roffset == 0)
)
tmp6_mean = tl.where(rmask & xmask, tmp6_mean_next, tmp6_mean)
tmp6_m2 = tl.where(rmask & xmask, tmp6_m2_next, tmp6_m2)
tmp6_weight = tl.where(rmask & xmask, tmp6_weight_next, tmp6_weight)
tl.store(in_out_ptr0 + (r3 + 8192 * x0), tmp2, rmask & xmask)
tmp6_tmp, tmp7_tmp, tmp8_tmp = triton_helpers.welford(tmp6_mean,
tmp6_m2, tmp6_weight, 1)
tmp6 = tmp6_tmp[:, None]
tmp7 = tmp7_tmp[:, None]
tmp8_tmp[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
r2 = rindex // 1024
tmp9 = tl.load(in_out_ptr0 + (r3 + 8192 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp19 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp21 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tl.full([1, 1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp11 - tmp6
tmp13 = 8192.0
tmp14 = tmp7 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.rsqrt(tmp16)
tmp18 = tmp12 * tmp17
tmp20 = tmp18 * tmp19
tmp22 = tmp20 + tmp21
tl.store(out_ptr2 + (r3 + 8192 * x0), tmp22, rmask & xmask)
tmp23 = 8192.0
tmp24 = tmp7 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.rsqrt(tmp26)
tl.store(out_ptr3 + x0, tmp27, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_2, (16, 32, 8, 16), (4096, 128, 16, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (32,), (1,))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 16, 8, 16), (2048, 128, 16, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16,), (1,))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (16, 8, 8, 16), (1024, 128, 16, 1))
assert_size_stride(primals_11, (8,), (1,))
assert_size_stride(primals_12, (8,), (1,))
assert_size_stride(primals_13, (8,), (1,))
assert_size_stride(primals_14, (8, 1, 8, 16), (128, 128, 16, 1))
assert_size_stride(primals_15, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2,
2), padding=(3, 7), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 8, 8), (2048, 64, 8, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 32, 8, 8), (2048, 64, 8, 1), torch.
float32)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_red_fused_convolution_native_group_norm_0[grid(4)](buf1,
primals_3, primals_4, primals_5, buf2, buf5, buf6, 4, 2048,
XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del primals_3
del primals_5
buf7 = extern_kernels.convolution(buf5, primals_6, stride=(2, 2),
padding=(3, 7), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 16, 16, 16), (4096, 256, 16, 1))
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf12 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.float32)
buf13 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_red_fused_convolution_native_group_norm_1[grid(4)](buf8,
primals_7, primals_8, primals_9, buf9, buf12, buf13, 4, 4096,
XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del primals_7
del primals_9
buf14 = extern_kernels.convolution(buf12, primals_10, stride=(2, 2),
padding=(3, 7), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 8, 32, 32), (8192, 1024, 32, 1))
buf15 = buf14
del buf14
buf16 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf19 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1),
torch.float32)
buf20 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_red_fused_convolution_native_group_norm_2[grid(4)](buf15,
primals_11, primals_12, primals_13, buf16, buf19, buf20, 4,
8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del primals_11
del primals_13
buf21 = extern_kernels.convolution(buf19, primals_14, stride=(2, 2),
padding=(3, 7), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf22 = buf21
del buf21
triton_poi_fused_convolution_sigmoid_3[grid(16384)](buf22,
primals_15, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_15
return (buf22, primals_1, primals_2, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf1, buf5, reinterpret_tensor(
buf2, (4, 1), (1, 1), 0), reinterpret_tensor(buf6, (4, 1), (1, 1),
0), buf8, buf12, reinterpret_tensor(buf9, (4, 1), (1, 1), 0),
reinterpret_tensor(buf13, (4, 1), (1, 1), 0), buf15, buf19,
reinterpret_tensor(buf16, (4, 1), (1, 1), 0), reinterpret_tensor(
buf20, (4, 1), (1, 1), 0), buf22)
class TextureSegmentationNew(nn.Module):
def __init__(self):
super(TextureSegmentationNew, self).__init__()
self.decoder_conv1 = nn.ConvTranspose2d(16, 32, kernel_size=(8, 16),
stride=2, padding=(3, 7))
self.decoder_conv1.bias.data.zero_()
self.decoder_conv1.weight.data[:, :, :, :] = 1 / (8 * 8 * 8
) + torch.normal(mean=torch.tensor(0.0), std=torch.tensor(0.0001))
self.decoder_normalization1 = nn.GroupNorm(1, 32, eps=1e-05, affine
=True)
self.decoder_conv2 = nn.ConvTranspose2d(32, 16, kernel_size=(8, 16),
stride=2, padding=(3, 7), output_padding=0, groups=1, bias=True,
dilation=1)
self.decoder_conv2.bias.data.zero_()
self.decoder_conv2.weight.data[:, :, :, :] = 1 / (8 * 8 * 8
) + torch.normal(mean=torch.tensor(0.0), std=torch.tensor(0.0001))
self.decoder_normalization2 = nn.GroupNorm(1, 16, eps=1e-05, affine
=True)
self.decoder_conv3 = nn.ConvTranspose2d(16, 8, kernel_size=(8, 16),
stride=2, padding=(3, 7), output_padding=0, groups=1, bias=True,
dilation=1)
self.decoder_conv3.bias.data.zero_()
self.decoder_conv3.weight.data[:, :, :, :] = 1 / (4 * 8 * 8
) + torch.normal(mean=torch.tensor(0.0), std=torch.tensor(0.001))
self.decoder_normalization3 = nn.GroupNorm(1, 8, eps=1e-05, affine=True
)
self.decoder_conv5 = nn.ConvTranspose2d(8, 1, kernel_size=(8, 16),
stride=2, padding=(3, 7), output_padding=0, groups=1, bias=True,
dilation=1)
self.decoder_conv5.bias.data[:] = -(0.5 / 0.24)
self.decoder_conv5.weight.data[:, :, :, :] = 1 / (8 * 8 * 8 * 0.24
) + torch.normal(mean=torch.tensor(0.0), std=torch.tensor(0.001))
def forward(self, input_0):
primals_2 = self.decoder_conv1.weight
primals_3 = self.decoder_conv1.bias
primals_4 = self.decoder_normalization1.weight
primals_5 = self.decoder_normalization1.bias
primals_6 = self.decoder_conv2.weight
primals_7 = self.decoder_conv2.bias
primals_8 = self.decoder_normalization2.weight
primals_9 = self.decoder_normalization2.bias
primals_10 = self.decoder_conv3.weight
primals_11 = self.decoder_conv3.bias
primals_12 = self.decoder_normalization3.weight
primals_13 = self.decoder_normalization3.bias
primals_14 = self.decoder_conv5.weight
primals_15 = self.decoder_conv5.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
|
paucarre/staal
|
TextureSegmentation
| false
| 4,123
|
[
"MIT"
] | 0
|
1635e514f0ed978a08c078afd258980bcb6f0cec
|
https://github.com/paucarre/staal/tree/1635e514f0ed978a08c078afd258980bcb6f0cec
|
GeometryFeature
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class GeometryFeature(nn.Module):
def __init__(self):
super(GeometryFeature, self).__init__()
def forward(self, z, vnorm, unorm, h, w, ch, cw, fh, fw):
x = z * (0.5 * h * (vnorm + 1) - ch) / fh
y = z * (0.5 * w * (unorm + 1) - cw) / fw
return torch.cat((x, y, z), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4,
4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
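# Note (added, an interpretation): the two expressions are a pinhole-camera
# back-projection. With vnorm/unorm in [-1, 1], 0.5 * h * (vnorm + 1)
# recovers the pixel row; subtracting the principal point (ch, cw) and
# dividing by the focal lengths (fh, fw) scales depth z into metric x/y, so
# the output concatenates an (x, y, z) point per pixel. A shape sketch:
def _geometry_feature_example():
    g = GeometryFeature()
    # z, vnorm, unorm, h, w, ch, cw, fh, fw -- all per-pixel tensors here
    t = [torch.rand(2, 1, 8, 8) for _ in range(9)]
    xyz = g(*t)  # [2, 3, 8, 8]
    return xyz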
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 12
x0 = xindex % 16
x2 = xindex // 192
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tl.load(in_ptr2 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp10 = 1.0
tmp11 = tmp9 + tmp10
tmp12 = tmp8 * tmp11
tmp13 = tl.load(in_ptr3 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0
)
tmp14 = tmp12 - tmp13
tmp15 = tmp5 * tmp14
tmp16 = tl.load(in_ptr4 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0
)
tmp17 = tmp15 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp4, tmp17, tmp18)
tmp20 = tmp0 >= tmp3
tmp21 = tl.full([1], 8, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp25 = tl.load(in_ptr5 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp26 = tmp25 * tmp7
tmp27 = tl.load(in_ptr6 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp28 = tmp27 + tmp10
tmp29 = tmp26 * tmp28
tmp30 = tl.load(in_ptr7 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp31 = tmp29 - tmp30
tmp32 = tmp24 * tmp31
tmp33 = tl.load(in_ptr8 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp23 &
xmask, other=0.0)
tmp34 = tmp32 / tmp33
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp23, tmp34, tmp35)
tmp37 = tmp0 >= tmp21
tl.full([1], 12, tl.int64)
tmp40 = tl.load(in_ptr0 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp37 &
xmask, other=0.0)
tmp41 = tl.where(tmp23, tmp36, tmp40)
tmp42 = tl.where(tmp4, tmp19, tmp41)
tl.store(out_ptr0 + x3, tmp42, xmask)
def call(args):
(arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, arg7_1, arg8_1
) = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg7_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg8_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32
)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(768)](arg3_1, arg0_1, arg1_1, arg2_1,
arg4_1, arg5_1, arg6_1, arg7_1, arg8_1, buf0, 768, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
del arg5_1
del arg6_1
del arg7_1
del arg8_1
return buf0,
class GeometryFeatureNew(nn.Module):
def __init__(self):
super(GeometryFeatureNew, self).__init__()
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5,
input_6, input_7, input_8):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
arg4_1 = input_4
arg5_1 = input_5
arg6_1 = input_6
arg7_1 = input_7
arg8_1 = input_8
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1,
arg6_1, arg7_1, arg8_1])
return output[0]
|
phatli/PENet_ICRA2021
|
GeometryFeature
| false
| 4,124
|
[
"MIT"
] | 0
|
18594b8f11d4d99022d9c80a86a6e2d4e854404a
|
https://github.com/phatli/PENet_ICRA2021/tree/18594b8f11d4d99022d9c80a86a6e2d4e854404a
|
_VariableWeightsAndBiases
|
import torch
import torch.nn as nn
class _VariableWeightsAndBiases(nn.Module):
def __init__(self, in_features, hidden_features, out_features):
super(_VariableWeightsAndBiases, self).__init__()
self.linear = nn.Linear(in_features, hidden_features)
self.weights = nn.Linear(hidden_features, out_features)
self.biases = nn.Linear(hidden_features, out_features)
def forward(self, x):
x = torch.sigmoid(self.linear(x))
return self.weights(x), self.biases(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'hidden_features': 4, 'out_features': 4}]
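# Note (added): this follows the usual hypernetwork-head pattern - a shared
# sigmoid representation emits a weight tensor and a bias tensor of the same
# shape, to be consumed by some downstream computation. A minimal sketch:
def _variable_wb_example():
    m = _VariableWeightsAndBiases(in_features=4, hidden_features=4,
        out_features=4)
    w, b = m(torch.randn(2, 4))  # both [2, 4]
    return w, b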
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(256)](buf1, primals_2, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_7
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, primals_6, primals_4
class _VariableWeightsAndBiasesNew(nn.Module):
def __init__(self, in_features, hidden_features, out_features):
super(_VariableWeightsAndBiasesNew, self).__init__()
self.linear = nn.Linear(in_features, hidden_features)
self.weights = nn.Linear(hidden_features, out_features)
self.biases = nn.Linear(hidden_features, out_features)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_4 = self.weights.weight
primals_5 = self.weights.bias
primals_6 = self.biases.weight
primals_7 = self.biases.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
|
pjordan/dmch
|
_VariableWeightsAndBiases
| false
| 4,125
|
[
"Apache-2.0"
] | 0
|
84e04ddb0679007b15acfdc275e0e3f51e50d9f2
|
https://github.com/pjordan/dmch/tree/84e04ddb0679007b15acfdc275e0e3f51e50d9f2
|
Prototypes
|
import torch
import torch.nn as nn
from torch.nn import functional as F
class Prototypes(nn.Module):
def __init__(self, fdim, num_classes, temp=0.05):
super().__init__()
self.prototypes = nn.Linear(fdim, num_classes, bias=False)
self.temp = temp
def forward(self, x):
x = F.normalize(x, p=2, dim=1)
out = self.prototypes(x)
out = out / self.temp
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'fdim': 4, 'num_classes': 4}]
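# Note (added): with inputs L2-normalised along dim 1 and the bias-free
# prototype weights acting as class vectors, the linear layer computes
# similarity scores; dividing by temp=0.05 equals multiplying by 20, the
# constant fused into the second Triton kernel below. A minimal sketch:
def _prototypes_example():
    m = Prototypes(fdim=4, num_classes=4)
    logits = m(torch.randn(8, 4))  # [8, 4] temperature-scaled scores
    return logits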
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_div_1[grid(256)](buf2, 256, XBLOCK=256, num_warps=
4, num_stages=1)
return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class PrototypesNew(nn.Module):
def __init__(self, fdim, num_classes, temp=0.05):
super().__init__()
self.prototypes = nn.Linear(fdim, num_classes, bias=False)
self.temp = temp
def forward(self, input_0):
primals_2 = self.prototypes.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
pmirallesr/Dassl.pytorch
|
Prototypes
| false
| 4,126
|
[
"MIT"
] | 0
|
ec41f816bb60a9af94c9b055c500f0e2e404cfc6
|
https://github.com/pmirallesr/Dassl.pytorch/tree/ec41f816bb60a9af94c9b055c500f0e2e404cfc6
|
Value
|
import torch
import torch.nn as nn
class Value(nn.Module):
def __init__(self, num_inputs):
super(Value, self).__init__()
self.affine1 = nn.Linear(num_inputs, 64)
self.affine2 = nn.Linear(64, 64)
self.value_head = nn.Linear(64, 1)
self.value_head.weight.data.mul_(0.1)
self.value_head.bias.data.mul_(0.0)
self.device = torch.device('cuda:0' if torch.cuda.is_available() else
'cpu')
self
def forward(self, x):
x = torch.tanh(self.affine1(x))
x = torch.tanh(self.affine2(x))
state_values = self.value_head(x)
return state_values
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4}]
|
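A quick smoke test for the eager `Value` network above; it runs on CPU as written (the `self.device` attribute is set but never used in `forward`):
import torch

value_net = Value(num_inputs=4)
states = torch.rand(4, 4, 4, 4)
v = value_net(states)        # tanh -> tanh -> scalar head
print(v.shape)               # torch.Size([4, 4, 4, 1])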
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (1, 64), (64, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 1), (1, 64), 0),
alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_6, primals_4
class ValueNew(nn.Module):
def __init__(self, num_inputs):
super(ValueNew, self).__init__()
self.affine1 = nn.Linear(num_inputs, 64)
self.affine2 = nn.Linear(64, 64)
self.value_head = nn.Linear(64, 1)
self.value_head.weight.data.mul_(0.1)
self.value_head.bias.data.mul_(0.0)
self.device = torch.device('cuda:0' if torch.cuda.is_available() else
'cpu')
self
def forward(self, input_0):
primals_1 = self.affine1.weight
primals_2 = self.affine1.bias
primals_4 = self.affine2.weight
primals_5 = self.affine2.bias
primals_6 = self.value_head.weight
primals_7 = self.value_head.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
SaminYeasar/pytorch-trpo
|
Value
| false
| 4,127
|
[
"MIT"
] | 0
|
653a3357cf0461c175fb741604c0cd4ad1f4b841
|
https://github.com/SaminYeasar/pytorch-trpo/tree/653a3357cf0461c175fb741604c0cd4ad1f4b841
|
SpatialAttentionModule
|
import torch
import torch.nn as nn
class SpatialAttentionModule(nn.Module):
def __init__(self):
super(SpatialAttentionModule, self).__init__()
self.conv2d = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=
7, stride=1, padding=3)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avgout = torch.mean(x, dim=1, keepdim=True)
maxout, _ = torch.max(x, dim=1, keepdim=True)
out = torch.cat([avgout, maxout], dim=1)
out = self.sigmoid(self.conv2d(out))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
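The module reduces the channel dimension twice (mean and max), concatenates the two maps, and squeezes them through a 7x7 conv plus sigmoid, giving one attention value per spatial location. A shape walk-through on random data:
import torch

sam = SpatialAttentionModule()
x = torch.rand(4, 4, 4, 4)                  # (N, C, H, W)
attn = sam(x)
print(attn.shape)                           # torch.Size([4, 1, 4, 4])
print(attn.min().item() >= 0.0, attn.max().item() <= 1.0)   # True True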
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
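    # x1 indexes the concatenated output channel: the x1 < 1 branch below
    # reproduces the channel-wise mean (avgout), the x1 >= 1 branch the
    # channel-wise max (maxout).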
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp19 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp15, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_sigmoid_1[grid(64)](buf2, primals_3,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return buf2, primals_2, buf0, buf2
class SpatialAttentionModuleNew(nn.Module):
def __init__(self):
super(SpatialAttentionModuleNew, self).__init__()
self.conv2d = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=
7, stride=1, padding=3)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.conv2d.weight
primals_3 = self.conv2d.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
poppy862/Qnet
|
SpatialAttentionModule
| false
| 4,128
|
[
"Apache-2.0"
] | 0
|
da751bc6eb9ae23e0ff9b96fe0afdfd6bed31f8b
|
https://github.com/poppy862/Qnet/tree/da751bc6eb9ae23e0ff9b96fe0afdfd6bed31f8b
|
SumLossModule
|
import torch
import torch.nn.functional as F
class SumLossModule(torch.nn.Module):
def __init__(self):
super(SumLossModule, self).__init__()
def forward(self, predictions, targets):
y_losses = F.cross_entropy(predictions, targets, reduction='none')
y_losses = torch.sum(y_losses, dim=[1, 2])
Y_loss = torch.logsumexp(y_losses, dim=0)
return Y_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
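The loss runs in three stages that the kernels below fuse: per-pixel cross-entropy against soft (probability) targets, a spatial sum per sample, then a logsumexp over the batch. An eager sketch, assuming a PyTorch version that accepts probability targets in `F.cross_entropy`:
import torch
import torch.nn.functional as F

preds = torch.rand(4, 4, 4, 4)
targets = torch.rand(4, 4, 4, 4)            # soft targets, as in get_inputs()

per_pixel = F.cross_entropy(preds, targets, reduction='none')   # (4, 4, 4)
per_sample = per_pixel.sum(dim=[1, 2])                          # (4,)
total = torch.logsumexp(per_sample, dim=0)                      # scalar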
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp13 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp16 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.where(xmask, tmp28, 0)
tmp31 = tl.sum(tmp30, 1)[:, None]
tl.store(out_ptr0 + x0, tmp31, xmask)
@triton.jit
def triton_per_fused_logsumexp_2(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tl_math.abs(tmp3)
tmp5 = float('inf')
tmp6 = tmp4 == tmp5
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp3)
tmp9 = tmp0 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = tl_math.log(tmp13)
tmp15 = tmp14 + tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused__log_softmax_mul_neg_sum_1[grid(4)](buf0, arg0_1,
buf1, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_logsumexp_2[grid(1)](buf4, buf1, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf1
return buf4,
class SumLossModuleNew(torch.nn.Module):
def __init__(self):
super(SumLossModuleNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
pkalluri/specialized-conditional-pcnn
|
SumLossModule
| false
| 4,129
|
[
"Apache-2.0"
] | 0
|
ed94e47654ed749a7dd3492c4e074e2a8fb12df8
|
https://github.com/pkalluri/specialized-conditional-pcnn/tree/ed94e47654ed749a7dd3492c4e074e2a8fb12df8
|
DQN
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DQN(nn.Module):
def __init__(self, num_in_features, num_out_features):
super(DQN, self).__init__()
self.linear1 = nn.Linear(num_in_features, 32)
self.ln1 = nn.LayerNorm(32)
self.linear2 = nn.Linear(32, 64)
self.ln2 = nn.LayerNorm(64)
self.linear3 = nn.Linear(64, 64)
self.ln3 = nn.LayerNorm(64)
self.linear4 = nn.Linear(64, 32)
self.ln4 = nn.LayerNorm(32)
self.out_layer = nn.Linear(32, num_out_features)
def forward(self, x):
x = F.leaky_relu(self.ln1(self.linear1(x)))
x = F.leaky_relu(self.ln2(self.linear2(x)))
x = F.leaky_relu(self.ln3(self.linear3(x)))
x = F.leaky_relu(self.ln4(self.linear4(x)))
return self.out_layer(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_in_features': 4, 'num_out_features': 4}]
|
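A forward smoke test for the eager `DQN` above; since every `LayerNorm` acts on the last dimension, a 4-D input flows through with only the trailing dimension remapped:
import torch

dqn = DQN(num_in_features=4, num_out_features=4)
q_values = dqn(torch.rand(4, 4, 4, 4))
print(q_values.shape)        # torch.Size([4, 4, 4, 4])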
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_leaky_relu_native_layer_norm_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 32 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 32, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 32.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 0.01
tmp31 = tmp27 * tmp30
tmp32 = tl.where(tmp29, tmp27, tmp31)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(in_out_ptr1 + (r1 + 32 * x0), tmp32, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_leaky_relu_native_layer_norm_1(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 0.01
tmp31 = tmp27 * tmp30
tmp32 = tl.where(tmp29, tmp27, tmp31)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(in_out_ptr1 + (r1 + 64 * x0), tmp32, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32,), (1,))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32), (32, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64,), (1,))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64, 64), (64, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64,), (1,))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (32, 64), (64, 1))
assert_size_stride(primals_15, (32,), (1,))
assert_size_stride(primals_16, (32,), (1,))
assert_size_stride(primals_17, (32,), (1,))
assert_size_stride(primals_18, (4, 32), (32, 1))
assert_size_stride(primals_19, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.
float32)
buf6 = buf5
del buf5
get_raw_stream(0)
triton_per_fused_leaky_relu_native_layer_norm_0[grid(64)](buf4,
buf6, buf0, primals_4, primals_5, buf1, 64, 32, XBLOCK=1,
num_warps=2, num_stages=1)
buf7 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf6, (64, 32),
(32, 1), 0), reinterpret_tensor(primals_6, (32, 64), (1, 32), 0
), alpha=1, beta=1, out=buf7)
del primals_7
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf11 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf9
buf12 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch
.float32)
buf13 = buf12
del buf12
triton_per_fused_leaky_relu_native_layer_norm_1[grid(64)](buf11,
buf13, buf7, primals_8, primals_9, buf8, 64, 64, XBLOCK=1,
num_warps=2, num_stages=1)
buf14 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf13, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_10, (64, 64), (1, 64),
0), alpha=1, beta=1, out=buf14)
del primals_11
buf15 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf16 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf18 = reinterpret_tensor(buf16, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf16
buf19 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch
.float32)
buf20 = buf19
del buf19
triton_per_fused_leaky_relu_native_layer_norm_1[grid(64)](buf18,
buf20, buf14, primals_12, primals_13, buf15, 64, 64, XBLOCK=1,
num_warps=2, num_stages=1)
buf21 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.addmm(primals_15, reinterpret_tensor(buf20, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_14, (64, 32), (1, 64),
0), alpha=1, beta=1, out=buf21)
del primals_15
buf22 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf23 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf25 = reinterpret_tensor(buf23, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf23
buf26 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.
float32)
buf27 = buf26
del buf26
triton_per_fused_leaky_relu_native_layer_norm_0[grid(64)](buf25,
buf27, buf21, primals_16, primals_17, buf22, 64, 32, XBLOCK=1,
num_warps=2, num_stages=1)
buf28 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_19, reinterpret_tensor(buf27, (64, 32),
(32, 1), 0), reinterpret_tensor(primals_18, (32, 4), (1, 32), 0
), alpha=1, beta=1, out=buf28)
del primals_19
return (reinterpret_tensor(buf28, (4, 4, 4, 4), (64, 16, 4, 1), 0),
primals_4, primals_5, primals_8, primals_9, primals_12, primals_13,
primals_16, primals_17, reinterpret_tensor(primals_3, (64, 4), (4,
1), 0), buf0, buf1, buf4, reinterpret_tensor(buf6, (64, 32), (32, 1
), 0), buf7, buf8, buf11, reinterpret_tensor(buf13, (64, 64), (64,
1), 0), buf14, buf15, buf18, reinterpret_tensor(buf20, (64, 64), (
64, 1), 0), buf21, buf22, buf25, reinterpret_tensor(buf27, (64, 32),
(32, 1), 0), primals_18, primals_14, primals_10, primals_6)
class DQNNew(nn.Module):
def __init__(self, num_in_features, num_out_features):
super(DQNNew, self).__init__()
self.linear1 = nn.Linear(num_in_features, 32)
self.ln1 = nn.LayerNorm(32)
self.linear2 = nn.Linear(32, 64)
self.ln2 = nn.LayerNorm(64)
self.linear3 = nn.Linear(64, 64)
self.ln3 = nn.LayerNorm(64)
self.linear4 = nn.Linear(64, 32)
self.ln4 = nn.LayerNorm(32)
self.out_layer = nn.Linear(32, num_out_features)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.ln1.weight
primals_5 = self.ln1.bias
primals_6 = self.linear2.weight
primals_7 = self.linear2.bias
primals_8 = self.ln2.weight
primals_9 = self.ln2.bias
primals_10 = self.linear3.weight
primals_11 = self.linear3.bias
primals_12 = self.ln3.weight
primals_13 = self.ln3.bias
primals_14 = self.linear4.weight
primals_15 = self.linear4.bias
primals_16 = self.ln4.weight
primals_17 = self.ln4.bias
primals_18 = self.out_layer.weight
primals_19 = self.out_layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0]
|
pgabriela/dqn-jitsi-autoscaler
|
DQN
| false
| 4,130
|
[
"Apache-2.0"
] | 0
|
b39eb335e584095ef66a9941dbe0b2ea21a02d4a
|
https://github.com/pgabriela/dqn-jitsi-autoscaler/tree/b39eb335e584095ef66a9941dbe0b2ea21a02d4a
|
AttentiveNorm2d
|
import torch
import torch.nn as nn
import torch.utils.data
class AttentiveNorm2d(nn.BatchNorm2d):
def __init__(self, num_features, hidden_channels=32, eps=1e-05,
momentum=0.1, track_running_stats=False):
super(AttentiveNorm2d, self).__init__(num_features, eps=eps,
momentum=momentum, affine=False, track_running_stats=
track_running_stats)
self.gamma = nn.Parameter(torch.randn(hidden_channels, num_features))
self.beta = nn.Parameter(torch.randn(hidden_channels, num_features))
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(num_features, hidden_channels)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
output = super(AttentiveNorm2d, self).forward(x)
size = output.size()
b, c, _, _ = x.size()
y = self.avgpool(x).view(b, c)
y = self.fc(y)
y = self.sigmoid(y)
gamma = y @ self.gamma
beta = y @ self.beta
gamma = gamma.unsqueeze(-1).unsqueeze(-1).expand(size)
beta = beta.unsqueeze(-1).unsqueeze(-1).expand(size)
return gamma * output + beta
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
|
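Instead of BatchNorm's fixed affine parameters, the module predicts per-sample mixing weights with a squeeze-and-excitation style branch and mixes the `gamma`/`beta` banks with them. A sketch of that attention path (shapes only; weights are random at init):
import torch

atn = AttentiveNorm2d(num_features=4, hidden_channels=32)
x = torch.rand(4, 4, 4, 4)
y = atn.sigmoid(atn.fc(atn.avgpool(x).view(4, 4)))   # (4, 32) mixing weights
gamma, beta = y @ atn.gamma, y @ atn.beta            # per-sample (4, 4) affine
print(gamma.shape, beta.shape, atn(x).shape)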
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = rindex // 16
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_mean_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_add_mul_3(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 16
x4 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask)
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x3, xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = tmp3 * tmp4
tmp6 = tmp0 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x4, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (32, 4), (4, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (32, 4), (4, 1))
assert_size_stride(primals_5, (32, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf1, (1, 4, 1, 1), (4, 1, 1, 1), 0)
del buf1
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_0[grid(4)](buf3,
primals_1, buf0, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf5 = buf4
del buf4
triton_per_fused_mean_1[grid(16)](buf5, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 32), (1, 4), 0), out=buf6)
del primals_2
buf7 = buf6
del buf6
triton_poi_fused_sigmoid_2[grid(128)](buf7, primals_3, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf7, primals_4, out=buf8)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf7, primals_5, out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__native_batch_norm_legit_add_mul_3[grid(256)](buf8,
primals_1, buf0, buf3, buf9, buf10, 256, XBLOCK=128, num_warps=
4, num_stages=1)
del buf8
del buf9
return buf10, primals_1, buf0, buf3, reinterpret_tensor(buf5, (4, 4), (
4, 1), 0), buf7, reinterpret_tensor(primals_5, (4, 32), (1, 4), 0
), reinterpret_tensor(primals_4, (4, 32), (1, 4), 0)
class AttentiveNorm2dNew(nn.BatchNorm2d):
def __init__(self, num_features, hidden_channels=32, eps=1e-05,
momentum=0.1, track_running_stats=False):
super(AttentiveNorm2dNew, self).__init__(num_features, eps=eps,
momentum=momentum, affine=False, track_running_stats=
track_running_stats)
self.gamma = nn.Parameter(torch.randn(hidden_channels, num_features))
self.beta = nn.Parameter(torch.randn(hidden_channels, num_features))
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(num_features, hidden_channels)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.gamma
primals_4 = self.beta
primals_5 = self.fc.weight
primals_3 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
ppomelo/Attentive-Transformation-Based-Normalization
|
AttentiveNorm2d
| false
| 4,131
|
[
"Apache-2.0"
] | 0
|
62ad02eb025613e90f4fe0e0a9f0f85839e53092
|
https://github.com/ppomelo/Attentive-Transformation-Based-Normalization/tree/62ad02eb025613e90f4fe0e0a9f0f85839e53092
|
DenseCrossEntropy
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class DenseCrossEntropy(nn.Module):
def __init__(self):
super().__init__()
def forward(self, logits, labels):
logits = logits.float()
labels = labels.float()
logprobs = F.log_softmax(logits, dim=-1)
loss = labels * logprobs
loss = loss.sum(-1)
return loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
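An eager reference point for the fused kernels below. Note the module returns the raw (un-negated) mean of `labels * log_softmax(logits)`, so for non-negative labels the value is always <= 0:
import torch
import torch.nn.functional as F

logits = torch.rand(4, 4, 4, 4)
labels = torch.rand(4, 4, 4, 4)
ref = (labels * F.log_softmax(logits, dim=-1)).sum(-1).mean()
print(ref.item() <= 0.0)     # True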
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp1 - tmp12
tmp14 = tmp0 * tmp13
tmp16 = tmp3 - tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp20 = tmp6 - tmp12
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp24 = tmp9 - tmp12
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
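    # loss.sum(-1) leaves 4 * 4 * 4 = 64 values, so dividing by 64.0 below
    # is the final loss.mean().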
tmp30 = 64.0
tmp31 = tmp29 / tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp31, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_mean_mul_sum_1[grid(1)](buf2, arg1_1,
buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
class DenseCrossEntropyNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
prakhar154/Cassava-Leaf-Disease-Classification
|
DenseCrossEntropy
| false
| 4,132
|
[
"MIT"
] | 0
|
04824834a6a1898c77858e8134bd3767c64789f2
|
https://github.com/prakhar154/Cassava-Leaf-Disease-Classification/tree/04824834a6a1898c77858e8134bd3767c64789f2
|
BCEDiceLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class BCEDiceLoss(nn.Module):
def __init__(self):
super(BCEDiceLoss, self).__init__()
def forward(self, input, target):
bce = F.binary_cross_entropy_with_logits(input, target)
smooth = 1e-05
input = torch.sigmoid(input)
num = target.size(0)
input = input.view(num, -1)
target = target.view(num, -1)
intersection = input * target
dice = (2.0 * intersection.sum(1) + smooth) / (input.sum(1) +
target.sum(1) + smooth)
dice = 1 - dice.sum() / num
return 0.5 * bce + 0.5 * dice
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
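The loss is an even blend of BCE-with-logits and (1 - mean Dice), with per-sample Dice computed over flattened maps. An eager sketch with the same constants as the module above:
import torch
import torch.nn.functional as F

logits = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)

bce = F.binary_cross_entropy_with_logits(logits, target)
p = torch.sigmoid(logits).view(4, -1)
t = target.view(4, -1)
smooth = 1e-05
dice = (2.0 * (p * t).sum(1) + smooth) / (p.sum(1) + t.sum(1) + smooth)
loss = 0.5 * bce + 0.5 * (1 - dice.sum() / 4)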
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)
@triton.jit
def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp13 = tl.load(in_out_ptr0 + 0)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1])
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1e-05
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp3
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
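    # 256.0 = input.numel(), the divisor behind the default 'mean' reduction
    # of binary_cross_entropy_with_logits; 0.25 folds in the 1 / num batch
    # average of the per-sample dice terms (num = 4).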
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tmp17 = 0.5
tmp18 = tmp16 * tmp17
tmp19 = 0.25
tmp20 = tmp12 * tmp19
tmp21 = 1.0
tmp22 = tmp21 - tmp20
tmp23 = tmp22 * tmp17
tmp24 = tmp18 + tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](arg0_1,
arg1_1, buf0, 1, 256, num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_mul_sum_1[grid(4)](arg1_1, arg0_1, buf1, buf2,
buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf5 = buf0
del buf0
triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2[
grid(1)](buf5, buf1, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2,
num_stages=1)
del buf1
del buf2
del buf3
return buf5,
class BCEDiceLossNew(nn.Module):
def __init__(self):
super(BCEDiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ppomelo/Attentive-Transformation-Based-Normalization
|
BCEDiceLoss
| false
| 4,133
|
[
"Apache-2.0"
] | 0
|
62ad02eb025613e90f4fe0e0a9f0f85839e53092
|
https://github.com/ppomelo/Attentive-Transformation-Based-Normalization/tree/62ad02eb025613e90f4fe0e0a9f0f85839e53092
|