| entry_point (string) | original_triton_python_code (string) | optimised_triton_code (string) | repo_name (string) | module_name (string) | synthetic (bool) | uuid (int64) | licenses (list) | stars (int64) | sha (string) | repo_link (string) |
|---|---|---|---|---|---|---|---|---|---|---|
SMAPELoss
|
import torch
import torch.nn as nn
class SMAPELoss(nn.Module):
def forward(self, input, target):
return (torch.abs(input - target) / (torch.abs(input) + torch.abs(
target) + 0.01)).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
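# Fused SMAPE kernel: a single program loads all 256 input elements, computes
# |x - y| / (|x| + |y| + 0.01) elementwise, and reduces the result to its mean.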
@triton.jit
def triton_per_fused_abs_add_div_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl_math.abs(tmp0)
tmp5 = tl_math.abs(tmp1)
tmp6 = tmp4 + tmp5
tmp7 = 0.01
tmp8 = tmp6 + tmp7
tmp9 = tmp3 / tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 256.0
tmp14 = tmp12 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_div_mean_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SMAPELossNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| LongerVision/oidn | SMAPELoss | false | 5,556 | ["Apache-2.0"] | 1 | 2f9e59f8b747b217f78c5c274f4f2bff347a03a7 | https://github.com/LongerVision/oidn/tree/2f9e59f8b747b217f78c5c274f4f2bff347a03a7 |
GradientLoss
|
import torch
import torch.nn as nn
def gradient(input):
input0 = input[..., :-1, :-1]
didy = input[..., 1:, :-1] - input0
didx = input[..., :-1, 1:] - input0
return torch.cat((didy, didx), -3)
class GradientLoss(nn.Module):
def forward(self, input, target):
return torch.abs(gradient(input) - gradient(target)).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
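# Fused gradient-loss kernel: one program reconstructs both finite-difference
# tensors (didy for channels 0-3, didx for channels 4-7) directly from the
# inputs, takes |grad(input) - grad(target)|, and averages over all 288 elements.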
@triton.jit
def triton_per_fused_abs_cat_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
rnumel = 288
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r2 = rindex // 9 % 8
r0 = rindex % 3
r1 = rindex // 3 % 3
r3 = rindex // 72
tmp0 = r2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + tl.broadcast_to(4 + r0 + 4 * r1 + 16 * r2 + 64 *
r3, [RBLOCK]), rmask & tmp4, other=0.0)
tmp6 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 16 * r2 + 64 *
r3, [RBLOCK]), rmask & tmp4, other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr0 + tl.broadcast_to(1 + r0 + 4 * r1 + 16 * (-4 +
r2) + 64 * r3, [RBLOCK]), rmask & tmp10, other=0.0)
tmp14 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 16 * (-4 + r2) +
64 * r3, [RBLOCK]), rmask & tmp10, other=0.0)
tmp15 = tmp13 - tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tmp19 = tl.load(in_ptr1 + tl.broadcast_to(4 + r0 + 4 * r1 + 16 * r2 +
64 * r3, [RBLOCK]), rmask & tmp4, other=0.0)
tmp20 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 16 * r2 + 64 *
r3, [RBLOCK]), rmask & tmp4, other=0.0)
tmp21 = tmp19 - tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp4, tmp21, tmp22)
tmp24 = tl.load(in_ptr1 + tl.broadcast_to(1 + r0 + 4 * r1 + 16 * (-4 +
r2) + 64 * r3, [RBLOCK]), rmask & tmp10, other=0.0)
tmp25 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 16 * (-4 + r2) +
64 * r3, [RBLOCK]), rmask & tmp10, other=0.0)
tmp26 = tmp24 - tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp10, tmp26, tmp27)
tmp29 = tl.where(tmp4, tmp23, tmp28)
tmp30 = tmp18 - tmp29
tmp31 = tl_math.abs(tmp30)
tmp32 = tl.broadcast_to(tmp31, [RBLOCK])
tmp34 = tl.where(rmask, tmp32, 0)
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp34, 0))
tmp36 = 288.0
tmp37 = tmp35 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_cat_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1,
1, 288, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def gradient(input):
input0 = input[..., :-1, :-1]
didy = input[..., 1:, :-1] - input0
didx = input[..., :-1, 1:] - input0
return torch.cat((didy, didx), -3)
class GradientLossNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| LongerVision/oidn | GradientLoss | false | 5,557 | ["Apache-2.0"] | 1 | 2f9e59f8b747b217f78c5c274f4f2bff347a03a7 | https://github.com/LongerVision/oidn/tree/2f9e59f8b747b217f78c5c274f4f2bff347a03a7 |
ResHead
|
from torch.nn import Module
import torch
import torch.nn as nn
import torch.utils.data
def gap2d(_w_in):
"""Helper for building a gap2d layer."""
return nn.AdaptiveAvgPool2d((1, 1))
def gap2d_cx(cx, _w_in):
"""Accumulates complexity of gap2d into cx = (h, w, flops, params, acts)."""
flops, params, acts = cx['flops'], cx['params'], cx['acts']
return {'h': 1, 'w': 1, 'flops': flops, 'params': params, 'acts': acts}
def linear(w_in, w_out, *, bias=False):
"""Helper for building a linear layer."""
return nn.Linear(w_in, w_out, bias=bias)
def linear_cx(cx, w_in, w_out, *, bias=False, num_locations=1):
"""Accumulates complexity of linear into cx = (h, w, flops, params, acts)."""
h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params'
], cx['acts']
flops += w_in * w_out * num_locations + (w_out * num_locations if bias else
0)
params += w_in * w_out + (w_out if bias else 0)
acts += w_out * num_locations
return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts}
class ResHead(Module):
"""ResNet head: AvgPool, 1x1."""
def __init__(self, w_in, num_classes):
super(ResHead, self).__init__()
self.avg_pool = gap2d(w_in)
self.fc = linear(w_in, num_classes, bias=True)
def forward(self, x):
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
@staticmethod
def complexity(cx, w_in, num_classes):
cx = gap2d_cx(cx, w_in)
cx = linear_cx(cx, w_in, num_classes, bias=True)
return cx
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'w_in': 4, 'num_classes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
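# Global average pooling kernel: each of the 16 (batch, channel) pairs averages
# its 16 spatial elements; the fully connected layer is handled afterwards by
# extern_kernels.addmm on the pooled (4, 4) tensor.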
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4,
1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf2)
del primals_2
del primals_3
return buf2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0)
def gap2d(_w_in):
"""Helper for building a gap2d layer."""
return nn.AdaptiveAvgPool2d((1, 1))
def gap2d_cx(cx, _w_in):
"""Accumulates complexity of gap2d into cx = (h, w, flops, params, acts)."""
flops, params, acts = cx['flops'], cx['params'], cx['acts']
return {'h': 1, 'w': 1, 'flops': flops, 'params': params, 'acts': acts}
def linear(w_in, w_out, *, bias=False):
"""Helper for building a linear layer."""
return nn.Linear(w_in, w_out, bias=bias)
def linear_cx(cx, w_in, w_out, *, bias=False, num_locations=1):
"""Accumulates complexity of linear into cx = (h, w, flops, params, acts)."""
h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params'
], cx['acts']
flops += w_in * w_out * num_locations + (w_out * num_locations if bias else
0)
params += w_in * w_out + (w_out if bias else 0)
acts += w_out * num_locations
return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts}
class ResHeadNew(Module):
"""ResNet head: AvgPool, 1x1."""
def __init__(self, w_in, num_classes):
super(ResHeadNew, self).__init__()
self.avg_pool = gap2d(w_in)
self.fc = linear(w_in, num_classes, bias=True)
@staticmethod
def complexity(cx, w_in, num_classes):
cx = gap2d_cx(cx, w_in)
cx = linear_cx(cx, w_in, num_classes, bias=True)
return cx
def forward(self, input_0):
primals_2 = self.fc.weight
primals_3 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| MAC-AutoML/XCompression | ResHead | false | 5,558 | ["MIT"] | 1 | 9f76eb3ccfb3057110ecf12aa48dec00a4667a25 | https://github.com/MAC-AutoML/XCompression/tree/9f76eb3ccfb3057110ecf12aa48dec00a4667a25 |
MaxPool
|
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
class MaxPool(nn.Module):
def __init__(self, kernel_size, stride=1, padding=1, zero_pad=False):
super(MaxPool, self).__init__()
self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None
self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding)
def forward(self, x):
if self.zero_pad:
x = self.zero_pad(x)
x = self.pool(x)
if self.zero_pad:
x = x[:, :, 1:, 1:]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
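# Max-pool kernel (kernel_size=4, stride=1, padding=1 on a 4x4 input -> 3x3
# output): each of the 144 outputs takes the maximum over a 4x4 window, with
# out-of-bounds positions treated as -inf.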
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3 % 3
x0 = xindex % 3
x2 = xindex // 9
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=float('-inf'))
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp16 & xmask,
other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + x0 + 4 * x1 + 16 * x2), tmp23 & xmask,
other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp5 & tmp29
tmp31 = tl.load(in_ptr0 + (-2 + x0 + 4 * x1 + 16 * x2), tmp30 & xmask,
other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = x1
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp36 & tmp9
tmp38 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp37 & xmask,
other=float('-inf'))
tmp39 = triton_helpers.maximum(tmp38, tmp32)
tmp40 = tmp36 & tmp15
tmp41 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp40 & xmask, other
=float('-inf'))
tmp42 = triton_helpers.maximum(tmp41, tmp39)
tmp43 = tmp36 & tmp22
tmp44 = tl.load(in_ptr0 + (1 + x0 + 4 * x1 + 16 * x2), tmp43 & xmask,
other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp42)
tmp46 = tmp36 & tmp29
tmp47 = tl.load(in_ptr0 + (2 + x0 + 4 * x1 + 16 * x2), tmp46 & xmask,
other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = 1 + x1
tmp50 = tmp49 >= tmp1
tmp51 = tmp49 < tmp3
tmp52 = tmp50 & tmp51
tmp53 = tmp52 & tmp9
tmp54 = tl.load(in_ptr0 + (3 + x0 + 4 * x1 + 16 * x2), tmp53 & xmask,
other=float('-inf'))
tmp55 = triton_helpers.maximum(tmp54, tmp48)
tmp56 = tmp52 & tmp15
tmp57 = tl.load(in_ptr0 + (4 + x0 + 4 * x1 + 16 * x2), tmp56 & xmask,
other=float('-inf'))
tmp58 = triton_helpers.maximum(tmp57, tmp55)
tmp59 = tmp52 & tmp22
tmp60 = tl.load(in_ptr0 + (5 + x0 + 4 * x1 + 16 * x2), tmp59 & xmask,
other=float('-inf'))
tmp61 = triton_helpers.maximum(tmp60, tmp58)
tmp62 = tmp52 & tmp29
tmp63 = tl.load(in_ptr0 + (6 + x0 + 4 * x1 + 16 * x2), tmp62 & xmask,
other=float('-inf'))
tmp64 = triton_helpers.maximum(tmp63, tmp61)
tmp65 = 2 + x1
tmp66 = tmp65 >= tmp1
tmp67 = tmp65 < tmp3
tmp68 = tmp66 & tmp67
tmp69 = tmp68 & tmp9
tmp70 = tl.load(in_ptr0 + (7 + x0 + 4 * x1 + 16 * x2), tmp69 & xmask,
other=float('-inf'))
tmp71 = triton_helpers.maximum(tmp70, tmp64)
tmp72 = tmp68 & tmp15
tmp73 = tl.load(in_ptr0 + (8 + x0 + 4 * x1 + 16 * x2), tmp72 & xmask,
other=float('-inf'))
tmp74 = triton_helpers.maximum(tmp73, tmp71)
tmp75 = tmp68 & tmp22
tmp76 = tl.load(in_ptr0 + (9 + x0 + 4 * x1 + 16 * x2), tmp75 & xmask,
other=float('-inf'))
tmp77 = triton_helpers.maximum(tmp76, tmp74)
tmp78 = tmp68 & tmp29
tmp79 = tl.load(in_ptr0 + (10 + x0 + 4 * x1 + 16 * x2), tmp78 & xmask,
other=float('-inf'))
tmp80 = triton_helpers.maximum(tmp79, tmp77)
tl.store(out_ptr0 + x4, tmp80, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(144)](arg0_1, buf0,
144, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MaxPoolNew(nn.Module):
def __init__(self, kernel_size, stride=1, padding=1, zero_pad=False):
super(MaxPoolNew, self).__init__()
self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None
self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| LongKt7/Face_Recognize_Pytorch | MaxPool | false | 5,559 | ["MIT"] | 1 | baa02e633d379abe1001c8b8acb942617177329c | https://github.com/LongKt7/Face_Recognize_Pytorch/tree/baa02e633d379abe1001c8b8acb942617177329c |
UsBlock_nounpool
|
import torch
import torch.nn as nn
def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True):
return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=
stride, padding=padding, bias=bias)
class UsBlock_nounpool(nn.Module):
def __init__(self, in_channels, out_channels, up_mode='transpose'):
super(UsBlock_nounpool, self).__init__()
self.conv = conv3x3(in_channels, out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
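# The 3D convolution itself is dispatched to extern_kernels.convolution; this
# kernel fuses the bias add with the in-place ReLU and also stores the
# (output <= 0) mask that autograd needs for the ReLU backward pass.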
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4),
(256, 64, 16, 4, 1), 0), buf2
def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True):
return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=
stride, padding=padding, bias=bias)
class UsBlock_nounpoolNew(nn.Module):
def __init__(self, in_channels, out_channels, up_mode='transpose'):
super(UsBlock_nounpoolNew, self).__init__()
self.conv = conv3x3(in_channels, out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| MATHplus-Young-Academy/P2-Cardiac-Motion | UsBlock_nounpool | false | 5,560 | ["Apache-2.0"] | 1 | 844995e8e5760f981c425d13c0bd7f2f3bb8baec | https://github.com/MATHplus-Young-Academy/P2-Cardiac-Motion/tree/844995e8e5760f981c425d13c0bd7f2f3bb8baec |
WDV29LayerNormalization
|
import numbers
import torch
import torch.nn.functional as F
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
from torch.nn.parameter import Parameter
from torch.nn import init
import torch.optim
import torch.optim.lr_scheduler
class WDV29LayerNormalization(torch.nn.Module):
"""Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization`_ .
.. math::
y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta
The mean and standard-deviation are calculated separately over the last
certain number dimensions which have to be of the shape specified by
:attr:`normalized_shape`.
:math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of
:attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
.. note::
Unlike Batch Normalization and Instance Normalization, which applies
scalar scale and bias for each entire channel/plane with the
:attr:`affine` option, Layer Normalization applies per-element scale and
bias with :attr:`elementwise_affine`.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
normalized_shape (int or list or torch.Size): input shape from an expected input
of size
.. math::
[* \\times \\text{normalized\\_shape}[0] \\times \\text{normalized\\_shape}[1]
\\times \\ldots \\times \\text{normalized\\_shape}[-1]]
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension which is expected to be of that specific size.
eps: a value added to the denominator for numerical stability. Default: 1e-5
elementwise_affine: a boolean value that when set to ``True``, this module
has learnable per-element affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, *)`
- Output: :math:`(N, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 5, 10, 10)
>>> # With Learnable Parameters
>>> m = nn.LayerNorm(input.size()[1:])
>>> # Without Learnable Parameters
>>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False)
>>> # Normalize over last two dimensions
>>> m = nn.LayerNorm([10, 10])
>>> # Normalize over last dimension of size 10
>>> m = nn.LayerNorm(10)
>>> # Activating the module
>>> output = m(input)
.. _`Layer Normalization`: https://arxiv.org/abs/1607.06450
"""
__constants__ = ['normalized_shape', 'weight', 'bias', 'eps',
'elementwise_affine']
def __init__(self, normalized_shape, wd_require_gradient=False, eps=
1e-05, elementwise_affine=True):
super(WDV29LayerNormalization, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = normalized_shape,
self.normalized_shape = tuple(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.Tensor(*normalized_shape),
requires_grad=wd_require_gradient)
self.tanh_weight_weight = Parameter(torch.Tensor(*normalized_shape)
)
self.tanh_weight_bias = Parameter(torch.Tensor(*normalized_shape))
self.bias = Parameter(torch.Tensor(*normalized_shape),
requires_grad=wd_require_gradient)
self.tanh_bias_weight = Parameter(torch.Tensor(*normalized_shape))
self.tanh_bias_bias = Parameter(torch.Tensor(*normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
torch.nn.init.constant_(self.tanh_weight_weight, 1.0)
torch.nn.init.constant_(self.tanh_weight_bias, 1.0)
torch.nn.init.constant_(self.tanh_bias_weight, 0.0)
torch.nn.init.constant_(self.tanh_bias_bias, 0.0)
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input):
weight = self.weight
bias = self.bias
weight = self.tanh_weight_weight * torch.tanh(weight
) + self.tanh_bias_weight
bias = self.tanh_weight_bias * torch.tanh(bias) + self.tanh_bias_bias
return F.layer_norm(input, self.normalized_shape, weight, bias,
self.eps)
def extra_repr(self):
return (
'{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}'
.format(**self.__dict__))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'normalized_shape': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numbers
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
from torch.nn.parameter import Parameter
from torch.nn import init
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
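# First kernel: per-row mean and rsqrt(var + eps) over the last dimension
# (size 4). Second kernel: fuses the normalization with the tanh-reparameterized
# affine parameters (scale and shift each built as a * tanh(p) + b).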
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_tanh_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp5 * tmp7
tmp10 = tmp8 + tmp9
tmp11 = tmp4 * tmp10
tmp14 = libdevice.tanh(tmp13)
tmp15 = tmp12 * tmp14
tmp17 = tmp15 + tmp16
tmp18 = tmp11 + tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_7, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_tanh_1[grid(256)](primals_7,
buf0, buf1, primals_3, primals_1, primals_4, primals_5,
primals_2, primals_6, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_3
del primals_4
del primals_5
del primals_6
return buf2, primals_1, primals_2, primals_7
class WDV29LayerNormalizationNew(torch.nn.Module):
"""Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization`_ .
.. math::
y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta
The mean and standard-deviation are calculated separately over the last
certain number dimensions which have to be of the shape specified by
:attr:`normalized_shape`.
:math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of
:attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
.. note::
Unlike Batch Normalization and Instance Normalization, which applies
scalar scale and bias for each entire channel/plane with the
:attr:`affine` option, Layer Normalization applies per-element scale and
bias with :attr:`elementwise_affine`.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
normalized_shape (int or list or torch.Size): input shape from an expected input
of size
.. math::
[* \\times \\text{normalized\\_shape}[0] \\times \\text{normalized\\_shape}[1]
\\times \\ldots \\times \\text{normalized\\_shape}[-1]]
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension which is expected to be of that specific size.
eps: a value added to the denominator for numerical stability. Default: 1e-5
elementwise_affine: a boolean value that when set to ``True``, this module
has learnable per-element affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, *)`
- Output: :math:`(N, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 5, 10, 10)
>>> # With Learnable Parameters
>>> m = nn.LayerNorm(input.size()[1:])
>>> # Without Learnable Parameters
>>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False)
>>> # Normalize over last two dimensions
>>> m = nn.LayerNorm([10, 10])
>>> # Normalize over last dimension of size 10
>>> m = nn.LayerNorm(10)
>>> # Activating the module
>>> output = m(input)
.. _`Layer Normalization`: https://arxiv.org/abs/1607.06450
"""
__constants__ = ['normalized_shape', 'weight', 'bias', 'eps',
'elementwise_affine']
def __init__(self, normalized_shape, wd_require_gradient=False, eps=
1e-05, elementwise_affine=True):
super(WDV29LayerNormalizationNew, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = normalized_shape,
self.normalized_shape = tuple(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.Tensor(*normalized_shape),
requires_grad=wd_require_gradient)
self.tanh_weight_weight = Parameter(torch.Tensor(*normalized_shape)
)
self.tanh_weight_bias = Parameter(torch.Tensor(*normalized_shape))
self.bias = Parameter(torch.Tensor(*normalized_shape),
requires_grad=wd_require_gradient)
self.tanh_bias_weight = Parameter(torch.Tensor(*normalized_shape))
self.tanh_bias_bias = Parameter(torch.Tensor(*normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
torch.nn.init.constant_(self.tanh_weight_weight, 1.0)
torch.nn.init.constant_(self.tanh_weight_bias, 1.0)
torch.nn.init.constant_(self.tanh_bias_weight, 0.0)
torch.nn.init.constant_(self.tanh_bias_bias, 0.0)
init.ones_(self.weight)
init.zeros_(self.bias)
def extra_repr(self):
return (
'{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}'
.format(**self.__dict__))
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.tanh_weight_weight
primals_3 = self.tanh_weight_bias
primals_4 = self.bias
primals_5 = self.tanh_bias_weight
primals_6 = self.tanh_bias_bias
primals_7 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| Lollipop321/weight-distillation | WDV29LayerNormalization | false | 5,561 | ["BSD-3-Clause"] | 1 | cfc76ec58e3e88094dde1825287b2968f9718431 | https://github.com/Lollipop321/weight-distillation/tree/cfc76ec58e3e88094dde1825287b2968f9718431 |
WDV29Linear
|
import math
import torch
import torch.nn.functional as F
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
from torch.nn.parameter import Parameter
from torch.nn import init
import torch.optim
import torch.optim.lr_scheduler
class WDV29Linear(torch.nn.Module):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
additional dimensions and :math:`H_{in} = \\text{in\\_features}`
- Output: :math:`(N, *, H_{out})` where all but the last dimension
are the same shape as the input and :math:`H_{out} = \\text{out\\_features}`.
Attributes:
weight: the learnable weights of the module of shape
:math:`(\\text{out\\_features}, \\text{in\\_features})`. The values are
initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where
:math:`k = \\frac{1}{\\text{in\\_features}}`
bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
:math:`k = \\frac{1}{\\text{in\\_features}}`
Examples::
>>> m = nn.Linear(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
__constants__ = ['bias', 'in_features', 'out_features']
def __init__(self, in_features, out_features, wd_require_gradient=False,
bias=True):
super(WDV29Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features),
requires_grad=wd_require_gradient)
self.tanh_weight_weight = Parameter(torch.Tensor(out_features,
in_features))
self.tanh_bias_weight = Parameter(torch.Tensor(out_features,
in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features), requires_grad
=wd_require_gradient)
self.tanh_weight_bias = Parameter(torch.Tensor(out_features))
self.tanh_bias_bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.constant_(self.tanh_weight_weight, 1.0)
torch.nn.init.constant_(self.tanh_weight_bias, 1.0)
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
torch.nn.init.constant_(self.tanh_bias_weight, 0.0)
torch.nn.init.constant_(self.tanh_bias_bias, 0.0)
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
weight = self.weight
weight = self.tanh_weight_weight * torch.tanh(weight
) + self.tanh_bias_weight
bias = self.bias
bias = self.tanh_weight_bias * torch.tanh(bias) + self.tanh_bias_bias
return F.linear(input, weight, bias)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
from torch.nn.parameter import Parameter
from torch.nn import init
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
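# Two small elementwise kernels build the effective weight (16 elements) and
# bias (4 elements) as a * tanh(p) + b; the linear layer itself is then a
# single extern_kernels.addmm on the flattened (64, 4) input.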
@triton.jit
def triton_poi_fused_add_mul_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp2 = libdevice.tanh(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_mul_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp2 = libdevice.tanh(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_tanh_0[grid(16)](primals_2, primals_1,
primals_3, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_add_mul_tanh_1[grid(4)](primals_5, primals_4,
primals_6, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1)
del primals_5
del primals_6
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, reinterpret_tensor(primals_7, (64, 4), (
4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf2)
del buf0
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, primals_4, reinterpret_tensor(primals_7, (64, 4), (4,
1), 0)
class WDV29LinearNew(torch.nn.Module):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
additional dimensions and :math:`H_{in} = \\text{in\\_features}`
- Output: :math:`(N, *, H_{out})` where all but the last dimension
are the same shape as the input and :math:`H_{out} = \\text{out\\_features}`.
Attributes:
weight: the learnable weights of the module of shape
:math:`(\\text{out\\_features}, \\text{in\\_features})`. The values are
initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where
:math:`k = \\frac{1}{\\text{in\\_features}}`
bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
:math:`k = \\frac{1}{\\text{in\\_features}}`
Examples::
>>> m = nn.Linear(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
__constants__ = ['bias', 'in_features', 'out_features']
def __init__(self, in_features, out_features, wd_require_gradient=False,
bias=True):
super(WDV29LinearNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features),
requires_grad=wd_require_gradient)
self.tanh_weight_weight = Parameter(torch.Tensor(out_features,
in_features))
self.tanh_bias_weight = Parameter(torch.Tensor(out_features,
in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features), requires_grad
=wd_require_gradient)
self.tanh_weight_bias = Parameter(torch.Tensor(out_features))
self.tanh_bias_bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.constant_(self.tanh_weight_weight, 1.0)
torch.nn.init.constant_(self.tanh_weight_bias, 1.0)
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
torch.nn.init.constant_(self.tanh_bias_weight, 0.0)
torch.nn.init.constant_(self.tanh_bias_bias, 0.0)
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.tanh_weight_weight
primals_3 = self.tanh_bias_weight
primals_4 = self.bias
primals_5 = self.tanh_weight_bias
primals_6 = self.tanh_bias_bias
primals_7 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| Lollipop321/weight-distillation | WDV29Linear | false | 5,562 | ["BSD-3-Clause"] | 1 | cfc76ec58e3e88094dde1825287b2968f9718431 | https://github.com/Lollipop321/weight-distillation/tree/cfc76ec58e3e88094dde1825287b2968f9718431 |
WDV52LayerNormalization
|
import numbers
import torch
import torch.nn.functional as F
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
from torch.nn.parameter import Parameter
from torch.nn import init
import torch.optim
import torch.optim.lr_scheduler
class WDV52LayerNormalization(torch.nn.Module):
"""Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization`_ .
.. math::
y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta
The mean and standard-deviation are calculated separately over the last
certain number dimensions which have to be of the shape specified by
:attr:`normalized_shape`.
:math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of
:attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
.. note::
Unlike Batch Normalization and Instance Normalization, which applies
scalar scale and bias for each entire channel/plane with the
:attr:`affine` option, Layer Normalization applies per-element scale and
bias with :attr:`elementwise_affine`.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
normalized_shape (int or list or torch.Size): input shape from an expected input
of size
.. math::
[* \\times \\text{normalized\\_shape}[0] \\times \\text{normalized\\_shape}[1]
\\times \\ldots \\times \\text{normalized\\_shape}[-1]]
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension which is expected to be of that specific size.
eps: a value added to the denominator for numerical stability. Default: 1e-5
elementwise_affine: a boolean value that when set to ``True``, this module
has learnable per-element affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, *)`
- Output: :math:`(N, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 5, 10, 10)
>>> # With Learnable Parameters
>>> m = nn.LayerNorm(input.size()[1:])
>>> # Without Learnable Parameters
>>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False)
>>> # Normalize over last two dimensions
>>> m = nn.LayerNorm([10, 10])
>>> # Normalize over last dimension of size 10
>>> m = nn.LayerNorm(10)
>>> # Activating the module
>>> output = m(input)
.. _`Layer Normalization`: https://arxiv.org/abs/1607.06450
"""
__constants__ = ['normalized_shape', 'weight', 'bias', 'eps',
'elementwise_affine']
def __init__(self, ts, normalized_shape, wd_normalized_shape,
wd_decoder_layers, wd_require_gradient=False, eps=1e-05,
elementwise_affine=True):
super(WDV52LayerNormalization, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = normalized_shape,
self.normalized_shape = tuple(normalized_shape)
self.ts = ts
self.eps = eps
self.elementwise_affine = elementwise_affine
self.wd_decoder_layers = wd_decoder_layers
if self.elementwise_affine:
self.out_wd_weight = Parameter(torch.Tensor(wd_normalized_shape,
*normalized_shape))
self.ly_wd_weight = Parameter(torch.Tensor(ts, ts))
self.wd_weight = Parameter(torch.Tensor(ts, 1))
self.weight = Parameter(torch.Tensor(wd_normalized_shape, ts),
requires_grad=wd_require_gradient)
self.tanh_weight_weight = Parameter(torch.Tensor(*normalized_shape)
)
self.tanh_weight_bias = Parameter(torch.Tensor(*normalized_shape))
self.out_wd_bias = Parameter(torch.Tensor(wd_normalized_shape,
*normalized_shape))
self.ly_wd_bias = Parameter(torch.Tensor(ts, ts))
self.wd_bias = Parameter(torch.Tensor(ts, 1))
self.bias = Parameter(torch.Tensor(wd_normalized_shape, ts),
requires_grad=wd_require_gradient)
self.tanh_bias_weight = Parameter(torch.Tensor(*normalized_shape))
self.tanh_bias_bias = Parameter(torch.Tensor(*normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
torch.nn.init.xavier_normal_(self.out_wd_weight)
torch.nn.init.xavier_normal_(self.ly_wd_weight)
torch.nn.init.xavier_normal_(self.out_wd_bias)
torch.nn.init.xavier_normal_(self.ly_wd_bias)
torch.nn.init.constant_(self.wd_weight, 1 / self.ts)
torch.nn.init.constant_(self.wd_bias, 1 / self.ts)
torch.nn.init.constant_(self.tanh_weight_weight, 1.0)
torch.nn.init.constant_(self.tanh_weight_bias, 1.0)
torch.nn.init.constant_(self.tanh_bias_weight, 0.0)
torch.nn.init.constant_(self.tanh_bias_bias, 0.0)
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input):
weight = self.weight
weight_wd = torch.transpose(weight, 0, 1)
weight_wd = torch.matmul(weight_wd, self.out_wd_weight)
weight_wd = torch.transpose(weight_wd, 0, 1)
weight_wd = torch.matmul(weight_wd, self.ly_wd_weight)
weight_wd = torch.matmul(weight_wd, self.wd_weight)
weight_wd = weight_wd.squeeze(-1)
weight_wd = self.tanh_weight_weight * torch.tanh(weight_wd
) + self.tanh_bias_weight
bias = self.bias
bias_wd = torch.transpose(bias, 0, 1)
bias_wd = torch.matmul(bias_wd, self.out_wd_bias)
bias_wd = torch.transpose(bias_wd, 0, 1)
bias_wd = torch.matmul(bias_wd, self.ly_wd_bias)
bias_wd = torch.matmul(bias_wd, self.wd_bias)
bias_wd = bias_wd.squeeze(-1)
bias_wd = self.tanh_weight_bias * torch.tanh(bias_wd
) + self.tanh_bias_bias
return F.layer_norm(input, self.normalized_shape, weight_wd,
bias_wd, self.eps)
def extra_repr(self):
return (
'{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}'
.format(**self.__dict__))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ts': 4, 'normalized_shape': 4, 'wd_normalized_shape': 4,
'wd_decoder_layers': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numbers
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
from torch.nn.parameter import Parameter
from torch.nn import init
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
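# The weight/bias distillation matmul chains run through extern_kernels.mm;
# the two Triton kernels below are the same fused LayerNorm pattern as in
# WDV29LayerNormalization above (statistics kernel, then normalize + affine).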
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_tanh_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp5 * tmp7
tmp10 = tmp8 + tmp9
tmp11 = tmp4 * tmp10
tmp14 = libdevice.tanh(tmp13)
tmp15 = tmp12 * tmp14
tmp17 = tmp15 + tmp16
tmp18 = tmp11 + tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, 1), (1, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
primals_3, out=buf1)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf1, primals_4, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_7, (4, 4), (1, 4), 0),
primals_8, out=buf3)
del primals_8
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (4, 4), (1, 4), 0),
primals_9, out=buf4)
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf4, primals_10, out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_13, buf6,
buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_tanh_1[grid(256)](primals_13
, buf6, buf7, primals_5, buf2, primals_6, primals_11, buf5,
primals_12, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
del buf7
del primals_12
del primals_6
return (buf8, primals_3, primals_5, primals_9, primals_11, primals_13,
buf2, buf5, reinterpret_tensor(buf4, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_10, (1, 4), (1, 1), 0), buf3, primals_7,
reinterpret_tensor(buf1, (4, 4), (1, 4), 0), reinterpret_tensor(
primals_4, (1, 4), (1, 1), 0), buf0, primals_1)
class WDV52LayerNormalizationNew(torch.nn.Module):
"""Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization`_ .
.. math::
y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta
The mean and standard-deviation are calculated separately over the last
certain number dimensions which have to be of the shape specified by
:attr:`normalized_shape`.
:math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of
:attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
.. note::
Unlike Batch Normalization and Instance Normalization, which applies
scalar scale and bias for each entire channel/plane with the
:attr:`affine` option, Layer Normalization applies per-element scale and
bias with :attr:`elementwise_affine`.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
normalized_shape (int or list or torch.Size): input shape from an expected input
of size
.. math::
[* \\times \\text{normalized\\_shape}[0] \\times \\text{normalized\\_shape}[1]
\\times \\ldots \\times \\text{normalized\\_shape}[-1]]
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension which is expected to be of that specific size.
eps: a value added to the denominator for numerical stability. Default: 1e-5
elementwise_affine: a boolean value that when set to ``True``, this module
has learnable per-element affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, *)`
- Output: :math:`(N, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 5, 10, 10)
>>> # With Learnable Parameters
>>> m = nn.LayerNorm(input.size()[1:])
>>> # Without Learnable Parameters
>>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False)
>>> # Normalize over last two dimensions
>>> m = nn.LayerNorm([10, 10])
>>> # Normalize over last dimension of size 10
>>> m = nn.LayerNorm(10)
>>> # Activating the module
>>> output = m(input)
.. _`Layer Normalization`: https://arxiv.org/abs/1607.06450
"""
__constants__ = ['normalized_shape', 'weight', 'bias', 'eps',
'elementwise_affine']
def __init__(self, ts, normalized_shape, wd_normalized_shape,
wd_decoder_layers, wd_require_gradient=False, eps=1e-05,
elementwise_affine=True):
super(WDV52LayerNormalizationNew, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = normalized_shape,
self.normalized_shape = tuple(normalized_shape)
self.ts = ts
self.eps = eps
self.elementwise_affine = elementwise_affine
self.wd_decoder_layers = wd_decoder_layers
if self.elementwise_affine:
self.out_wd_weight = Parameter(torch.Tensor(wd_normalized_shape,
*normalized_shape))
self.ly_wd_weight = Parameter(torch.Tensor(ts, ts))
self.wd_weight = Parameter(torch.Tensor(ts, 1))
self.weight = Parameter(torch.Tensor(wd_normalized_shape, ts),
requires_grad=wd_require_gradient)
self.tanh_weight_weight = Parameter(torch.Tensor(*normalized_shape)
)
self.tanh_weight_bias = Parameter(torch.Tensor(*normalized_shape))
self.out_wd_bias = Parameter(torch.Tensor(wd_normalized_shape,
*normalized_shape))
self.ly_wd_bias = Parameter(torch.Tensor(ts, ts))
self.wd_bias = Parameter(torch.Tensor(ts, 1))
self.bias = Parameter(torch.Tensor(wd_normalized_shape, ts),
requires_grad=wd_require_gradient)
self.tanh_bias_weight = Parameter(torch.Tensor(*normalized_shape))
self.tanh_bias_bias = Parameter(torch.Tensor(*normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
torch.nn.init.xavier_normal_(self.out_wd_weight)
torch.nn.init.xavier_normal_(self.ly_wd_weight)
torch.nn.init.xavier_normal_(self.out_wd_bias)
torch.nn.init.xavier_normal_(self.ly_wd_bias)
torch.nn.init.constant_(self.wd_weight, 1 / self.ts)
torch.nn.init.constant_(self.wd_bias, 1 / self.ts)
torch.nn.init.constant_(self.tanh_weight_weight, 1.0)
torch.nn.init.constant_(self.tanh_weight_bias, 1.0)
torch.nn.init.constant_(self.tanh_bias_weight, 0.0)
torch.nn.init.constant_(self.tanh_bias_bias, 0.0)
init.ones_(self.weight)
init.zeros_(self.bias)
def extra_repr(self):
return (
'{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}'
.format(**self.__dict__))
def forward(self, input_0):
primals_1 = self.out_wd_weight
primals_2 = self.ly_wd_weight
primals_4 = self.wd_weight
primals_3 = self.weight
primals_5 = self.tanh_weight_weight
primals_6 = self.tanh_weight_bias
primals_7 = self.out_wd_bias
primals_8 = self.ly_wd_bias
primals_10 = self.wd_bias
primals_9 = self.bias
primals_11 = self.tanh_bias_weight
primals_12 = self.tanh_bias_bias
primals_13 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
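# Hedged construction sketch (the toy sizes below are assumptions mirroring the
# companion WDV52Linear entry; the compiled forward is not invoked here): build
# the module and inspect the shared weight/bias codebooks of shape
# (wd_normalized_shape, ts) that the decoder tensors act on.
def example_usage():
    ln = WDV52LayerNormalizationNew(ts=4, normalized_shape=4,
        wd_normalized_shape=4, wd_decoder_layers=1)
    return ln.weight.shape, ln.bias.shape  # torch.Size([4, 4]) each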
|
Lollipop321/weight-distillation
|
WDV52LayerNormalization
| false
| 5,563
|
[
"BSD-3-Clause"
] | 1
|
cfc76ec58e3e88094dde1825287b2968f9718431
|
https://github.com/Lollipop321/weight-distillation/tree/cfc76ec58e3e88094dde1825287b2968f9718431
|
UsBlockRes
|
import torch
import torch.nn as nn
def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True):
return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=
stride, padding=padding, bias=bias)
def conv1x1(in_channels, out_channels):
return nn.Conv3d(in_channels, out_channels, kernel_size=1)
def upconv2x2(in_channels, out_channels, mode='transpose'):
if mode == 'transpose':
return nn.ConvTranspose3d(in_channels, out_channels, kernel_size=2,
stride=2)
else:
return nn.Sequential(nn.Upsample(mode='trilinear', scale_factor=2),
conv1x1(in_channels, out_channels))
class UsBlockRes(nn.Module):
def __init__(self, in_channels, out_channels, up_mode='transpose'):
super(UsBlockRes, self).__init__()
self.upconv = upconv2x2(in_channels, out_channels, mode=up_mode)
self.conv = conv3x3(out_channels, out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, before_pool, x):
x = self.upconv(x)
x = x + before_pool
x = self.conv(x)
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 8, 8, 8]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
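# Hedged usage sketch (not part of the source repo): one decoder step that
# upsamples the coarse features and fuses them with the matching encoder skip
# tensor saved before pooling. A batch of one 4-channel volume is assumed.
def example_usage():
    block = UsBlockRes(in_channels=4, out_channels=4, up_mode='transpose')
    x = torch.rand(1, 4, 4, 4, 4)            # coarse decoder features
    before_pool = torch.rand(1, 4, 8, 8, 8)  # encoder skip at twice the size
    return block(before_pool, x)             # -> (1, 4, 8, 8, 8)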
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x1 = xindex // 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x1 = xindex // 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 2, 2, 2), (32, 8, 4, 2, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 8, 8, 8), (512, 64, 8, 1))
assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(2, 2,
2), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=True,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 8, 8, 8), (2048, 512, 64, 8, 1))
buf1 = reinterpret_tensor(buf0, (4, 8, 8, 8), (512, 64, 8, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(2048)](buf1, primals_2, primals_4, 2048,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_4
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 8,
8, 8), (0, 512, 64, 8, 1), 0), primals_5, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 8, 8, 8), (2048, 512, 64, 8, 1))
buf3 = reinterpret_tensor(buf2, (4, 8, 8, 8), (512, 64, 8, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(2048)](buf3,
primals_6, buf4, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
return buf3, primals_1, primals_5, reinterpret_tensor(primals_3, (1, 4,
4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4,
8, 8, 8), (2048, 512, 64, 8, 1), 0), buf4
def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True):
return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=
stride, padding=padding, bias=bias)
def conv1x1(in_channels, out_channels):
return nn.Conv3d(in_channels, out_channels, kernel_size=1)
def upconv2x2(in_channels, out_channels, mode='transpose'):
if mode == 'transpose':
return nn.ConvTranspose3d(in_channels, out_channels, kernel_size=2,
stride=2)
else:
return nn.Sequential(nn.Upsample(mode='trilinear', scale_factor=2),
conv1x1(in_channels, out_channels))
class UsBlockResNew(nn.Module):
def __init__(self, in_channels, out_channels, up_mode='transpose'):
super(UsBlockResNew, self).__init__()
self.upconv = upconv2x2(in_channels, out_channels, mode=up_mode)
self.conv = conv3x3(out_channels, out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0, input_1):
primals_1 = self.upconv.weight
primals_2 = self.upconv.bias
primals_5 = self.conv.weight
primals_6 = self.conv.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
MATHplus-Young-Academy/P2-Cardiac-Motion
|
UsBlockRes
| false
| 5,564
|
[
"Apache-2.0"
] | 1
|
844995e8e5760f981c425d13c0bd7f2f3bb8baec
|
https://github.com/MATHplus-Young-Academy/P2-Cardiac-Motion/tree/844995e8e5760f981c425d13c0bd7f2f3bb8baec
|
ContinousRotReprDecoder
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ContinousRotReprDecoder(nn.Module):
def __init__(self):
super(ContinousRotReprDecoder, self).__init__()
def forward(self, module_input):
reshaped_input = module_input.view(-1, 3, 2)
b1 = F.normalize(reshaped_input[:, :, 0], dim=1)
dot_prod = torch.sum(b1 * reshaped_input[:, :, 1], dim=1, keepdim=True)
b2 = F.normalize(reshaped_input[:, :, 1] - dot_prod * b1, dim=-1)
b3 = torch.cross(b1, b2, dim=1)
return torch.stack([b1, b2, b3], dim=-1)
def get_inputs():
return [torch.rand([4, 3, 2])]
def get_init_inputs():
return [[], {}]
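# Hedged usage sketch (not from the source repo): decode a batch of 6D
# continuous rotation representations into 3x3 rotation matrices via the
# Gram-Schmidt style construction in forward().
def example_usage():
    decoder = ContinousRotReprDecoder()
    repr_6d = torch.rand(4, 3, 2)  # two 3D column vectors per sample
    rot_mats = decoder(repr_6d)    # -> (4, 3, 3) rotation matrices
    return rot_mats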
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-12
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = tmp0 / tmp10
tmp13 = tmp11 * tmp12
tmp14 = tmp2 / tmp10
tmp16 = tmp14 * tmp15
tmp17 = tmp13 + tmp16
tmp18 = tmp5 / tmp10
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_div_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 3
tmp0 = tl.load(in_ptr0 + (1 + 2 * x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + 2 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + 6 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 6 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (4 + 6 * x1), xmask, eviction_policy='evict_last')
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 1e-12
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp2 / tmp13
tmp15 = tmp1 * tmp14
tmp16 = tmp0 - tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_div_linalg_cross_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * ((1 + x0) % 3) + 6 * x1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + 6 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 6 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (4 + 6 * x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (3 * x1 + (2 + x0) % 3), xmask,
eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + 3 * x1, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (1 + 3 * x1), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (2 + 3 * x1), xmask, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr0 + (2 * ((2 + x0) % 3) + 6 * x1), xmask,
eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (3 * x1 + (1 + x0) % 3), xmask)
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = 1e-12
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tmp0 / tmp11
tmp15 = tmp14 * tmp14
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = libdevice.sqrt(tmp21)
tmp23 = triton_helpers.maximum(tmp22, tmp10)
tmp24 = tmp13 / tmp23
tmp25 = tmp12 * tmp24
tmp27 = tmp26 / tmp11
tmp29 = tmp28 / tmp23
tmp30 = tmp27 * tmp29
tl.store(out_ptr0 + x2, tmp25, xmask)
tl.store(out_ptr1 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x3 = xindex // 3
x2 = xindex // 9
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 2 * x3, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + 6 * x2, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp6 * tmp6
tmp8 = tl.load(in_ptr0 + (2 + 6 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = tl.load(in_ptr0 + (4 + 6 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = 1e-12
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp17 = tmp5 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp4, tmp17, tmp18)
tmp20 = tmp0 >= tmp3
tmp21 = tl.full([1], 2, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr1 + x3, tmp23 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tl.load(in_ptr1 + 3 * x2, tmp23 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp25 * tmp25
tmp27 = tl.load(in_ptr1 + (1 + 3 * x2), tmp23 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tmp27 * tmp27
tmp29 = tmp26 + tmp28
tmp30 = tl.load(in_ptr1 + (2 + 3 * x2), tmp23 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp31 = tmp30 * tmp30
tmp32 = tmp29 + tmp31
tmp33 = libdevice.sqrt(tmp32)
tmp34 = triton_helpers.maximum(tmp33, tmp15)
tmp35 = tmp24 / tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp23, tmp35, tmp36)
tmp38 = tmp0 >= tmp21
tl.full([1], 3, tl.int64)
tmp41 = tl.load(in_ptr2 + x3, tmp38 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp42 = tl.load(in_ptr3 + x3, tmp38 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp43 = tmp41 - tmp42
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp38, tmp43, tmp44)
tmp46 = tl.where(tmp23, tmp37, tmp45)
tmp47 = tl.where(tmp4, tmp19, tmp46)
tl.store(out_ptr0 + x5, tmp47, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 3, 2), (6, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_sum_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 3), (3, 1), torch.float32)
triton_poi_fused_div_mul_sub_sum_1[grid(12)](arg0_1, buf0, buf1, 12,
XBLOCK=16, num_warps=1, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 3), (3, 1), torch.float32)
buf3 = empty_strided_cuda((4, 3), (3, 1), torch.float32)
triton_poi_fused_div_linalg_cross_2[grid(12)](arg0_1, buf1, buf2,
buf3, 12, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 3, 3), (9, 3, 1), torch.float32)
triton_poi_fused_stack_3[grid(36)](arg0_1, buf1, buf2, buf3, buf4,
36, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del buf1
del buf2
del buf3
return buf4,
class ContinousRotReprDecoderNew(nn.Module):
def __init__(self):
super(ContinousRotReprDecoderNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
LuckyDC/human_body_prior
|
ContinousRotReprDecoder
| false
| 5,565
|
[
"Xnet",
"X11"
] | 1
|
6a46613b4cbd9c62d888359f1435cec501643af3
|
https://github.com/LuckyDC/human_body_prior/tree/6a46613b4cbd9c62d888359f1435cec501643af3
|
BCEWithLogitsLoss2d
|
import torch
import numpy as np
import torch.nn as nn
class BCEWithLogitsLoss2d(nn.Module):
"""Computationally stable version of 2D BCE loss
"""
def __init__(self, weight=None, reduction='elementwise_mean'):
super(BCEWithLogitsLoss2d, self).__init__()
if isinstance(weight, np.ndarray):
weight = torch.from_numpy(weight)
self.bce_loss = nn.BCEWithLogitsLoss(weight, reduction)
def forward(self, logits, targets):
logits_flat = logits.view(-1)
targets_flat = targets.view(-1)
return self.bce_loss(logits_flat, targets_flat)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
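# Hedged usage sketch (illustrative shapes, not from the source repo): logits go
# in raw (no sigmoid), targets may be hard or soft masks; both are flattened
# before the numerically stable BCE-with-logits reduction.
def example_usage():
    criterion = BCEWithLogitsLoss2d()
    logits = torch.randn(2, 1, 8, 8)
    targets = (torch.rand(2, 1, 8, 8) > 0.5).float()
    return criterion(logits, targets)  # scalar mean loss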
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](buf1,
arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BCEWithLogitsLoss2dNew(nn.Module):
"""Computationally stable version of 2D BCE loss
"""
def __init__(self, weight=None, reduction='elementwise_mean'):
super(BCEWithLogitsLoss2dNew, self).__init__()
if isinstance(weight, np.ndarray):
weight = torch.from_numpy(weight)
self.bce_loss = nn.BCEWithLogitsLoss(weight, reduction)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
MIPT-Oulu/3D-Histo-Grading
|
BCEWithLogitsLoss2d
| false
| 5,566
|
[
"MIT"
] | 1
|
b779a154d0e5b104fc152c8952124768fb7b1dc6
|
https://github.com/MIPT-Oulu/3D-Histo-Grading/tree/b779a154d0e5b104fc152c8952124768fb7b1dc6
|
ViTStemPatchify
|
from torch.nn import Module
import torch
import torch.nn as nn
import torch.utils.data
def patchify2d(w_in, w_out, k, *, bias=True):
"""Helper for building a patchify layer as used by ViT models."""
return nn.Conv2d(w_in, w_out, k, stride=k, padding=0, bias=bias)
def patchify2d_cx(cx, w_in, w_out, k, *, bias=True):
"""Accumulates complexity of patchify2d into cx = (h, w, flops, params, acts)."""
    err_str = 'Only input sizes divisible by the kernel size are supported.'
assert cx['h'] % k == 0 and cx['w'] % k == 0, err_str
h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params'
], cx['acts']
h, w = h // k, w // k
flops += k * k * w_in * w_out * h * w + (w_out * h * w if bias else 0)
params += k * k * w_in * w_out + (w_out if bias else 0)
acts += w_out * h * w
return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts}
class ViTStemPatchify(Module):
"""The patchify vision transformer stem as per https://arxiv.org/abs/2010.11929."""
def __init__(self, w_in, w_out, k):
super(ViTStemPatchify, self).__init__()
self.patchify = patchify2d(w_in, w_out, k, bias=True)
def forward(self, x):
return self.patchify(x)
@staticmethod
def complexity(cx, w_in, w_out, k):
return patchify2d_cx(cx, w_in, w_out, k, bias=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'w_in': 4, 'w_out': 4, 'k': 4}]
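# Hedged usage sketch (sizes are illustrative): patchify a 32x32 RGB image into
# an 8x8 grid of 96-dimensional patch embeddings and accumulate the stem's
# complexity into a fresh budget dict.
def example_usage():
    stem = ViTStemPatchify(w_in=3, w_out=96, k=4)
    tokens = stem(torch.rand(2, 3, 32, 32))  # -> (2, 96, 8, 8)
    cx = {'h': 32, 'w': 32, 'flops': 0, 'params': 0, 'acts': 0}
    cx = ViTStemPatchify.complexity(cx, 3, 96, 4)
    return tokens, cx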
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
4), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
def patchify2d(w_in, w_out, k, *, bias=True):
"""Helper for building a patchify layer as used by ViT models."""
return nn.Conv2d(w_in, w_out, k, stride=k, padding=0, bias=bias)
def patchify2d_cx(cx, w_in, w_out, k, *, bias=True):
"""Accumulates complexity of patchify2d into cx = (h, w, flops, params, acts)."""
    err_str = 'Only input sizes divisible by the kernel size are supported.'
assert cx['h'] % k == 0 and cx['w'] % k == 0, err_str
h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params'
], cx['acts']
h, w = h // k, w // k
flops += k * k * w_in * w_out * h * w + (w_out * h * w if bias else 0)
params += k * k * w_in * w_out + (w_out if bias else 0)
acts += w_out * h * w
return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts}
class ViTStemPatchifyNew(Module):
"""The patchify vision transformer stem as per https://arxiv.org/abs/2010.11929."""
def __init__(self, w_in, w_out, k):
super(ViTStemPatchifyNew, self).__init__()
self.patchify = patchify2d(w_in, w_out, k, bias=True)
@staticmethod
def complexity(cx, w_in, w_out, k):
return patchify2d_cx(cx, w_in, w_out, k, bias=True)
def forward(self, input_0):
primals_1 = self.patchify.weight
primals_2 = self.patchify.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
MAC-AutoML/XCompression
|
ViTStemPatchify
| false
| 5,567
|
[
"MIT"
] | 1
|
9f76eb3ccfb3057110ecf12aa48dec00a4667a25
|
https://github.com/MAC-AutoML/XCompression/tree/9f76eb3ccfb3057110ecf12aa48dec00a4667a25
|
WDV52Linear
|
import math
import torch
import torch.nn.functional as F
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
from torch.nn.parameter import Parameter
from torch.nn import init
import torch.optim
import torch.optim.lr_scheduler
class WDV52Linear(torch.nn.Module):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
additional dimensions and :math:`H_{in} = \\text{in\\_features}`
- Output: :math:`(N, *, H_{out})` where all but the last dimension
are the same shape as the input and :math:`H_{out} = \\text{out\\_features}`.
Attributes:
weight: the learnable weights of the module of shape
:math:`(\\text{out\\_features}, \\text{in\\_features})`. The values are
initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where
:math:`k = \\frac{1}{\\text{in\\_features}}`
bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
:math:`k = \\frac{1}{\\text{in\\_features}}`
Examples::
>>> m = nn.Linear(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
__constants__ = ['bias', 'in_features', 'out_features']
def __init__(self, ts, in_features, out_features, wd_in_features,
wd_out_features, wd_decoder_layers, wd_require_gradient=False, bias
=True):
super(WDV52Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.ts = ts
self.wd_decoder_layers = wd_decoder_layers
self.out_wd_weight = Parameter(torch.Tensor(wd_out_features,
out_features))
self.in_wd_weight = Parameter(torch.Tensor(wd_in_features, in_features)
)
self.ly_wd_weight = Parameter(torch.Tensor(ts, ts))
self.wd_weight = Parameter(torch.Tensor(ts, 1))
self.weight = Parameter(torch.Tensor(wd_out_features,
wd_in_features, ts), requires_grad=wd_require_gradient)
self.tanh_weight_weight = Parameter(torch.Tensor(out_features,
in_features))
self.tanh_bias_weight = Parameter(torch.Tensor(out_features,
in_features))
if bias:
self.out_wd_bias = Parameter(torch.Tensor(wd_out_features,
out_features))
self.ly_wd_bias = Parameter(torch.Tensor(ts, ts))
self.wd_bias = Parameter(torch.Tensor(ts, 1))
self.bias = Parameter(torch.Tensor(wd_out_features, ts),
requires_grad=wd_require_gradient)
self.tanh_weight_bias = Parameter(torch.Tensor(out_features))
self.tanh_bias_bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.out_wd_weight)
torch.nn.init.xavier_normal_(self.in_wd_weight)
torch.nn.init.xavier_normal_(self.ly_wd_weight)
torch.nn.init.constant_(self.wd_weight, 1 / self.ts)
torch.nn.init.constant_(self.tanh_weight_weight, 1.0)
torch.nn.init.constant_(self.tanh_weight_bias, 1.0)
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
torch.nn.init.xavier_normal_(self.out_wd_bias)
torch.nn.init.xavier_normal_(self.ly_wd_bias)
torch.nn.init.constant_(self.wd_bias, 1 / self.ts)
torch.nn.init.constant_(self.tanh_bias_weight, 0.0)
torch.nn.init.constant_(self.tanh_bias_bias, 0.0)
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
weight = self.weight
weight_wd = torch.transpose(weight, 0, 2)
weight_wd = torch.matmul(weight_wd, self.out_wd_weight)
weight_wd = torch.transpose(weight_wd, 0, 2)
weight_wd = torch.matmul(weight_wd, self.ly_wd_weight)
weight_wd = torch.transpose(weight_wd, 1, 2)
weight_wd = torch.matmul(weight_wd, self.in_wd_weight)
weight_wd = torch.transpose(weight_wd, 1, 2)
weight_wd = torch.matmul(weight_wd, self.wd_weight)
weight_wd = weight_wd.squeeze(-1)
weight_wd = self.tanh_weight_weight * torch.tanh(weight_wd
) + self.tanh_bias_weight
bias = self.bias
bias_wd = torch.transpose(bias, 0, 1)
bias_wd = torch.matmul(bias_wd, self.out_wd_bias)
bias_wd = torch.transpose(bias_wd, 0, 1)
bias_wd = torch.matmul(bias_wd, self.ly_wd_bias)
bias_wd = torch.matmul(bias_wd, self.wd_bias)
bias_wd = bias_wd.squeeze(-1)
bias_wd = self.tanh_weight_bias * torch.tanh(bias_wd
) + self.tanh_bias_bias
return F.linear(input, weight_wd, bias_wd)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ts': 4, 'in_features': 4, 'out_features': 4,
'wd_in_features': 4, 'wd_out_features': 4, 'wd_decoder_layers': 1}]
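# Hedged usage sketch (toy sizes from get_init_inputs above): the layer first
# decodes a dense weight and bias from the teacher-side codebooks, then applies
# an ordinary F.linear with the decoded parameters.
def example_usage():
    layer = WDV52Linear(ts=4, in_features=4, out_features=4,
        wd_in_features=4, wd_out_features=4, wd_decoder_layers=1)
    x = torch.rand(4, 4, 4, 4)
    return layer(x)  # -> (4, 4, 4, 4)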
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
from torch.nn.parameter import Parameter
from torch.nn import init
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * x0 + x1 // 4), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (y0 // 4) + y0 % 4), xmask &
ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_mul_tanh_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp2 = libdevice.tanh(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_mul_tanh_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp2 = libdevice.tanh(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 1), (1, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, 1), (1, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0[grid(64)](primals_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, primals_2, out=buf1)
del primals_2
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused__unsafe_view_clone_0[grid(64)](buf1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.mm(buf2, primals_3, out=buf3)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused__unsafe_view_clone_1[grid(16, 4)](buf3, buf4, 16,
4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf5 = buf3
del buf3
extern_kernels.mm(buf4, primals_4, out=buf5)
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused__unsafe_view_clone_1[grid(16, 4)](buf5, buf6, 16,
4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf5
buf7 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf6, primals_5, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
primals_9, out=buf8)
del primals_9
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (1, 4), 0),
primals_10, out=buf9)
buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf9, primals_11, out=buf10)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_tanh_2[grid(16)](primals_6, buf7,
primals_7, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf12 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_add_mul_tanh_3[grid(4)](primals_12, buf10,
primals_13, buf12, 4, XBLOCK=4, num_warps=1, num_stages=1)
del primals_13
buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf12, reinterpret_tensor(primals_14, (64, 4),
(4, 1), 0), reinterpret_tensor(buf11, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf13)
del buf11
del buf12
return reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_6, primals_10, primals_12, buf7, buf10, reinterpret_tensor(
primals_14, (64, 4), (4, 1), 0), reinterpret_tensor(buf9, (4, 4), (
1, 4), 0), reinterpret_tensor(primals_11, (1, 4), (1, 1), 0
), buf8, primals_8, reinterpret_tensor(buf6, (4, 16), (1, 4), 0
), reinterpret_tensor(primals_5, (1, 4), (1, 1), 0
), reinterpret_tensor(buf4, (4, 16), (1, 4), 0), reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(buf2, (4, 16), (1,
4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0
), reinterpret_tensor(buf0, (4, 16), (1, 4), 0)
class WDV52LinearNew(torch.nn.Module):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
additional dimensions and :math:`H_{in} = \\text{in\\_features}`
- Output: :math:`(N, *, H_{out})` where all but the last dimension
are the same shape as the input and :math:`H_{out} = \\text{out\\_features}`.
Attributes:
weight: the learnable weights of the module of shape
:math:`(\\text{out\\_features}, \\text{in\\_features})`. The values are
initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where
:math:`k = \\frac{1}{\\text{in\\_features}}`
bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
:math:`k = \\frac{1}{\\text{in\\_features}}`
Examples::
>>> m = nn.Linear(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
__constants__ = ['bias', 'in_features', 'out_features']
def __init__(self, ts, in_features, out_features, wd_in_features,
wd_out_features, wd_decoder_layers, wd_require_gradient=False, bias
=True):
super(WDV52LinearNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.ts = ts
self.wd_decoder_layers = wd_decoder_layers
self.out_wd_weight = Parameter(torch.Tensor(wd_out_features,
out_features))
self.in_wd_weight = Parameter(torch.Tensor(wd_in_features, in_features)
)
self.ly_wd_weight = Parameter(torch.Tensor(ts, ts))
self.wd_weight = Parameter(torch.Tensor(ts, 1))
self.weight = Parameter(torch.Tensor(wd_out_features,
wd_in_features, ts), requires_grad=wd_require_gradient)
self.tanh_weight_weight = Parameter(torch.Tensor(out_features,
in_features))
self.tanh_bias_weight = Parameter(torch.Tensor(out_features,
in_features))
if bias:
self.out_wd_bias = Parameter(torch.Tensor(wd_out_features,
out_features))
self.ly_wd_bias = Parameter(torch.Tensor(ts, ts))
self.wd_bias = Parameter(torch.Tensor(ts, 1))
self.bias = Parameter(torch.Tensor(wd_out_features, ts),
requires_grad=wd_require_gradient)
self.tanh_weight_bias = Parameter(torch.Tensor(out_features))
self.tanh_bias_bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.out_wd_weight)
torch.nn.init.xavier_normal_(self.in_wd_weight)
torch.nn.init.xavier_normal_(self.ly_wd_weight)
torch.nn.init.constant_(self.wd_weight, 1 / self.ts)
torch.nn.init.constant_(self.tanh_weight_weight, 1.0)
torch.nn.init.constant_(self.tanh_weight_bias, 1.0)
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
torch.nn.init.xavier_normal_(self.out_wd_bias)
torch.nn.init.xavier_normal_(self.ly_wd_bias)
torch.nn.init.constant_(self.wd_bias, 1 / self.ts)
torch.nn.init.constant_(self.tanh_bias_weight, 0.0)
torch.nn.init.constant_(self.tanh_bias_bias, 0.0)
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def forward(self, input_0):
primals_2 = self.out_wd_weight
primals_3 = self.in_wd_weight
primals_4 = self.ly_wd_weight
primals_5 = self.wd_weight
primals_1 = self.weight
primals_6 = self.tanh_weight_weight
primals_7 = self.tanh_bias_weight
primals_8 = self.out_wd_bias
primals_9 = self.ly_wd_bias
primals_11 = self.wd_bias
primals_10 = self.bias
primals_12 = self.tanh_weight_bias
primals_13 = self.tanh_bias_bias
primals_14 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
|
Lollipop321/weight-distillation
|
WDV52Linear
| false
| 5,568
|
[
"BSD-3-Clause"
] | 1
|
cfc76ec58e3e88094dde1825287b2968f9718431
|
https://github.com/Lollipop321/weight-distillation/tree/cfc76ec58e3e88094dde1825287b2968f9718431
|
DsBlock
|
import torch
import torch.nn as nn
def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True):
return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=
stride, padding=padding, bias=bias)
class DsBlock(nn.Module):
def __init__(self, in_channels, out_channels, pooling):
super(DsBlock, self).__init__()
self.conv = conv3x3(in_channels, out_channels)
self.relu = nn.ReLU(inplace=True)
self.pooling = pooling
if pooling:
self.mp = nn.MaxPool3d(kernel_size=2, stride=2, padding=0)
def forward(self, x):
out = self.conv(x)
out = self.relu(out)
before_pool = out
if self.pooling:
out = self.mp(out)
return out, before_pool
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'pooling': 4}]
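# Hedged usage sketch (not from the source repo): one encoder stage; the tensor
# captured before pooling is returned so a decoder block (e.g. UsBlockRes) can
# use it as a skip connection. A batch of one 4-channel volume is assumed.
def example_usage():
    block = DsBlock(in_channels=4, out_channels=4, pooling=True)
    out, before_pool = block(torch.rand(1, 4, 4, 4, 4))
    return out.shape, before_pool.shape  # (1, 4, 2, 2, 2), (1, 4, 4, 4, 4)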
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [2, 2,
2], [2, 2, 2])
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
return buf3, buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4,
4), (256, 64, 16, 4, 1), 0), buf1, buf4, buf5
def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True):
return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=
stride, padding=padding, bias=bias)
class DsBlockNew(nn.Module):
def __init__(self, in_channels, out_channels, pooling):
super(DsBlockNew, self).__init__()
self.conv = conv3x3(in_channels, out_channels)
self.relu = nn.ReLU(inplace=True)
self.pooling = pooling
if pooling:
self.mp = nn.MaxPool3d(kernel_size=2, stride=2, padding=0)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
|
MATHplus-Young-Academy/P2-Cardiac-Motion
|
DsBlock
| false
| 5,569
|
[
"Apache-2.0"
] | 1
|
844995e8e5760f981c425d13c0bd7f2f3bb8baec
|
https://github.com/MATHplus-Young-Academy/P2-Cardiac-Motion/tree/844995e8e5760f981c425d13c0bd7f2f3bb8baec
|
BinaryDiceLoss
|
import torch
import torch.nn as nn
class BinaryDiceLoss(nn.Module):
"""SoftDice loss
"""
def __init__(self):
super(BinaryDiceLoss, self).__init__()
self.SM = nn.Sigmoid()
def forward(self, logits, labels):
num = labels.size(0)
m1 = self.SM(logits).view(num, -1)
m2 = labels.view(num, -1)
intersection = m1 * m2
score = 2.0 * (intersection.sum(1) + 1e-15) / (m1.sum(1) + m2.sum(1
) + 1e-15)
score = 1 - score.sum() / num
return score
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
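# Hedged usage sketch (illustrative shapes): soft Dice over sigmoid
# probabilities; logits are passed raw and labels may be hard or soft masks.
def example_usage():
    criterion = BinaryDiceLoss()
    logits = torch.randn(2, 1, 8, 8)
    labels = (torch.rand(2, 1, 8, 8) > 0.5).float()
    return criterion(logits, labels)  # scalar, lower means better overlap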
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = 1e-15
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = 1.0
tmp16 = tmp15 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
class BinaryDiceLossNew(nn.Module):
"""SoftDice loss
"""
def __init__(self):
super(BinaryDiceLossNew, self).__init__()
self.SM = nn.Sigmoid()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
MIPT-Oulu/3D-Histo-Grading
|
BinaryDiceLoss
| false
| 5,570
|
[
"MIT"
] | 1
|
b779a154d0e5b104fc152c8952124768fb7b1dc6
|
https://github.com/MIPT-Oulu/3D-Histo-Grading/tree/b779a154d0e5b104fc152c8952124768fb7b1dc6
|
MultiHeadAttention
|
import torch
import torch.nn as nn
class MultiHeadAttention(nn.Module):
def __init__(self, hidden_size, attention_dropout_rate, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.att_size = att_size = hidden_size // num_heads
self.scale = att_size ** -0.5
self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
self.att_dropout = nn.Dropout(attention_dropout_rate)
self.output_layer = nn.Linear(num_heads * att_size, hidden_size)
def forward(self, q, k, v, attn_bias=None):
orig_q_size = q.size()
d_k = self.att_size
d_v = self.att_size
batch_size = q.size(0)
q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k)
k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k)
v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
k = k.transpose(1, 2).transpose(2, 3)
q = q * self.scale
x = torch.matmul(q, k)
if attn_bias is not None:
x = x + attn_bias
x = torch.softmax(x, dim=3)
x = self.att_dropout(x)
x = x.matmul(v)
x = x.transpose(1, 2).contiguous()
x = x.view(batch_size, -1, self.num_heads * d_v)
x = self.output_layer(x)
assert x.size() == orig_q_size
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'hidden_size': 4, 'attention_dropout_rate': 0.5,
'num_heads': 4}]
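# Hedged usage sketch (toy sizes from get_init_inputs above): self-attention
# where query, key and value share one tensor; attn_bias, if supplied, is added
# to the pre-softmax scores, e.g. an additive mask of shape (batch, heads,
# seq_len, seq_len).
def example_usage():
    mha = MultiHeadAttention(hidden_size=4, attention_dropout_rate=0.5,
        num_heads=4)
    x = torch.rand(4, 4, 4)  # (batch, seq_len, hidden_size)
    return mha(x, x, x)      # -> (4, 4, 4)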
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_mul_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_mul_0[grid(16, 4)](buf0, primals_3, buf3, 16,
4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_8, buf8, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_11
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0
), primals_10, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class MultiHeadAttentionNew(nn.Module):
def __init__(self, hidden_size, attention_dropout_rate, num_heads):
super(MultiHeadAttentionNew, self).__init__()
self.num_heads = num_heads
self.att_size = att_size = hidden_size // num_heads
self.scale = att_size ** -0.5
self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
self.att_dropout = nn.Dropout(attention_dropout_rate)
self.output_layer = nn.Linear(num_heads * att_size, hidden_size)
def forward(self, input_0, input_1, input_2):
primals_2 = self.linear_q.weight
primals_3 = self.linear_q.bias
primals_4 = self.linear_k.weight
primals_5 = self.linear_k.bias
primals_7 = self.linear_v.weight
primals_8 = self.linear_v.bias
primals_10 = self.output_layer.weight
primals_11 = self.output_layer.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
Luo-Chang/Graphormer
|
MultiHeadAttention
| false
| 5,571
|
[
"MIT"
] | 1
|
b35b3ca6369e25cdae80e1617bfc3921feeb3158
|
https://github.com/Luo-Chang/Graphormer/tree/b35b3ca6369e25cdae80e1617bfc3921feeb3158
|
Discrete
|
import torch
import torch.nn as nn
class Discrete(nn.Module):
def __init__(self):
super(Discrete, self).__init__()
def forward(self, x):
return nn.functional.softmax(x, dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
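# Illustrative usage sketch (added for clarity, not part of the original
# module): softmax over dim=0 normalises across the first dimension, so the
# outputs sum to one along that axis. The _demo_* name is a placeholder.
_demo_out = Discrete()(torch.rand([4, 4, 4, 4]))
assert torch.allclose(_demo_out.sum(dim=0), torch.ones(4, 4, 4))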
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf0
return buf1,
class DiscreteNew(nn.Module):
def __init__(self):
super(DiscreteNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
MPGek/client
|
Discrete
| false
| 5,572
|
[
"Apache-2.0"
] | 1
|
541d760c5cb8776b1ad5fcf1362d7382811cbc61
|
https://github.com/MPGek/client/tree/541d760c5cb8776b1ad5fcf1362d7382811cbc61
|
AttentionPool2d
|
import math
import torch
import numpy as np
import torch as th
import torch.nn as nn
def count_flops_attn(model, _x, y):
"""
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
"""
b, c, *spatial = y[0].shape
num_spatial = int(np.prod(spatial))
matmul_ops = 2 * b * num_spatial ** 2 * c
model.total_ops += th.DoubleTensor([matmul_ops])
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
class QKVAttention(nn.Module):
"""
A module which performs QKV attention and splits in a different order.
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.chunk(3, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum('bct,bcs->bts', (q * scale).view(bs * self.
n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch,
length))
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum('bts,bcs->bct', weight, v.reshape(bs * self.n_heads,
ch, length))
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
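# Illustrative shape sketch (added for clarity, not part of the original
# module): QKVAttention maps an [N x (3*H*C) x T] tensor of stacked queries,
# keys and values to [N x (H*C) x T]. The _demo_* name is a placeholder.
_demo_out = QKVAttention(n_heads=2)(th.rand(1, 3 * 2 * 4, 5))
assert _demo_out.shape == (1, 2 * 4, 5)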
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(self, spacial_dim: 'int', embed_dim: 'int',
num_heads_channels: 'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim,
spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)
x = x + self.positional_embedding[None, :, :]
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'spacial_dim': 4, 'embed_dim': 4, 'num_heads_channels': 4}]
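# Illustrative usage sketch (added for clarity, not part of the original
# module): attention pooling reduces a (B, C, H, W) feature map to a single
# (B, output_dim or C) vector per sample. The _demo_* name is a placeholder.
_demo_pool = AttentionPool2d(spacial_dim=4, embed_dim=4, num_heads_channels=4)
assert _demo_pool(torch.rand([4, 4, 4, 4])).shape == (4, 4)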
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import numpy as np
import torch as th
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 17
x3 = xindex // 17
x4 = xindex % 68
x5 = xindex
tmp15 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 17, tl.int64)
tmp13 = tl.load(in_ptr1 + (16 * x3 + (-1 + x0)), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x5, tmp16, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 68
x3 = xindex % 68
x1 = xindex // 17 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 204 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 68
x3 = xindex % 68
x1 = xindex // 17 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (68 + x3 + 204 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 68
rnumel = 17
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 17 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 17 * x0), tmp11, rmask & xmask)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 17 % 12
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_6(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 17
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 68 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 17 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 17 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 17), (17, 1))
assert_size_stride(primals_3, (12, 4, 1), (4, 1, 1))
assert_size_stride(primals_4, (12,), (1,))
assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
triton_poi_fused_add_cat_1[grid(272)](buf0, primals_1, primals_2,
buf1, 272, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_1
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 12, 17), (204, 17, 1))
buf3 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
triton_poi_fused_mul_2[grid(272)](buf2, primals_4, buf3, 272,
XBLOCK=128, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
triton_poi_fused_mul_3[grid(272)](buf2, primals_4, buf4, 272,
XBLOCK=256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 17, 4), (68, 1, 17),
0), buf4, out=buf5)
buf8 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32)
triton_per_fused__softmax_4[grid(68)](buf5, buf8, 68, 17, XBLOCK=8,
num_warps=2, num_stages=1)
del buf5
buf9 = buf2
del buf2
triton_poi_fused_convolution_5[grid(816)](buf9, primals_4, 816,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf10 = empty_strided_cuda((4, 17, 4), (68, 4, 1), torch.float32)
extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (4, 17, 4), (204,
1, 17), 136), out=buf10)
buf11 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32)
triton_poi_fused_convolution_6[grid(16, 17)](buf10, buf11, 16, 17,
XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_5, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 17), (68, 17, 1))
del buf11
buf13 = buf12
del buf12
triton_poi_fused_convolution_7[grid(272)](buf13, primals_6, 272,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
return reinterpret_tensor(buf13, (4, 4), (68, 17), 0
), primals_3, primals_5, buf1, buf8, reinterpret_tensor(buf10, (4,
4, 17), (68, 1, 4), 0), reinterpret_tensor(buf9, (4, 4, 17), (204,
17, 1), 136), buf3, reinterpret_tensor(buf4, (4, 17, 4), (68, 1, 17), 0
)
def count_flops_attn(model, _x, y):
"""
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
"""
b, c, *spatial = y[0].shape
num_spatial = int(np.prod(spatial))
matmul_ops = 2 * b * num_spatial ** 2 * c
model.total_ops += th.DoubleTensor([matmul_ops])
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
class QKVAttention(nn.Module):
"""
A module which performs QKV attention and splits in a different order.
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.chunk(3, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum('bct,bcs->bts', (q * scale).view(bs * self.
n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch,
length))
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum('bts,bcs->bct', weight, v.reshape(bs * self.n_heads,
ch, length))
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
class AttentionPool2dNew(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(self, spacial_dim: 'int', embed_dim: 'int',
num_heads_channels: 'int', output_dim: 'int'=None):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim,
spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, input_0):
primals_2 = self.positional_embedding
primals_3 = self.qkv_proj.weight
primals_4 = self.qkv_proj.bias
primals_5 = self.c_proj.weight
primals_6 = self.c_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
Liujingxiu23/guided-diffusion
|
AttentionPool2d
| false
| 5,573
|
[
"MIT"
] | 1
|
0ba878e517b276c45d1195eb29f6f5f72659a05b
|
https://github.com/Liujingxiu23/guided-diffusion/tree/0ba878e517b276c45d1195eb29f6f5f72659a05b
|
CrossEntropyDiceLoss
|
import torch
from typing import Union
from typing import Optional
from typing import Iterable
from torch import nn
class FScoreLoss(nn.modules.loss._WeightedLoss):
"""Uses the 1 - F-score as a loss.
.. math::
        F = \frac{ (1 + \beta^2) TP }{ (1 + \beta^2) TP + \beta^2 FN + FP }
Args:
beta: The beta in the above formula.
eps: Epsilon for numerical stability.
tp_bias: This is added to the TP count. Should add a little stability for
very small structures.
make_onehot: Convert the target segmentation to onehot internally. Turn this
off if the target is already onehot.
make_onehot_newaxis: 'newaxis' argument for the onehot conversion.
ignore_index: These classes will not contribute to the loss. Has no effect if
reduction is "none".
weight: Weights for the different classes. Has no effect if reduction is "none".
reduction: "mean", "sum", or "none".
"""
def __init__(self, beta: 'float'=1.0, eps: 'float'=1e-06, tp_bias:
'Union[int, float]'=0, make_onehot: 'bool'=False,
make_onehot_newaxis: 'bool'=False, ignore_index:
'Optional[Union[int, Iterable[int]]]'=None, weight:
'Optional[torch.Tensor]'=None, reduction='mean', **kwargs):
super(FScoreLoss, self).__init__(weight=weight, reduction=reduction)
self.beta = beta
self.eps = eps
self.tp_bias = tp_bias
self.make_onehot = make_onehot
self.make_onehot_newaxis = make_onehot_newaxis
self.ignore_index = ignore_index
if isinstance(ignore_index, int):
self.ignore_index = [ignore_index]
def forward(self, input_: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
"""Forward pass through the loss.
Args:
input_: Input with expected shape (B, C, ...) with C the number of classes.
target: Target, either onehot with shape (B, C, ...), or not with shape
either (B, ...) or (B, 1, ...). Make sure to set the make_onehot and
make_onehot_newaxis arguments appropriately.
Returns:
The loss.
"""
assert not target.requires_grad
if self.make_onehot:
target = make_onehot_segmentation(target, range(input_.shape[1]
), newaxis=self.make_onehot_newaxis)
target = target.float()
tp = input_ * target
fn = (1 - input_) * target
fp = input_ * (1 - target)
while tp.dim() > 2:
tp = tp.sum(-1, keepdim=False)
fn = fn.sum(-1, keepdim=False)
fp = fp.sum(-1, keepdim=False)
tp += self.tp_bias
result = 1 - (1 + self.beta * self.beta) * tp / ((1 + self.beta *
self.beta) * tp + self.beta * self.beta * fn + fp + self.eps)
if self.reduction != 'none':
if self.weight is not None:
self.weight = self.weight.float()
self.weight = self.weight
weight = self.weight.expand_as(result)
result = result * weight
if self.ignore_index is not None:
for cls in sorted(self.ignore_index, reverse=True):
if cls == result.shape[1] - 1:
result = result[:, :-1]
elif cls == 0:
result = result[:, 1:]
else:
result = torch.cat([result[:, :cls], result[:, cls +
1:]], 1)
if self.reduction == 'mean':
result = torch.mean(result)
elif self.reduction == 'sum':
result = torch.sum(result)
else:
raise ValueError(
"reduction must be 'none', 'mean' or 'sum', but is {}".
format(self.reduction))
return result
class DiceLoss(FScoreLoss):
"""FScoreLoss with beta=1."""
def __init__(self, **kwargs):
kwargs['beta'] = 1.0
super(DiceLoss, self).__init__(**kwargs)
def forward(self, input_, target):
return super(DiceLoss, self).forward(input_, target)
class CrossEntropyDiceLoss(DiceLoss):
"""Weighted sum of CE and Dice losses.
Expects softmax inputs!
Args:
ce_weight: The weight for the CE loss. Weight for the Dice loss will be
1 - ce_weight.
"""
def __init__(self, ce_weight: 'float'=0.5, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ce_weight = ce_weight
self.ce = nn.NLLLoss(weight=kwargs.get('weight', None))
def forward(self, input_: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
"""Forward pass through the loss.
Args:
input_: Input with expected shape (B, C, ...) with C the number of classes.
target: Target, either onehot with shape (B, C, ...), or not with shape
either (B, ...) or (B, 1, ...). Make sure to set the make_onehot and
make_onehot_newaxis arguments appropriately.
Returns:
The loss.
"""
dice = super().forward(input_, target)
if self.make_onehot:
if self.make_onehot_newaxis:
pass
else:
target = target.squeeze()
else:
target = torch.argmax(target, 1, keepdim=False)
target = target.long()
ce = self.ce(torch.log(input_ + self.eps), target)
return (1 - self.ce_weight) * dice + self.ce_weight * ce
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
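# Illustrative usage sketch (added for clarity, not part of the original
# module): the loss expects softmax probabilities over the class dimension and
# returns a scalar mixing the Dice and NLL terms. The _demo_* names are
# placeholders.
_demo_pred = torch.softmax(torch.rand([4, 4, 4, 4]), dim=1)
_demo_target = torch.softmax(torch.rand([4, 4, 4, 4]), dim=1)
assert CrossEntropyDiceLoss(ce_weight=0.5)(_demo_pred, _demo_target).dim() == 0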
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from typing import Union
from typing import Optional
from typing import Iterable
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_0(in_ptr0, in_ptr1, out_ptr3,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 16 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 16 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 16 * r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 16 * r0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 16 * r0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 16 * r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 16 * r0), None, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 16 * r0), None, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (4 + 16 * r0), None, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (4 + 16 * r0), None, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (5 + 16 * r0), None, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (5 + 16 * r0), None, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (6 + 16 * r0), None, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr1 + (6 + 16 * r0), None, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr0 + (7 + 16 * r0), None, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (7 + 16 * r0), None, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr0 + (8 + 16 * r0), None, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr1 + (8 + 16 * r0), None, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr0 + (9 + 16 * r0), None, eviction_policy='evict_last'
)
tmp35 = tl.load(in_ptr1 + (9 + 16 * r0), None, eviction_policy='evict_last'
)
tmp38 = tl.load(in_ptr0 + (10 + 16 * r0), None, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr1 + (10 + 16 * r0), None, eviction_policy=
'evict_last')
tmp42 = tl.load(in_ptr0 + (11 + 16 * r0), None, eviction_policy=
'evict_last')
tmp43 = tl.load(in_ptr1 + (11 + 16 * r0), None, eviction_policy=
'evict_last')
tmp47 = tl.load(in_ptr0 + (12 + 16 * r0), None, eviction_policy=
'evict_last')
tmp48 = tl.load(in_ptr1 + (12 + 16 * r0), None, eviction_policy=
'evict_last')
tmp50 = tl.load(in_ptr0 + (13 + 16 * r0), None, eviction_policy=
'evict_last')
tmp51 = tl.load(in_ptr1 + (13 + 16 * r0), None, eviction_policy=
'evict_last')
tmp54 = tl.load(in_ptr0 + (14 + 16 * r0), None, eviction_policy=
'evict_last')
tmp55 = tl.load(in_ptr1 + (14 + 16 * r0), None, eviction_policy=
'evict_last')
tmp58 = tl.load(in_ptr0 + (15 + 16 * r0), None, eviction_policy=
'evict_last')
tmp59 = tl.load(in_ptr1 + (15 + 16 * r0), None, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = tmp15 * tmp16
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tmp14 + tmp29
tmp33 = tmp31 * tmp32
tmp36 = tmp34 * tmp35
tmp37 = tmp33 + tmp36
tmp40 = tmp38 * tmp39
tmp41 = tmp37 + tmp40
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp30 + tmp45
tmp49 = tmp47 * tmp48
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp56 = tmp54 * tmp55
tmp57 = tmp53 + tmp56
tmp60 = tmp58 * tmp59
tmp61 = tmp57 + tmp60
tmp62 = tmp46 + tmp61
tmp63 = 1.0
tmp64 = tmp63 - tmp0
tmp65 = tmp64 * tmp1
tmp66 = tmp63 - tmp3
tmp67 = tmp66 * tmp4
tmp68 = tmp65 + tmp67
tmp69 = tmp63 - tmp7
tmp70 = tmp69 * tmp8
tmp71 = tmp68 + tmp70
tmp72 = tmp63 - tmp11
tmp73 = tmp72 * tmp12
tmp74 = tmp71 + tmp73
tmp75 = tmp63 - tmp15
tmp76 = tmp75 * tmp16
tmp77 = tmp63 - tmp18
tmp78 = tmp77 * tmp19
tmp79 = tmp76 + tmp78
tmp80 = tmp63 - tmp22
tmp81 = tmp80 * tmp23
tmp82 = tmp79 + tmp81
tmp83 = tmp63 - tmp26
tmp84 = tmp83 * tmp27
tmp85 = tmp82 + tmp84
tmp86 = tmp74 + tmp85
tmp87 = tmp63 - tmp31
tmp88 = tmp87 * tmp32
tmp89 = tmp63 - tmp34
tmp90 = tmp89 * tmp35
tmp91 = tmp88 + tmp90
tmp92 = tmp63 - tmp38
tmp93 = tmp92 * tmp39
tmp94 = tmp91 + tmp93
tmp95 = tmp63 - tmp42
tmp96 = tmp95 * tmp43
tmp97 = tmp94 + tmp96
tmp98 = tmp86 + tmp97
tmp99 = tmp63 - tmp47
tmp100 = tmp99 * tmp48
tmp101 = tmp63 - tmp50
tmp102 = tmp101 * tmp51
tmp103 = tmp100 + tmp102
tmp104 = tmp63 - tmp54
tmp105 = tmp104 * tmp55
tmp106 = tmp103 + tmp105
tmp107 = tmp63 - tmp58
tmp108 = tmp107 * tmp59
tmp109 = tmp106 + tmp108
tmp110 = tmp98 + tmp109
tmp111 = tmp63 - tmp1
tmp112 = tmp0 * tmp111
tmp113 = tmp63 - tmp4
tmp114 = tmp3 * tmp113
tmp115 = tmp112 + tmp114
tmp116 = tmp63 - tmp8
tmp117 = tmp7 * tmp116
tmp118 = tmp115 + tmp117
tmp119 = tmp63 - tmp12
tmp120 = tmp11 * tmp119
tmp121 = tmp118 + tmp120
tmp122 = tmp63 - tmp16
tmp123 = tmp15 * tmp122
tmp124 = tmp63 - tmp19
tmp125 = tmp18 * tmp124
tmp126 = tmp123 + tmp125
tmp127 = tmp63 - tmp23
tmp128 = tmp22 * tmp127
tmp129 = tmp126 + tmp128
tmp130 = tmp63 - tmp27
tmp131 = tmp26 * tmp130
tmp132 = tmp129 + tmp131
tmp133 = tmp121 + tmp132
tmp134 = tmp63 - tmp32
tmp135 = tmp31 * tmp134
tmp136 = tmp63 - tmp35
tmp137 = tmp34 * tmp136
tmp138 = tmp135 + tmp137
tmp139 = tmp63 - tmp39
tmp140 = tmp38 * tmp139
tmp141 = tmp138 + tmp140
tmp142 = tmp63 - tmp43
tmp143 = tmp42 * tmp142
tmp144 = tmp141 + tmp143
tmp145 = tmp133 + tmp144
tmp146 = tmp63 - tmp48
tmp147 = tmp47 * tmp146
tmp148 = tmp63 - tmp51
tmp149 = tmp50 * tmp148
tmp150 = tmp147 + tmp149
tmp151 = tmp63 - tmp55
tmp152 = tmp54 * tmp151
tmp153 = tmp150 + tmp152
tmp154 = tmp63 - tmp59
tmp155 = tmp58 * tmp154
tmp156 = tmp153 + tmp155
tmp157 = tmp145 + tmp156
tmp158 = 0.0
tmp159 = tmp62 + tmp158
tmp160 = 2.0
tmp161 = tmp159 * tmp160
tmp162 = tmp110 * tmp63
tmp163 = tmp161 + tmp162
tmp164 = tmp163 + tmp157
tmp165 = 1e-06
tmp166 = tmp164 + tmp165
tmp167 = tmp161 / tmp166
tmp168 = tmp63 - tmp167
tmp169 = tl.broadcast_to(tmp168, [XBLOCK, RBLOCK])
tmp171 = tl.sum(tmp169, 1)[:, None]
tl.store(out_ptr3 + tl.full([XBLOCK, 1], 0, tl.int32), tmp171, None)
@triton.jit
def triton_per_fused_add_argmax_div_mean_mul_nll_loss2d_forward_rsub_1(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp17 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp32 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp69 = tl.load(in_out_ptr0 + 0)
tmp70 = tl.broadcast_to(tmp69, [XBLOCK, 1])
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1, 1], 0, tl.int64)
tmp11 = tl.full([1, 1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1, 1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1, 1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tl.full([1, 1], -100, tl.int64)
tmp48 = tmp46 != tmp47
tmp49 = tl.where(tmp48, tmp46, tmp10)
tmp50 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp51 = tmp49 + tmp50
tmp52 = tmp49 < 0
tmp53 = tl.where(tmp52, tmp51, tmp49)
tl.device_assert((0 <= tmp53) & (tmp53 < 4),
'index out of bounds: 0 <= tmp53 < 4')
tmp55 = tl.load(in_ptr1 + (r0 + 16 * tmp53 + 64 * r1), None)
tmp56 = 1e-06
tmp57 = tmp55 + tmp56
tmp58 = tl_math.log(tmp57)
tmp59 = -tmp58
tmp60 = 0.0
tmp61 = tl.where(tmp48, tmp59, tmp60)
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp64 = tl.sum(tmp62, 1)[:, None]
tmp65 = tmp48.to(tl.int64)
tmp66 = tl.broadcast_to(tmp65, [XBLOCK, RBLOCK])
tmp68 = tl.sum(tmp66, 1)[:, None]
tmp71 = 16.0
tmp72 = tmp70 / tmp71
tmp73 = 0.5
tmp74 = tmp72 * tmp73
tmp75 = tmp68.to(tl.float32)
tmp76 = tmp64 / tmp75
tmp77 = tmp76 * tmp73
tmp78 = tmp74 + tmp77
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp78, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_rsub_sum_0[grid(1)](arg1_1,
arg0_1, buf3, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf7 = buf3
del buf3
triton_per_fused_add_argmax_div_mean_mul_nll_loss2d_forward_rsub_1[grid
(1)](buf7, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf7,
class FScoreLoss(nn.modules.loss._WeightedLoss):
"""Uses the 1 - F-score as a loss.
.. math::
        F = \frac{ (1 + \beta^2) TP }{ (1 + \beta^2) TP + \beta^2 FN + FP }
Args:
beta: The beta in the above formula.
eps: Epsilon for numerical stability.
tp_bias: This is added to the TP count. Should add a little stability for
very small structures.
make_onehot: Convert the target segmentation to onehot internally. Turn this
off if the target is already onehot.
make_onehot_newaxis: 'newaxis' argument for the onehot conversion.
ignore_index: These classes will not contribute to the loss. Has no effect if
reduction is "none".
weight: Weights for the different classes. Has no effect if reduction is "none".
reduction: "mean", "sum", or "none".
"""
def __init__(self, beta: 'float'=1.0, eps: 'float'=1e-06, tp_bias:
'Union[int, float]'=0, make_onehot: 'bool'=False,
make_onehot_newaxis: 'bool'=False, ignore_index:
'Optional[Union[int, Iterable[int]]]'=None, weight:
'Optional[torch.Tensor]'=None, reduction='mean', **kwargs):
super(FScoreLoss, self).__init__(weight=weight, reduction=reduction)
self.beta = beta
self.eps = eps
self.tp_bias = tp_bias
self.make_onehot = make_onehot
self.make_onehot_newaxis = make_onehot_newaxis
self.ignore_index = ignore_index
if isinstance(ignore_index, int):
self.ignore_index = [ignore_index]
def forward(self, input_: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
"""Forward pass through the loss.
Args:
input_: Input with expected shape (B, C, ...) with C the number of classes.
target: Target, either onehot with shape (B, C, ...), or not with shape
either (B, ...) or (B, 1, ...). Make sure to set the make_onehot and
make_onehot_newaxis arguments appropriately.
Returns:
The loss.
"""
assert not target.requires_grad
if self.make_onehot:
target = make_onehot_segmentation(target, range(input_.shape[1]
), newaxis=self.make_onehot_newaxis)
target = target.float()
tp = input_ * target
fn = (1 - input_) * target
fp = input_ * (1 - target)
while tp.dim() > 2:
tp = tp.sum(-1, keepdim=False)
fn = fn.sum(-1, keepdim=False)
fp = fp.sum(-1, keepdim=False)
tp += self.tp_bias
result = 1 - (1 + self.beta * self.beta) * tp / ((1 + self.beta *
self.beta) * tp + self.beta * self.beta * fn + fp + self.eps)
if self.reduction != 'none':
if self.weight is not None:
self.weight = self.weight.float()
self.weight = self.weight
weight = self.weight.expand_as(result)
result = result * weight
if self.ignore_index is not None:
for cls in sorted(self.ignore_index, reverse=True):
if cls == result.shape[1] - 1:
result = result[:, :-1]
elif cls == 0:
result = result[:, 1:]
else:
result = torch.cat([result[:, :cls], result[:, cls +
1:]], 1)
if self.reduction == 'mean':
result = torch.mean(result)
elif self.reduction == 'sum':
result = torch.sum(result)
else:
raise ValueError(
"reduction must be 'none', 'mean' or 'sum', but is {}".
format(self.reduction))
return result
class DiceLoss(FScoreLoss):
"""FScoreLoss with beta=1."""
def __init__(self, **kwargs):
kwargs['beta'] = 1.0
super(DiceLoss, self).__init__(**kwargs)
def forward(self, input_, target):
return super(DiceLoss, self).forward(input_, target)
class CrossEntropyDiceLossNew(DiceLoss):
"""Weighted sum of CE and Dice losses.
Expects softmax inputs!
Args:
ce_weight: The weight for the CE loss. Weight for the Dice loss will be
1 - ce_weight.
"""
def __init__(self, ce_weight: 'float'=0.5, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ce_weight = ce_weight
self.ce = nn.NLLLoss(weight=kwargs.get('weight', None))
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
MIC-DKFZ/image-time-series
|
CrossEntropyDiceLoss
| false
| 5,574
|
[
"MIT"
] | 1
|
0480d5cb6936c7d9e839b6741f18c10893d78d8a
|
https://github.com/MIC-DKFZ/image-time-series/tree/0480d5cb6936c7d9e839b6741f18c10893d78d8a
|
AvgPoolPad
|
import torch
from torch import nn
class AvgPoolPad(nn.Module):
def __init__(self, stride=2, padding=1):
super(AvgPoolPad, self).__init__()
self.pad = nn.ZeroPad2d((1, 0, 1, 0))
self.pool = nn.AvgPool2d(3, stride=stride, padding=padding,
count_include_pad=False)
def forward(self, x):
x = self.pad(x)
x = self.pool(x)
x = x[:, :, 1:, 1:]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
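# Illustrative shape sketch (added for clarity, not part of the original
# module): the extra zero pad plus the [1:, 1:] crop keeps the usual stride-2
# output size, so a 4x4 map pools down to 2x2. The _demo_* name is a placeholder.
_demo_out = AvgPoolPad()(torch.rand([4, 4, 4, 4]))
assert _demo_out.shape == (4, 4, 2, 2)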
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3 % 3
x0 = xindex % 3
x2 = xindex // 9
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 5, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = -2 + 2 * x1
tmp12 = tmp11 >= tmp1
tmp13 = -2 + 2 * x0
tmp14 = tmp13 >= tmp1
tmp15 = tmp12 & tmp14
tmp16 = tmp15 & tmp10
tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 &
xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tmp20 = 2 * x0
tmp21 = tmp20 >= tmp1
tmp22 = tmp20 < tmp3
tmp23 = tmp21 & tmp22
tmp24 = tmp5 & tmp23
tmp25 = tmp12 & tmp7
tmp26 = tmp25 & tmp24
tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 &
xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp24, tmp27, tmp28)
tmp30 = tmp29 + tmp19
tmp31 = 1 + 2 * x0
tmp32 = tmp31 >= tmp1
tmp33 = tmp31 < tmp3
tmp34 = tmp32 & tmp33
tmp35 = tmp5 & tmp34
tmp36 = tmp12 & tmp21
tmp37 = tmp36 & tmp35
tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 &
xmask, eviction_policy='evict_last', other=0.0)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp35, tmp38, tmp39)
tmp41 = tmp40 + tmp30
tmp42 = 2 * x1
tmp43 = tmp42 >= tmp1
tmp44 = tmp42 < tmp3
tmp45 = tmp43 & tmp44
tmp46 = tmp45 & tmp9
tmp47 = tmp2 & tmp14
tmp48 = tmp47 & tmp46
tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 &
xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp46, tmp49, tmp50)
tmp52 = tmp51 + tmp41
tmp53 = tmp45 & tmp23
tmp54 = tmp2 & tmp7
tmp55 = tmp54 & tmp53
tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 &
xmask, eviction_policy='evict_last', other=0.0)
tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
tmp58 = tl.where(tmp53, tmp56, tmp57)
tmp59 = tmp58 + tmp52
tmp60 = tmp45 & tmp34
tmp61 = tmp2 & tmp21
tmp62 = tmp61 & tmp60
tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 &
xmask, eviction_policy='evict_last', other=0.0)
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp60, tmp63, tmp64)
tmp66 = tmp65 + tmp59
tmp67 = 1 + 2 * x1
tmp68 = tmp67 >= tmp1
tmp69 = tmp67 < tmp3
tmp70 = tmp68 & tmp69
tmp71 = tmp70 & tmp9
tmp72 = tmp43 & tmp14
tmp73 = tmp72 & tmp71
tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 &
xmask, eviction_policy='evict_last', other=0.0)
tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
tmp76 = tl.where(tmp71, tmp74, tmp75)
tmp77 = tmp76 + tmp66
tmp78 = tmp70 & tmp23
tmp79 = tmp43 & tmp7
tmp80 = tmp79 & tmp78
tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 &
xmask, eviction_policy='evict_last', other=0.0)
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp78, tmp81, tmp82)
tmp84 = tmp83 + tmp77
tmp85 = tmp70 & tmp34
tmp86 = tmp43 & tmp21
tmp87 = tmp86 & tmp85
tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask,
eviction_policy='evict_last', other=0.0)
tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype)
tmp90 = tl.where(tmp85, tmp88, tmp89)
tmp91 = tmp90 + tmp84
tmp92 = (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (
0 * (0 >= -1 + 2 * x1) + (-1 + 2 * x1) * (-1 + 2 * x1 > 0)) + (5 *
(5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) * (5 * (5 <= 2 +
2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 *
x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (5 * (5 <= 2 + 2 * x1) +
(2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x1) + (
-1 + 2 * x1) * (-1 + 2 * x1 > 0)) * (5 * (5 <= 2 + 2 * x0) + (2 + 2 *
x0) * (2 + 2 * x0 < 5))
tmp93 = tmp91 / tmp92
tl.store(out_ptr0 + x4, tmp93, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_constant_pad_nd_0[grid(144)](arg0_1,
buf0, 144, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4),
class AvgPoolPadNew(nn.Module):
def __init__(self, stride=2, padding=1):
super(AvgPoolPadNew, self).__init__()
self.pad = nn.ZeroPad2d((1, 0, 1, 0))
self.pool = nn.AvgPool2d(3, stride=stride, padding=padding,
count_include_pad=False)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
MarioProjects/pytorchlib
|
AvgPoolPad
| false
| 5,575
|
[
"MIT"
] | 1
|
81ea32304d899fbd10ae1efe1d124c0d7bc96f5c
|
https://github.com/MarioProjects/pytorchlib/tree/81ea32304d899fbd10ae1efe1d124c0d7bc96f5c
|
MaxPoolPad
|
import torch
from torch import nn
class MaxPoolPad(nn.Module):
def __init__(self):
super(MaxPoolPad, self).__init__()
self.pad = nn.ZeroPad2d((1, 0, 1, 0))
self.pool = nn.MaxPool2d(3, stride=2, padding=1)
def forward(self, x):
x = self.pad(x)
x = self.pool(x)
x = x[:, :, 1:, 1:]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3 % 3
x0 = xindex % 3
x2 = xindex // 9
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 5, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = -2 + 2 * x1
tmp12 = tmp11 >= tmp1
tmp13 = -2 + 2 * x0
tmp14 = tmp13 >= tmp1
tmp15 = tmp12 & tmp14
tmp16 = tmp15 & tmp10
tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 &
xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.full(tmp17.shape, float('-inf'), tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tmp20 = 2 * x0
tmp21 = tmp20 >= tmp1
tmp22 = tmp20 < tmp3
tmp23 = tmp21 & tmp22
tmp24 = tmp5 & tmp23
tmp25 = tmp12 & tmp7
tmp26 = tmp25 & tmp24
tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 &
xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.full(tmp27.shape, float('-inf'), tmp27.dtype)
tmp29 = tl.where(tmp24, tmp27, tmp28)
tmp30 = triton_helpers.maximum(tmp29, tmp19)
tmp31 = 1 + 2 * x0
tmp32 = tmp31 >= tmp1
tmp33 = tmp31 < tmp3
tmp34 = tmp32 & tmp33
tmp35 = tmp5 & tmp34
tmp36 = tmp12 & tmp21
tmp37 = tmp36 & tmp35
tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 &
xmask, eviction_policy='evict_last', other=0.0)
tmp39 = tl.full(tmp38.shape, float('-inf'), tmp38.dtype)
tmp40 = tl.where(tmp35, tmp38, tmp39)
tmp41 = triton_helpers.maximum(tmp40, tmp30)
tmp42 = 2 * x1
tmp43 = tmp42 >= tmp1
tmp44 = tmp42 < tmp3
tmp45 = tmp43 & tmp44
tmp46 = tmp45 & tmp9
tmp47 = tmp2 & tmp14
tmp48 = tmp47 & tmp46
tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 &
xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.full(tmp49.shape, float('-inf'), tmp49.dtype)
tmp51 = tl.where(tmp46, tmp49, tmp50)
tmp52 = triton_helpers.maximum(tmp51, tmp41)
tmp53 = tmp45 & tmp23
tmp54 = tmp2 & tmp7
tmp55 = tmp54 & tmp53
tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 &
xmask, eviction_policy='evict_last', other=0.0)
tmp57 = tl.full(tmp56.shape, float('-inf'), tmp56.dtype)
tmp58 = tl.where(tmp53, tmp56, tmp57)
tmp59 = triton_helpers.maximum(tmp58, tmp52)
tmp60 = tmp45 & tmp34
tmp61 = tmp2 & tmp21
tmp62 = tmp61 & tmp60
tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 &
xmask, eviction_policy='evict_last', other=0.0)
tmp64 = tl.full(tmp63.shape, float('-inf'), tmp63.dtype)
tmp65 = tl.where(tmp60, tmp63, tmp64)
tmp66 = triton_helpers.maximum(tmp65, tmp59)
tmp67 = 1 + 2 * x1
tmp68 = tmp67 >= tmp1
tmp69 = tmp67 < tmp3
tmp70 = tmp68 & tmp69
tmp71 = tmp70 & tmp9
tmp72 = tmp43 & tmp14
tmp73 = tmp72 & tmp71
tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 &
xmask, eviction_policy='evict_last', other=0.0)
tmp75 = tl.full(tmp74.shape, float('-inf'), tmp74.dtype)
tmp76 = tl.where(tmp71, tmp74, tmp75)
tmp77 = triton_helpers.maximum(tmp76, tmp66)
tmp78 = tmp70 & tmp23
tmp79 = tmp43 & tmp7
tmp80 = tmp79 & tmp78
tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 &
xmask, eviction_policy='evict_last', other=0.0)
tmp82 = tl.full(tmp81.shape, float('-inf'), tmp81.dtype)
tmp83 = tl.where(tmp78, tmp81, tmp82)
tmp84 = triton_helpers.maximum(tmp83, tmp77)
tmp85 = tmp70 & tmp34
tmp86 = tmp43 & tmp21
tmp87 = tmp86 & tmp85
tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask,
eviction_policy='evict_last', other=0.0)
tmp89 = tl.full(tmp88.shape, float('-inf'), tmp88.dtype)
tmp90 = tl.where(tmp85, tmp88, tmp89)
tmp91 = triton_helpers.maximum(tmp90, tmp84)
tl.store(out_ptr0 + x4, tmp91, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0[grid(144)](
arg0_1, buf0, 144, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4),
class MaxPoolPadNew(nn.Module):
def __init__(self):
super(MaxPoolPadNew, self).__init__()
self.pad = nn.ZeroPad2d((1, 0, 1, 0))
self.pool = nn.MaxPool2d(3, stride=2, padding=1)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
MarioProjects/pytorchlib
|
MaxPoolPad
| false
| 5,576
|
[
"MIT"
] | 1
|
81ea32304d899fbd10ae1efe1d124c0d7bc96f5c
|
https://github.com/MarioProjects/pytorchlib/tree/81ea32304d899fbd10ae1efe1d124c0d7bc96f5c
|
Conv2d
|
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL
='relu', same_padding=False, bn=False):
super(Conv2d, self).__init__()
padding = int((kernel_size - 1) / 2) if same_padding else 0
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=padding)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
affine=True) if bn else None
if NL == 'relu':
self.relu = nn.ReLU(inplace=True)
elif NL == 'prelu':
self.relu = nn.PReLU()
else:
self.relu = None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
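# Illustrative usage sketch (added for clarity, not part of the original
# module): with same_padding=True and an odd kernel size the spatial
# resolution is preserved. The _demo_* name is a placeholder.
_demo_out = Conv2d(4, 8, 3, same_padding=True)(torch.rand([4, 4, 4, 4]))
assert _demo_out.shape == (4, 8, 4, 4)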
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(16)](buf1,
primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class Conv2dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL
='relu', same_padding=False, bn=False):
super(Conv2dNew, self).__init__()
padding = int((kernel_size - 1) / 2) if same_padding else 0
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=padding)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
affine=True) if bn else None
if NL == 'relu':
self.relu = nn.ReLU(inplace=True)
elif NL == 'prelu':
self.relu = nn.PReLU()
else:
self.relu = None
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
MONICA-Project/sfn
|
Conv2d
| false
| 5,577
|
[
"Apache-2.0"
] | 1
|
40509e520e83441068b5a2d151864fe3a5814d5e
|
https://github.com/MONICA-Project/sfn/tree/40509e520e83441068b5a2d151864fe3a5814d5e
|
Policy
|
import torch
from copy import deepcopy
import torch.nn as nn
class Policy(nn.Module):
def __init__(self, max_nodes, search_space):
super(Policy, self).__init__()
self.max_nodes = max_nodes
self.search_space = deepcopy(search_space)
self.edge2index = {}
for i in range(1, max_nodes):
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
self.edge2index[node_str] = len(self.edge2index)
self.arch_parameters = nn.Parameter(0.001 * torch.randn(len(self.
edge2index), len(search_space)))
def generate_arch(self, actions):
genotypes = []
for i in range(1, self.max_nodes):
xlist = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
op_name = self.search_space[actions[self.edge2index[node_str]]]
xlist.append((op_name, j))
genotypes.append(tuple(xlist))
return CellStructure(genotypes)
def genotype(self):
genotypes = []
for i in range(1, self.max_nodes):
xlist = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
with torch.no_grad():
weights = self.arch_parameters[self.edge2index[node_str]]
op_name = self.search_space[weights.argmax().item()]
xlist.append((op_name, j))
genotypes.append(tuple(xlist))
return CellStructure(genotypes)
def forward(self):
alphas = nn.functional.softmax(self.arch_parameters, dim=-1)
return alphas
def get_inputs():
return []
def get_init_inputs():
return [[], {'max_nodes': 4, 'search_space': [4, 4]}]
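# Illustrative usage sketch (added for clarity, not part of the original
# module): max_nodes=4 gives 1 + 2 + 3 = 6 edges, so the softmaxed
# architecture weights have one row per edge and one column per candidate
# operation. The _demo_* name is a placeholder.
_demo_alphas = Policy(max_nodes=4, search_space=['op_a', 'op_b'])()
assert _demo_alphas.shape == (6, 2)
assert torch.allclose(_demo_alphas.sum(dim=-1), torch.ones(6))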
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from copy import deepcopy
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (6, 2), (2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((6, 2), (2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(12)](primals_1, buf0, 12, XBLOCK=
16, num_warps=1, num_stages=1)
del primals_1
return buf0, buf0
class PolicyNew(nn.Module):
def __init__(self, max_nodes, search_space):
super(PolicyNew, self).__init__()
self.max_nodes = max_nodes
self.search_space = deepcopy(search_space)
self.edge2index = {}
for i in range(1, max_nodes):
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
self.edge2index[node_str] = len(self.edge2index)
self.arch_parameters = nn.Parameter(0.001 * torch.randn(len(self.
edge2index), len(search_space)))
def generate_arch(self, actions):
genotypes = []
for i in range(1, self.max_nodes):
xlist = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
op_name = self.search_space[actions[self.edge2index[node_str]]]
xlist.append((op_name, j))
genotypes.append(tuple(xlist))
return CellStructure(genotypes)
def genotype(self):
genotypes = []
for i in range(1, self.max_nodes):
xlist = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
with torch.no_grad():
weights = self.arch_parameters[self.edge2index[node_str]]
op_name = self.search_space[weights.argmax().item()]
xlist.append((op_name, j))
genotypes.append(tuple(xlist))
return CellStructure(genotypes)
def forward(self):
primals_1 = self.arch_parameters
output = call([primals_1])
return output[0]
|
MUST-AI-Lab/NAS-Projects
|
Policy
| false
| 5,578
|
[
"MIT"
] | 1
|
fcb2aae34a2b3c02877fbdb41cda45e1e73327a6
|
https://github.com/MUST-AI-Lab/NAS-Projects/tree/fcb2aae34a2b3c02877fbdb41cda45e1e73327a6
|
FScoreLoss
|
import torch
from typing import Union
from typing import Optional
from typing import Iterable
from torch import nn
class FScoreLoss(nn.modules.loss._WeightedLoss):
"""Uses the 1 - F-score as a loss.
.. math::
        F = \frac{ (1 + \beta^2) TP }{ (1 + \beta^2) TP + \beta^2 FN + FP }
Args:
beta: The beta in the above formula.
eps: Epsilon for numerical stability.
tp_bias: This is added to the TP count. Should add a little stability for
very small structures.
make_onehot: Convert the target segmentation to onehot internally. Turn this
off if the target is already onehot.
make_onehot_newaxis: 'newaxis' argument for the onehot conversion.
ignore_index: These classes will not contribute to the loss. Has no effect if
reduction is "none".
weight: Weights for the different classes. Has no effect if reduction is "none".
reduction: "mean", "sum", or "none".
"""
def __init__(self, beta: 'float'=1.0, eps: 'float'=1e-06, tp_bias:
'Union[int, float]'=0, make_onehot: 'bool'=False,
make_onehot_newaxis: 'bool'=False, ignore_index:
'Optional[Union[int, Iterable[int]]]'=None, weight:
'Optional[torch.Tensor]'=None, reduction='mean', **kwargs):
super(FScoreLoss, self).__init__(weight=weight, reduction=reduction)
self.beta = beta
self.eps = eps
self.tp_bias = tp_bias
self.make_onehot = make_onehot
self.make_onehot_newaxis = make_onehot_newaxis
self.ignore_index = ignore_index
if isinstance(ignore_index, int):
self.ignore_index = [ignore_index]
def forward(self, input_: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
"""Forward pass through the loss.
Args:
input_: Input with expected shape (B, C, ...) with C the number of classes.
target: Target, either onehot with shape (B, C, ...), or not with shape
either (B, ...) or (B, 1, ...). Make sure to set the make_onehot and
make_onehot_newaxis arguments appropriately.
Returns:
The loss.
"""
assert not target.requires_grad
if self.make_onehot:
target = make_onehot_segmentation(target, range(input_.shape[1]
), newaxis=self.make_onehot_newaxis)
target = target.float()
tp = input_ * target
fn = (1 - input_) * target
fp = input_ * (1 - target)
while tp.dim() > 2:
tp = tp.sum(-1, keepdim=False)
fn = fn.sum(-1, keepdim=False)
fp = fp.sum(-1, keepdim=False)
tp += self.tp_bias
result = 1 - (1 + self.beta * self.beta) * tp / ((1 + self.beta *
self.beta) * tp + self.beta * self.beta * fn + fp + self.eps)
if self.reduction != 'none':
if self.weight is not None:
self.weight = self.weight.float()
self.weight = self.weight
weight = self.weight.expand_as(result)
result = result * weight
if self.ignore_index is not None:
for cls in sorted(self.ignore_index, reverse=True):
if cls == result.shape[1] - 1:
result = result[:, :-1]
elif cls == 0:
result = result[:, 1:]
else:
result = torch.cat([result[:, :cls], result[:, cls +
1:]], 1)
if self.reduction == 'mean':
result = torch.mean(result)
elif self.reduction == 'sum':
result = torch.sum(result)
else:
raise ValueError(
"reduction must be 'none', 'mean' or 'sum', but is {}".
format(self.reduction))
return result
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
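A minimal usage sketch (editorial addition, not part of the original record), assuming softmax probabilities as input and an already one-hot target, so make_onehot stays False (its helper make_onehot_segmentation is not defined in this snippet); with beta=1 the result is 1 minus the soft Dice score.
import torch
import torch.nn.functional as F
probs = torch.rand(2, 3, 8, 8).softmax(dim=1)                           # (B, C, H, W) predictions
labels = torch.randint(0, 3, (2, 8, 8))                                 # integer class map
target = F.one_hot(labels, num_classes=3).permute(0, 3, 1, 2).float()   # one-hot (B, C, H, W)
loss = FScoreLoss(beta=1.0)(probs, target)                              # scalar, mean over batch and classes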
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from typing import Union
from typing import Optional
from typing import Iterable
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 16 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 16 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 16 * r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 16 * r0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 16 * r0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 16 * r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 16 * r0), None, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 16 * r0), None, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (4 + 16 * r0), None, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (4 + 16 * r0), None, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (5 + 16 * r0), None, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (5 + 16 * r0), None, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (6 + 16 * r0), None, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr1 + (6 + 16 * r0), None, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr0 + (7 + 16 * r0), None, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (7 + 16 * r0), None, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr0 + (8 + 16 * r0), None, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr1 + (8 + 16 * r0), None, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr0 + (9 + 16 * r0), None, eviction_policy='evict_last'
)
tmp35 = tl.load(in_ptr1 + (9 + 16 * r0), None, eviction_policy='evict_last'
)
tmp38 = tl.load(in_ptr0 + (10 + 16 * r0), None, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr1 + (10 + 16 * r0), None, eviction_policy=
'evict_last')
tmp42 = tl.load(in_ptr0 + (11 + 16 * r0), None, eviction_policy=
'evict_last')
tmp43 = tl.load(in_ptr1 + (11 + 16 * r0), None, eviction_policy=
'evict_last')
tmp47 = tl.load(in_ptr0 + (12 + 16 * r0), None, eviction_policy=
'evict_last')
tmp48 = tl.load(in_ptr1 + (12 + 16 * r0), None, eviction_policy=
'evict_last')
tmp50 = tl.load(in_ptr0 + (13 + 16 * r0), None, eviction_policy=
'evict_last')
tmp51 = tl.load(in_ptr1 + (13 + 16 * r0), None, eviction_policy=
'evict_last')
tmp54 = tl.load(in_ptr0 + (14 + 16 * r0), None, eviction_policy=
'evict_last')
tmp55 = tl.load(in_ptr1 + (14 + 16 * r0), None, eviction_policy=
'evict_last')
tmp58 = tl.load(in_ptr0 + (15 + 16 * r0), None, eviction_policy=
'evict_last')
tmp59 = tl.load(in_ptr1 + (15 + 16 * r0), None, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = tmp15 * tmp16
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tmp14 + tmp29
tmp33 = tmp31 * tmp32
tmp36 = tmp34 * tmp35
tmp37 = tmp33 + tmp36
tmp40 = tmp38 * tmp39
tmp41 = tmp37 + tmp40
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp30 + tmp45
tmp49 = tmp47 * tmp48
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp56 = tmp54 * tmp55
tmp57 = tmp53 + tmp56
tmp60 = tmp58 * tmp59
tmp61 = tmp57 + tmp60
tmp62 = tmp46 + tmp61
tmp63 = 1.0
tmp64 = tmp63 - tmp0
tmp65 = tmp64 * tmp1
tmp66 = tmp63 - tmp3
tmp67 = tmp66 * tmp4
tmp68 = tmp65 + tmp67
tmp69 = tmp63 - tmp7
tmp70 = tmp69 * tmp8
tmp71 = tmp68 + tmp70
tmp72 = tmp63 - tmp11
tmp73 = tmp72 * tmp12
tmp74 = tmp71 + tmp73
tmp75 = tmp63 - tmp15
tmp76 = tmp75 * tmp16
tmp77 = tmp63 - tmp18
tmp78 = tmp77 * tmp19
tmp79 = tmp76 + tmp78
tmp80 = tmp63 - tmp22
tmp81 = tmp80 * tmp23
tmp82 = tmp79 + tmp81
tmp83 = tmp63 - tmp26
tmp84 = tmp83 * tmp27
tmp85 = tmp82 + tmp84
tmp86 = tmp74 + tmp85
tmp87 = tmp63 - tmp31
tmp88 = tmp87 * tmp32
tmp89 = tmp63 - tmp34
tmp90 = tmp89 * tmp35
tmp91 = tmp88 + tmp90
tmp92 = tmp63 - tmp38
tmp93 = tmp92 * tmp39
tmp94 = tmp91 + tmp93
tmp95 = tmp63 - tmp42
tmp96 = tmp95 * tmp43
tmp97 = tmp94 + tmp96
tmp98 = tmp86 + tmp97
tmp99 = tmp63 - tmp47
tmp100 = tmp99 * tmp48
tmp101 = tmp63 - tmp50
tmp102 = tmp101 * tmp51
tmp103 = tmp100 + tmp102
tmp104 = tmp63 - tmp54
tmp105 = tmp104 * tmp55
tmp106 = tmp103 + tmp105
tmp107 = tmp63 - tmp58
tmp108 = tmp107 * tmp59
tmp109 = tmp106 + tmp108
tmp110 = tmp98 + tmp109
tmp111 = tmp63 - tmp1
tmp112 = tmp0 * tmp111
tmp113 = tmp63 - tmp4
tmp114 = tmp3 * tmp113
tmp115 = tmp112 + tmp114
tmp116 = tmp63 - tmp8
tmp117 = tmp7 * tmp116
tmp118 = tmp115 + tmp117
tmp119 = tmp63 - tmp12
tmp120 = tmp11 * tmp119
tmp121 = tmp118 + tmp120
tmp122 = tmp63 - tmp16
tmp123 = tmp15 * tmp122
tmp124 = tmp63 - tmp19
tmp125 = tmp18 * tmp124
tmp126 = tmp123 + tmp125
tmp127 = tmp63 - tmp23
tmp128 = tmp22 * tmp127
tmp129 = tmp126 + tmp128
tmp130 = tmp63 - tmp27
tmp131 = tmp26 * tmp130
tmp132 = tmp129 + tmp131
tmp133 = tmp121 + tmp132
tmp134 = tmp63 - tmp32
tmp135 = tmp31 * tmp134
tmp136 = tmp63 - tmp35
tmp137 = tmp34 * tmp136
tmp138 = tmp135 + tmp137
tmp139 = tmp63 - tmp39
tmp140 = tmp38 * tmp139
tmp141 = tmp138 + tmp140
tmp142 = tmp63 - tmp43
tmp143 = tmp42 * tmp142
tmp144 = tmp141 + tmp143
tmp145 = tmp133 + tmp144
tmp146 = tmp63 - tmp48
tmp147 = tmp47 * tmp146
tmp148 = tmp63 - tmp51
tmp149 = tmp50 * tmp148
tmp150 = tmp147 + tmp149
tmp151 = tmp63 - tmp55
tmp152 = tmp54 * tmp151
tmp153 = tmp150 + tmp152
tmp154 = tmp63 - tmp59
tmp155 = tmp58 * tmp154
tmp156 = tmp153 + tmp155
tmp157 = tmp145 + tmp156
tmp158 = 0.0
tmp159 = tmp62 + tmp158
tmp160 = 2.0
tmp161 = tmp159 * tmp160
tmp162 = tmp110 * tmp63
tmp163 = tmp161 + tmp162
tmp164 = tmp163 + tmp157
tmp165 = 1e-06
tmp166 = tmp164 + tmp165
tmp167 = tmp161 / tmp166
tmp168 = tmp63 - tmp167
tmp169 = tl.broadcast_to(tmp168, [XBLOCK, RBLOCK])
tmp171 = tl.sum(tmp169, 1)[:, None]
tmp172 = 16.0
tmp173 = tmp171 / tmp172
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp173, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_rsub_sum_0[grid(1)](buf4, arg1_1,
arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf4,
class FScoreLossNew(nn.modules.loss._WeightedLoss):
"""Uses the 1 - F-score as a loss.
.. math::
        F = \frac{ (1 + \beta^2) TP }{ (1 + \beta^2) TP + \beta^2 FN + FP }
Args:
beta: The beta in the above formula.
eps: Epsilon for numerical stability.
tp_bias: This is added to the TP count. Should add a little stability for
very small structures.
make_onehot: Convert the target segmentation to onehot internally. Turn this
off if the target is already onehot.
make_onehot_newaxis: 'newaxis' argument for the onehot conversion.
ignore_index: These classes will not contribute to the loss. Has no effect if
reduction is "none".
weight: Weights for the different classes. Has no effect if reduction is "none".
reduction: "mean", "sum", or "none".
"""
def __init__(self, beta: 'float'=1.0, eps: 'float'=1e-06, tp_bias:
'Union[int, float]'=0, make_onehot: 'bool'=False,
make_onehot_newaxis: 'bool'=False, ignore_index:
'Optional[Union[int, Iterable[int]]]'=None, weight:
'Optional[torch.Tensor]'=None, reduction='mean', **kwargs):
super(FScoreLossNew, self).__init__(weight=weight, reduction=reduction)
self.beta = beta
self.eps = eps
self.tp_bias = tp_bias
self.make_onehot = make_onehot
self.make_onehot_newaxis = make_onehot_newaxis
self.ignore_index = ignore_index
if isinstance(ignore_index, int):
self.ignore_index = [ignore_index]
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
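A minimal check sketch (editorial addition): with the default arguments (mean reduction, no class weights, no ignore_index) the fused Triton path is expected to reproduce the eager FScoreLoss; it needs a CUDA device because call() launches the kernel there.
import torch
if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    eager = FScoreLoss()(x, y)        # pure PyTorch path
    fused = FScoreLossNew()(x, y)     # single fused reduction kernel
    assert torch.allclose(eager, fused, atol=1e-5)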
|
MIC-DKFZ/image-time-series
|
FScoreLoss
| false
| 5,579
|
[
"MIT"
] | 1
|
0480d5cb6936c7d9e839b6741f18c10893d78d8a
|
https://github.com/MIC-DKFZ/image-time-series/tree/0480d5cb6936c7d9e839b6741f18c10893d78d8a
|
Conv_Block_gn
|
import torch
import torch.nn as nn
from torch.autograd.variable import *
class Conv_Block_gn(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, groups, stride=1
):
super(Conv_Block_gn, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=1)
self.relu = torch.nn.LeakyReLU()
self.bn = nn.GroupNorm(groups, out_channels)
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
x = self.bn(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'groups': 1}]
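A minimal usage sketch (editorial addition): the block is conv -> LeakyReLU -> GroupNorm; with kernel_size=3 and padding=1 the spatial size is preserved, while the kernel_size=4 used in get_init_inputs shrinks a 4x4 input to 3x3, as the fused kernel's buffer shapes below reflect.
import torch
block = Conv_Block_gn(in_channels=4, out_channels=8, kernel_size=3, groups=1)
y = block(torch.rand(2, 4, 16, 16))   # -> torch.Size([2, 8, 16, 16])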
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.autograd.variable import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_convolution_native_group_norm_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
rnumel = 36
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r3 = rindex
x0 = xindex
r2 = rindex // 9
tmp0 = tl.load(in_out_ptr0 + (r3 + 36 * x0), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_last', other=0.0
)
tmp31 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp33 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp8, 0)
tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp13 = tl.where(rmask & xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tl.full([XBLOCK, 1], 36, tl.int32)
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp14 / tmp16
tmp18 = tmp8 - tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(rmask & xmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = tmp7 - tmp17
tmp25 = 36.0
tmp26 = tmp23 / tmp25
tmp27 = 1e-05
tmp28 = tmp26 + tmp27
tmp29 = libdevice.rsqrt(tmp28)
tmp30 = tmp24 * tmp29
tmp32 = tmp30 * tmp31
tmp34 = tmp32 + tmp33
tl.store(in_out_ptr0 + (r3 + 36 * x0), tmp2, rmask & xmask)
tl.store(out_ptr2 + (r3 + 36 * x0), tmp34, rmask & xmask)
tl.store(out_ptr3 + x0, tmp29, xmask)
tl.store(out_ptr0 + x0, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_convolution_native_group_norm_0[grid(4)](buf1,
primals_2, primals_4, primals_5, buf2, buf5, buf6, 4, 36,
XBLOCK=1, num_warps=2, num_stages=1)
del primals_2
del primals_5
return buf5, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf2
, (4, 1), (1, 1), 0), reinterpret_tensor(buf6, (4, 1), (1, 1), 0)
class Conv_Block_gnNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, groups, stride=1
):
super(Conv_Block_gnNew, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=1)
self.relu = torch.nn.LeakyReLU()
self.bn = nn.GroupNorm(groups, out_channels)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.bn.weight
primals_5 = self.bn.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
MRLoghmani/Separate_to_Adapt
|
Conv_Block_gn
| false
| 5,580
|
[
"MIT"
] | 1
|
09c734448aa22b3879186f59952d9fd596d4a1f8
|
https://github.com/MRLoghmani/Separate_to_Adapt/tree/09c734448aa22b3879186f59952d9fd596d4a1f8
|
FCNet
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class FCNet(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self.l1 = nn.Linear(input_size, 5)
self.relu = nn.ReLU()
self.l2 = nn.Linear(5, output_size)
def forward(self, x):
output = self.l1(x)
output = self.relu(output)
output = self.l2(output)
return output.view(-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
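A minimal usage sketch (editorial addition): both Linear layers act on the trailing dimension, and the final .view(-1) flattens whatever shape comes out, so a (4, 4, 4, 4) input yields 256 scalars.
import torch
net = FCNet(input_size=4, output_size=4)
out = net(torch.rand(4, 4, 4, 4))   # Linear(4->5) -> ReLU -> Linear(5->4), then flatten
print(out.shape)                    # torch.Size([256])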
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 5
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (5, 4), (4, 1))
assert_size_stride(primals_2, (5,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 5), (5, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 5), (5, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 5), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 5), (80, 20, 5, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(320)](buf1,
primals_2, buf3, 320, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 5), (
5, 1), 0), reinterpret_tensor(primals_4, (5, 4), (1, 5), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (256,), (1,), 0), reinterpret_tensor(
primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 5), (
5, 1), 0), primals_4, buf3
class FCNetNew(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self.l1 = nn.Linear(input_size, 5)
self.relu = nn.ReLU()
self.l2 = nn.Linear(5, output_size)
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Markus92/nni
|
FCNet
| false
| 5,581
|
[
"MIT"
] | 1
|
2641c7343f4b411b002bea4f5648941268194ed7
|
https://github.com/Markus92/nni/tree/2641c7343f4b411b002bea4f5648941268194ed7
|
PFLDLoss
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class PFLDLoss(nn.Module):
"""Weighted loss of L2 distance with the pose angle for PFLD."""
def __init__(self):
super(PFLDLoss, self).__init__()
def forward(self, landmark_gt, euler_angle_gt, angle, landmarks):
"""
Calculate weighted L2 loss for PFLD.
Parameters
----------
landmark_gt : tensor
the ground truth of landmarks
euler_angle_gt : tensor
the ground truth of pose angle
angle : tensor
the predicted pose angle
        landmarks : tensor
the predicted landmarks
Returns
-------
output: tensor
the weighted L2 loss
output: tensor
the normal L2 loss
"""
weight_angle = torch.sum(1 - torch.cos(angle - euler_angle_gt), axis=1)
l2_distant = torch.sum((landmark_gt - landmarks) ** 2, axis=1)
return torch.mean(weight_angle * l2_distant), torch.mean(l2_distant)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
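A minimal usage sketch (editorial addition): the sums over axis=1 collapse the channel dimension, leaving per-sample angle weights and L2 distances that are averaged into the two returned scalars.
import torch
criterion = PFLDLoss()
landmark_gt   = torch.rand(4, 4, 4, 4)
euler_gt      = torch.rand(4, 4, 4, 4)
angle_pred    = torch.rand(4, 4, 4, 4)
landmark_pred = torch.rand(4, 4, 4, 4)
weighted_l2, plain_l2 = criterion(landmark_gt, euler_gt, angle_pred, landmark_pred)
# weighted_l2 = mean(sum(1 - cos(angle_pred - euler_gt), 1) * sum((landmark_gt - landmark_pred)**2, 1))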
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_cos_mean_mul_pow_rsub_sub_sum_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr2 + (r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr3 + (r0 + 64 * r1), None)
tmp25 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None)
tmp26 = tl.load(in_ptr3 + (16 + r0 + 64 * r1), None)
tmp31 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None)
tmp32 = tl.load(in_ptr3 + (32 + r0 + 64 * r1), None)
tmp37 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None)
tmp38 = tl.load(in_ptr3 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp21 = tmp19 - tmp20
tmp22 = tl_math.cos(tmp21)
tmp23 = 1.0
tmp24 = tmp23 - tmp22
tmp27 = tmp25 - tmp26
tmp28 = tl_math.cos(tmp27)
tmp29 = tmp23 - tmp28
tmp30 = tmp24 + tmp29
tmp33 = tmp31 - tmp32
tmp34 = tl_math.cos(tmp33)
tmp35 = tmp23 - tmp34
tmp36 = tmp30 + tmp35
tmp39 = tmp37 - tmp38
tmp40 = tl_math.cos(tmp39)
tmp41 = tmp23 - tmp40
tmp42 = tmp36 + tmp41
tmp43 = tmp42 * tmp18
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp46 = tl.sum(tmp44, 1)[:, None]
tmp47 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp49 = tl.sum(tmp47, 1)[:, None]
tmp50 = 64.0
tmp51 = tmp46 / tmp50
tmp52 = tmp49 / tmp50
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp51, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp52, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2
del buf2
buf5 = buf3
del buf3
get_raw_stream(0)
triton_per_fused_cos_mean_mul_pow_rsub_sub_sum_0[grid(1)](buf4,
buf5, arg2_1, arg3_1, arg0_1, arg1_1, 1, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf4, buf5
class PFLDLossNew(nn.Module):
"""Weighted loss of L2 distance with the pose angle for PFLD."""
def __init__(self):
super(PFLDLossNew, self).__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0], output[1]
|
Markus92/nni
|
PFLDLoss
| false
| 5,582
|
[
"MIT"
] | 1
|
2641c7343f4b411b002bea4f5648941268194ed7
|
https://github.com/Markus92/nni/tree/2641c7343f4b411b002bea4f5648941268194ed7
|
VarifocalLoss
|
import torch
import torch.nn.functional as F
import torch.nn as nn
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0,
iou_weighted=True, reduction='mean', avg_factor=None):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is the number of classes.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal Loss.
Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive example with the iou target. Defaults to True.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
assert pred.size() == target.size()
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
if iou_weighted:
focal_weight = target * (target > 0.0).float() + alpha * (pred_sigmoid
- target).abs().pow(gamma) * (target <= 0.0).float()
else:
focal_weight = (target > 0.0).float() + alpha * (pred_sigmoid - target
).abs().pow(gamma) * (target <= 0.0).float()
loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none'
) * focal_weight
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
class VarifocalLoss(nn.Module):
def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0,
iou_weighted=True, reduction='mean', loss_weight=1.0):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
use_sigmoid (bool, optional): Whether the prediction is
used for sigmoid or softmax. Defaults to True.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal
Loss. Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive examples with the iou target. Defaults to True.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
"""
super(VarifocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid varifocal loss supported now.'
assert alpha >= 0.0
self.use_sigmoid = use_sigmoid
self.alpha = alpha
self.gamma = gamma
self.iou_weighted = iou_weighted
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
if self.use_sigmoid:
loss_cls = self.loss_weight * varifocal_loss(pred, target,
weight, alpha=self.alpha, gamma=self.gamma, iou_weighted=
self.iou_weighted, reduction=reduction, avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
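A minimal usage sketch (editorial addition): pred carries raw logits and target the IoU-aware classification score in [0, 1]; zero entries are treated as negatives and down-weighted by alpha * |sigmoid(pred) - target| ** gamma, exactly as the focal_weight above describes.
import torch
pred = torch.randn(8, 4)                                             # (N, C) logits
target = torch.zeros(8, 4)
target[torch.arange(8), torch.randint(0, 4, (8,))] = torch.rand(8)   # IoU scores on positives
loss = VarifocalLoss(alpha=0.75, gamma=2.0)(pred, target)            # scalar with the default 'mean' reduction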
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tmp0 > tmp5
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp0 * tmp14
tmp16 = tl.sigmoid(tmp3)
tmp17 = tmp16 - tmp0
tmp18 = tl_math.abs(tmp17)
tmp19 = tmp18 * tmp18
tmp20 = 0.75
tmp21 = tmp19 * tmp20
tmp22 = tmp0 <= tmp5
tmp23 = tmp22.to(tl.float32)
tmp24 = tmp21 * tmp23
tmp25 = tmp15 + tmp24
tmp26 = tmp12 * tmp25
tmp27 = tl.broadcast_to(tmp26, [RBLOCK])
tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0))
tmp30 = 256.0
tmp31 = tmp29 / tmp30
tmp32 = tmp31 * tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp32, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0[
grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0,
iou_weighted=True, reduction='mean', avg_factor=None):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is the number of classes.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal Loss.
Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive example with the iou target. Defaults to True.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
assert pred.size() == target.size()
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
if iou_weighted:
focal_weight = target * (target > 0.0).float() + alpha * (pred_sigmoid
- target).abs().pow(gamma) * (target <= 0.0).float()
else:
focal_weight = (target > 0.0).float() + alpha * (pred_sigmoid - target
).abs().pow(gamma) * (target <= 0.0).float()
loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none'
) * focal_weight
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
class VarifocalLossNew(nn.Module):
def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0,
iou_weighted=True, reduction='mean', loss_weight=1.0):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
use_sigmoid (bool, optional): Whether the prediction is
used for sigmoid or softmax. Defaults to True.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal
Loss. Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive examples with the iou target. Defaults to True.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
"""
super(VarifocalLossNew, self).__init__()
assert use_sigmoid is True, 'Only sigmoid varifocal loss supported now.'
assert alpha >= 0.0
self.use_sigmoid = use_sigmoid
self.alpha = alpha
self.gamma = gamma
self.iou_weighted = iou_weighted
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
MatthewInkawhich/object_localization_network
|
VarifocalLoss
| false
| 5,583
|
[
"Apache-2.0"
] | 1
|
3fddaacfcef33f03af48b746e95ebd7d74dbb27f
|
https://github.com/MatthewInkawhich/object_localization_network/tree/3fddaacfcef33f03af48b746e95ebd7d74dbb27f
|
EncoderLayer
|
import torch
import torch.nn as nn
class FeedForwardNetwork(nn.Module):
def __init__(self, hidden_size, ffn_size, dropout_rate):
super(FeedForwardNetwork, self).__init__()
self.layer1 = nn.Linear(hidden_size, ffn_size)
self.gelu = nn.GELU()
self.layer2 = nn.Linear(ffn_size, hidden_size)
def forward(self, x):
x = self.layer1(x)
x = self.gelu(x)
x = self.layer2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, hidden_size, attention_dropout_rate, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.att_size = att_size = hidden_size // num_heads
self.scale = att_size ** -0.5
self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
self.att_dropout = nn.Dropout(attention_dropout_rate)
self.output_layer = nn.Linear(num_heads * att_size, hidden_size)
def forward(self, q, k, v, attn_bias=None):
orig_q_size = q.size()
d_k = self.att_size
d_v = self.att_size
batch_size = q.size(0)
q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k)
k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k)
v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
k = k.transpose(1, 2).transpose(2, 3)
q = q * self.scale
x = torch.matmul(q, k)
if attn_bias is not None:
x = x + attn_bias
x = torch.softmax(x, dim=3)
x = self.att_dropout(x)
x = x.matmul(v)
x = x.transpose(1, 2).contiguous()
x = x.view(batch_size, -1, self.num_heads * d_v)
x = self.output_layer(x)
assert x.size() == orig_q_size
return x
class EncoderLayer(nn.Module):
def __init__(self, hidden_size, ffn_size, dropout_rate,
attention_dropout_rate, num_heads):
super(EncoderLayer, self).__init__()
self.self_attention_norm = nn.LayerNorm(hidden_size)
self.self_attention = MultiHeadAttention(hidden_size,
attention_dropout_rate, num_heads)
self.self_attention_dropout = nn.Dropout(dropout_rate)
self.ffn_norm = nn.LayerNorm(hidden_size)
self.ffn = FeedForwardNetwork(hidden_size, ffn_size, dropout_rate)
self.ffn_dropout = nn.Dropout(dropout_rate)
def forward(self, x, attn_bias=None):
y = self.self_attention_norm(x)
y = self.self_attention(y, y, y, attn_bias)
y = self.self_attention_dropout(y)
x = x + y
y = self.ffn_norm(x)
y = self.ffn(y)
y = self.ffn_dropout(y)
x = x + y
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'ffn_size': 4, 'dropout_rate': 0.5,
'attention_dropout_rate': 0.5, 'num_heads': 4}]
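A minimal usage sketch (editorial addition): one pre-norm Transformer block over a (batch, seq_len, hidden_size) tensor; an optional attn_bias must broadcast against the (batch, num_heads, seq_len, seq_len) attention logits, and eval() is used so the 0.5 dropouts do not randomize the output.
import torch
layer = EncoderLayer(hidden_size=4, ffn_size=4, dropout_rate=0.5,
                     attention_dropout_rate=0.5, num_heads=4).eval()
x = torch.rand(4, 4, 4)           # (batch, seq_len, hidden_size)
y = layer(x)                      # same shape, residuals added around attention and FFN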
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_mul_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_mul_2[grid(16, 4)](buf3, primals_5, buf6, 16,
4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf7 = reinterpret_tensor(buf3, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf3
triton_poi_fused_clone_3[grid(16, 4)](buf4, primals_7, buf7, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf7, (16, 1, 4), (4, 0, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf8, buf9, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused__softmax_5[grid(256)](buf9, buf10, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf9
buf11 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf4
triton_poi_fused_clone_3[grid(16, 4)](buf5, primals_9, buf11, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_9
buf12 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12)
buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf12, buf13, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0)
del buf12
extern_kernels.addmm(primals_11, reinterpret_tensor(buf13, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf14)
del primals_11
buf15 = buf1
del buf1
buf16 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_3, buf14,
buf15, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_3, buf14,
buf15, buf16, primals_12, primals_13, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf15
del buf16
del primals_13
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_15, reinterpret_tensor(buf17, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf18)
del primals_15
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_9[grid(64)](buf18, buf19, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf20)
buf21 = reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0)
del buf20
triton_poi_fused_add_10[grid(64)](buf21, primals_3, buf14,
primals_17, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_17
return buf21, primals_3, primals_12, reinterpret_tensor(buf2, (16, 4),
(4, 1), 0), buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0
), buf14, reinterpret_tensor(buf17, (16, 4), (4, 1), 0
), buf18, reinterpret_tensor(buf19, (16, 4), (4, 1), 0
), primals_16, primals_14, primals_10, reinterpret_tensor(buf11, (
16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf6, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 4), 0
), primals_8, primals_6, primals_4
class FeedForwardNetwork(nn.Module):
def __init__(self, hidden_size, ffn_size, dropout_rate):
super(FeedForwardNetwork, self).__init__()
self.layer1 = nn.Linear(hidden_size, ffn_size)
self.gelu = nn.GELU()
self.layer2 = nn.Linear(ffn_size, hidden_size)
def forward(self, x):
x = self.layer1(x)
x = self.gelu(x)
x = self.layer2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, hidden_size, attention_dropout_rate, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.att_size = att_size = hidden_size // num_heads
self.scale = att_size ** -0.5
self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
self.att_dropout = nn.Dropout(attention_dropout_rate)
self.output_layer = nn.Linear(num_heads * att_size, hidden_size)
def forward(self, q, k, v, attn_bias=None):
orig_q_size = q.size()
d_k = self.att_size
d_v = self.att_size
batch_size = q.size(0)
q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k)
k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k)
v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
k = k.transpose(1, 2).transpose(2, 3)
q = q * self.scale
x = torch.matmul(q, k)
if attn_bias is not None:
x = x + attn_bias
x = torch.softmax(x, dim=3)
x = self.att_dropout(x)
x = x.matmul(v)
x = x.transpose(1, 2).contiguous()
x = x.view(batch_size, -1, self.num_heads * d_v)
x = self.output_layer(x)
assert x.size() == orig_q_size
return x
class EncoderLayerNew(nn.Module):
def __init__(self, hidden_size, ffn_size, dropout_rate,
attention_dropout_rate, num_heads):
super(EncoderLayerNew, self).__init__()
self.self_attention_norm = nn.LayerNorm(hidden_size)
self.self_attention = MultiHeadAttention(hidden_size,
attention_dropout_rate, num_heads)
self.self_attention_dropout = nn.Dropout(dropout_rate)
self.ffn_norm = nn.LayerNorm(hidden_size)
self.ffn = FeedForwardNetwork(hidden_size, ffn_size, dropout_rate)
self.ffn_dropout = nn.Dropout(dropout_rate)
def forward(self, input_0):
primals_1 = self.self_attention_norm.weight
primals_2 = self.self_attention_norm.bias
primals_4 = self.self_attention.linear_q.weight
primals_5 = self.self_attention.linear_q.bias
primals_6 = self.self_attention.linear_k.weight
primals_7 = self.self_attention.linear_k.bias
primals_8 = self.self_attention.linear_v.weight
primals_9 = self.self_attention.linear_v.bias
primals_10 = self.self_attention.output_layer.weight
primals_11 = self.self_attention.output_layer.bias
primals_12 = self.ffn_norm.weight
primals_13 = self.ffn_norm.bias
primals_14 = self.ffn.layer1.weight
primals_15 = self.ffn.layer1.bias
primals_16 = self.ffn.layer2.weight
primals_17 = self.ffn.layer2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
|
Luo-Chang/Graphormer
|
EncoderLayer
| false
| 5,584
|
[
"MIT"
] | 1
|
b35b3ca6369e25cdae80e1617bfc3921feeb3158
|
https://github.com/Luo-Chang/Graphormer/tree/b35b3ca6369e25cdae80e1617bfc3921feeb3158
|
Net
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(4, 32, 3, padding=1)
self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
self.p_fc1 = nn.Conv2d(128, 4, 1)
self.p_fc2 = nn.Linear(4 * 8 * 8, 64)
self.v_fc1 = nn.Conv2d(128, 2, 1)
self.v_fc2 = nn.Linear(2 * 8 * 8, 1)
def forward(self, x):
a1 = F.relu(self.conv1(x))
a2 = F.relu(self.conv2(a1))
a3 = F.relu(self.conv3(a2))
p1 = F.relu(self.p_fc1(a3))
p_act = p1.view(-1, 4 * 8 * 8)
p_out = F.softmax(self.p_fc2(p_act), dim=0)
v1 = F.relu(self.v_fc1(a3))
v_act = v1.view(-1, 2 * 8 * 8)
v_out = torch.tanh(self.v_fc2(v_act))
return p_out, v_out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
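A minimal usage sketch (editorial addition): the fully connected heads flatten 4*8*8 and 2*8*8 features per sample, which matches an 8x8 board input; the 4x4 probe input from get_inputs above still runs, but only because .view(-1, ...) then folds the whole batch into a single row.
import torch
net = Net()
policy, value = net(torch.rand(4, 4, 8, 8))   # (batch, planes, 8, 8) board encoding
# policy: torch.Size([4, 64]), softmax taken over dim=0; value: torch.Size([4, 1]) in (-1, 1)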
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 4 * x2 + 64 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_8(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tmp0 - tmp0
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp2 / tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
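# Note on the kernel above: because the eager model applies softmax over dim=0
# and the traced graph collapses the batch into a single row (the
# view(-1, 4 * 8 * 8) in the original forward), each softmax "column" holds
# exactly one element, so exp(x - x) / exp(x - x) == 1.0 elementwise. Inductor
# therefore reduces the whole softmax to this constant computation instead of
# emitting a real max/sum reduction.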
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 8
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 2
y1 = yindex // 2
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 32 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 2 * x2 + 32 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_tanh_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_out_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr0 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = libdevice.tanh(tmp4)
tl.store(in_out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (4, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (64, 256), (256, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_13, (2,), (1,))
assert_size_stride(primals_14, (1, 128), (128, 1))
assert_size_stride(primals_15, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 4, 3, 3), (36, 1, 12, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(128, 9)](primals_1, buf0, 128, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 4, 4), (512, 1, 128, 32))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_4[grid(2048)](buf5, primals_2,
2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 4, 4), (1024, 1, 256, 64))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_5[grid(4096)](buf7, primals_5,
4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 4, 4), (2048, 1, 512, 128))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_6[grid(8192)](buf9, primals_7,
8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 1, 16, 4))
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(16, 16)](
buf10, primals_9, buf11, buf19, 16, 16, XBLOCK=16, YBLOCK=16,
num_warps=4, num_stages=1)
del buf10
del primals_9
buf12 = empty_strided_cuda((1, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (1, 256),
(0, 1), 0), reinterpret_tensor(primals_10, (256, 64), (1, 256),
0), alpha=1, beta=1, out=buf12)
del primals_11
buf13 = buf12
del buf12
triton_poi_fused__softmax_8[grid(64)](buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 2, 4, 4), (32, 1, 8, 2))
buf15 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
buf18 = empty_strided_cuda((4, 2, 4, 4), (32, 1, 8, 2), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_9[grid(8, 16)](
buf14, primals_13, buf15, buf18, 8, 16, XBLOCK=16, YBLOCK=8,
num_warps=4, num_stages=1)
del buf14
del primals_13
buf16 = empty_strided_cuda((1, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf15, (1, 128), (0, 1), 0),
reinterpret_tensor(primals_14, (128, 1), (1, 128), 0), out=buf16)
buf17 = buf16
del buf16
triton_poi_fused_tanh_10[grid(1)](buf17, primals_15, 1, XBLOCK=1,
num_warps=1, num_stages=1)
del primals_15
return (buf13, buf17, buf0, buf1, buf2, buf3, primals_8, primals_12,
buf5, buf7, buf9, reinterpret_tensor(buf11, (1, 256), (256, 1), 0),
buf13, reinterpret_tensor(buf15, (1, 128), (128, 1), 0), buf17,
primals_14, buf18, primals_10, buf19)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(4, 32, 3, padding=1)
self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
self.p_fc1 = nn.Conv2d(128, 4, 1)
self.p_fc2 = nn.Linear(4 * 8 * 8, 64)
self.v_fc1 = nn.Conv2d(128, 2, 1)
self.v_fc2 = nn.Linear(2 * 8 * 8, 1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.p_fc1.weight
primals_9 = self.p_fc1.bias
primals_10 = self.p_fc2.weight
primals_11 = self.p_fc2.bias
primals_12 = self.v_fc1.weight
primals_13 = self.v_fc1.bias
primals_14 = self.v_fc2.weight
primals_15 = self.v_fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0], output[1]
| LouisCaixuran/gomoku | Net | false | 5,585 | ["Apache-2.0"] | 1 | c1b6d508522d9e8c78be827f326bbee54c4dfd8b | https://github.com/LouisCaixuran/gomoku/tree/c1b6d508522d9e8c78be827f326bbee54c4dfd8b |
Binarizer
|
from torch.autograd import Function
import torch
import torch.nn as nn
import torch.nn.functional as F
class SignFunction(Function):
def __init__(self):
super(SignFunction, self).__init__()
@staticmethod
def forward(ctx, input, is_training=True):
if is_training:
prob = input.new(input.size()).uniform_()
x = input.clone()
x[(1 - input) / 2 <= prob] = 1
x[(1 - input) / 2 > prob] = -1
return x
else:
return input.sign()
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
class Sign(nn.Module):
def __init__(self):
super(Sign, self).__init__()
def forward(self, x):
return SignFunction.apply(x, self.training)
class Binarizer(nn.Module):
def __init__(self, in_channels, out_channels):
super(Binarizer, self).__init__()
self.sign = Sign()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1,
bias=False)
def forward(self, x):
x = self.conv1(x)
x = F.tanh(x)
return self.sign(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sign_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = tl.full([1], 0, tl.int32)
tmp3 = tmp2 < tmp1
tmp4 = tmp3.to(tl.int8)
tmp5 = tmp1 < tmp2
tmp6 = tmp5.to(tl.int8)
tmp7 = tmp4 - tmp6
tmp8 = tmp7.to(tmp1.dtype)
tl.store(out_ptr0 + x0, tmp8, xmask)
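# Note on the kernel above: sign() is computed branch-free as
# (0 < y) - (y < 0) on y = tanh(x). Since tanh preserves the sign of its
# argument, the fused result equals sign(x) wherever x != 0; the tanh is kept
# only to mirror the eager graph, and the convolution output buf0 is returned
# for the backward pass.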
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sign_tanh_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf1, primals_1, primals_2, buf0
class SignFunction(Function):
def __init__(self):
super(SignFunction, self).__init__()
@staticmethod
def forward(ctx, input, is_training=True):
if is_training:
prob = input.new(input.size()).uniform_()
x = input.clone()
x[(1 - input) / 2 <= prob] = 1
x[(1 - input) / 2 > prob] = -1
return x
else:
return input.sign()
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
class Sign(nn.Module):
def __init__(self):
super(Sign, self).__init__()
def forward(self, x):
return SignFunction.apply(x, self.training)
class BinarizerNew(nn.Module):
def __init__(self, in_channels, out_channels):
super(BinarizerNew, self).__init__()
self.sign = Sign()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1,
bias=False)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| MeMihir/SuperResCompression | Binarizer | false | 5,586 | ["MIT"] | 1 | c76bcf6b12d56ce3ad81ebb1b204fc0425f0e633 | https://github.com/MeMihir/SuperResCompression/tree/c76bcf6b12d56ce3ad81ebb1b204fc0425f0e633 |
Sign
|
from torch.autograd import Function
import torch
import torch.nn as nn
class SignFunction(Function):
def __init__(self):
super(SignFunction, self).__init__()
@staticmethod
def forward(ctx, input, is_training=True):
if is_training:
prob = input.new(input.size()).uniform_()
x = input.clone()
x[(1 - input) / 2 <= prob] = 1
x[(1 - input) / 2 > prob] = -1
return x
else:
return input.sign()
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
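# Worked note on the stochastic rounding above (assuming inputs in [-1, 1]):
# with prob ~ U(0, 1), an element is set to +1 when (1 - x) / 2 <= prob, which
# happens with probability (1 + x) / 2, and to -1 otherwise, so the expected
# output equals x. backward() is a straight-through estimator: the incoming
# gradient is returned unchanged for the input.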
class Sign(nn.Module):
def __init__(self):
super(Sign, self).__init__()
def forward(self, x):
return SignFunction.apply(x, self.training)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sign_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sign_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SignFunction(Function):
def __init__(self):
super(SignFunction, self).__init__()
@staticmethod
def forward(ctx, input, is_training=True):
if is_training:
prob = input.new(input.size()).uniform_()
x = input.clone()
x[(1 - input) / 2 <= prob] = 1
x[(1 - input) / 2 > prob] = -1
return x
else:
return input.sign()
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
class SignNew(nn.Module):
def __init__(self):
super(SignNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| MeMihir/SuperResCompression | Sign | false | 5,587 | ["MIT"] | 1 | c76bcf6b12d56ce3ad81ebb1b204fc0425f0e633 | https://github.com/MeMihir/SuperResCompression/tree/c76bcf6b12d56ce3ad81ebb1b204fc0425f0e633 |
GLU
|
import torch
import torch.nn as nn
class GLU(nn.Module):
def __init__(self, input_channel, output_channel):
super(GLU, self).__init__()
self.linear_left = nn.Linear(input_channel, output_channel)
self.linear_right = nn.Linear(input_channel, output_channel)
def forward(self, x):
return torch.mul(self.linear_left(x), torch.sigmoid(self.
linear_right(x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channel': 4, 'output_channel': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, buf1, buf2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1
class GLUNew(nn.Module):
def __init__(self, input_channel, output_channel):
super(GLUNew, self).__init__()
self.linear_left = nn.Linear(input_channel, output_channel)
self.linear_right = nn.Linear(input_channel, output_channel)
def forward(self, input_0):
primals_1 = self.linear_left.weight
primals_2 = self.linear_left.bias
primals_4 = self.linear_right.weight
primals_5 = self.linear_right.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| MichaelHopwood/GLRM | GLU | false | 5,588 | ["MIT"] | 1 | 80930762e6964afb8ef0db9e5ae3a10cfcc975b2 | https://github.com/MichaelHopwood/GLRM/tree/80930762e6964afb8ef0db9e5ae3a10cfcc975b2 |
AverageAttention
|
import torch
import torch.nn as nn
import torch.cuda
import torch.distributed
class ActivationFunction(object):
relu = 'relu'
gelu = 'gelu'
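# Editorial note: PositionwiseFeedForward below indexes ACTIVATION_FUNCTIONS,
# which this snippet never defines (it lives elsewhere in the upstream
# OpenNMT-py source). A minimal stand-in, added here only so the class is
# self-contained (an assumption, not part of the original record):
ACTIVATION_FUNCTIONS = {ActivationFunction.relu: nn.functional.relu,
    ActivationFunction.gelu: nn.functional.gelu}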
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
d_ff (int): the hidden layer size of the second-layer
of the FFN.
dropout (float): dropout probability in :math:`[0, 1)`.
activation_fn (ActivationFunction): activation function used.
"""
def __init__(self, d_model, d_ff, dropout=0.1, activation_fn=
ActivationFunction.relu):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
self.dropout_1 = nn.Dropout(dropout)
self.activation = ACTIVATION_FUNCTIONS[activation_fn]
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
"""Layer definition.
Args:
x: ``(batch_size, input_len, model_dim)``
Returns:
(FloatTensor): Output ``(batch_size, input_len, model_dim)``.
"""
inter = self.dropout_1(self.activation(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x
def update_dropout(self, dropout):
self.dropout_1.p = dropout
self.dropout_2.p = dropout
class AverageAttention(nn.Module):
"""
Average Attention module from
"Accelerating Neural Transformer via an Average Attention Network"
:cite:`DBLP:journals/corr/abs-1805-00631`.
Args:
model_dim (int): the dimension of keys/values/queries,
must be divisible by head_count
dropout (float): dropout parameter
pos_ffn_activation_fn (ActivationFunction):
activation function choice for PositionwiseFeedForward layer
"""
def __init__(self, model_dim, dropout=0.1, aan_useffn=False,
pos_ffn_activation_fn=ActivationFunction.relu):
self.model_dim = model_dim
self.aan_useffn = aan_useffn
super(AverageAttention, self).__init__()
if aan_useffn:
self.average_layer = PositionwiseFeedForward(model_dim,
model_dim, dropout, pos_ffn_activation_fn)
self.gating_layer = nn.Linear(model_dim * 2, model_dim * 2)
def cumulative_average_mask(self, batch_size, inputs_len, device):
"""
Builds the mask to compute the cumulative average as described in
:cite:`DBLP:journals/corr/abs-1805-00631` -- Figure 3
Args:
batch_size (int): batch size
inputs_len (int): length of the inputs
Returns:
(FloatTensor):
* A Tensor of shape ``(batch_size, input_len, input_len)``
"""
triangle = torch.tril(torch.ones(inputs_len, inputs_len, dtype=
torch.float, device=device))
weights = torch.ones(1, inputs_len, dtype=torch.float, device=device
) / torch.arange(1, inputs_len + 1, dtype=torch.float, device=
device)
mask = triangle * weights.transpose(0, 1)
return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len)
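# Worked example of the mask (inputs_len = 3): row i carries a uniform weight
# of 1 / (i + 1) over positions 0..i, i.e.
# [[1,   0,   0  ],
#  [1/2, 1/2, 0  ],
#  [1/3, 1/3, 1/3]]
# so matmul(mask, inputs) yields the running mean of each sequence prefix.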
def cumulative_average(self, inputs, mask_or_step, layer_cache=None,
step=None):
"""
Computes the cumulative average as described in
:cite:`DBLP:journals/corr/abs-1805-00631` -- Equations (1) (5) (6)
Args:
inputs (FloatTensor): sequence to average
``(batch_size, input_len, dimension)``
mask_or_step: if cache is set, this is assumed
to be the current step of the
dynamic decoding. Otherwise, it is the mask matrix
used to compute the cumulative average.
layer_cache: a dictionary containing the cumulative average
of the previous step.
Returns:
a tensor of the same shape and type as ``inputs``.
"""
if layer_cache is not None:
step = mask_or_step
average_attention = (inputs + step * layer_cache['prev_g']) / (step
+ 1)
layer_cache['prev_g'] = average_attention
return average_attention
else:
mask = mask_or_step
return torch.matmul(mask, inputs)
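# Note: the cached branch above is the incremental form of the same running
# mean, g_t = (x_t + step * g_{t-1}) / (step + 1), so step-by-step decoding and
# the masked matmul used during training produce identical averages.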
def forward(self, inputs, mask=None, layer_cache=None, step=None):
"""
Args:
inputs (FloatTensor): ``(batch_size, input_len, model_dim)``
Returns:
(FloatTensor, FloatTensor):
* gating_outputs ``(batch_size, input_len, model_dim)``
* average_outputs average attention
``(batch_size, input_len, model_dim)``
"""
batch_size = inputs.size(0)
inputs_len = inputs.size(1)
average_outputs = self.cumulative_average(inputs, self.
cumulative_average_mask(batch_size, inputs_len, inputs.device) if
layer_cache is None else step, layer_cache=layer_cache)
if self.aan_useffn:
average_outputs = self.average_layer(average_outputs)
gating_outputs = self.gating_layer(torch.cat((inputs,
average_outputs), -1))
input_gate, forget_gate = torch.chunk(gating_outputs, 2, dim=2)
gating_outputs = torch.sigmoid(input_gate) * inputs + torch.sigmoid(
forget_gate) * average_outputs
return gating_outputs, average_outputs
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'model_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.cuda
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_ones_tril_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0 + -1 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 <= tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = 1 + x1
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp3 / tmp7
tmp9 = tmp5 * tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
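# The kernel above materializes the 4x4 cumulative-average mask directly on the
# GPU: tmp5 is the lower-triangular ones pattern (x0 <= x1) and tmp8 scales row
# x1 by 1 / (x1 + 1), fusing the ones/tril/arange/div/mul chain from
# cumulative_average_mask into a single elementwise pass.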
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tmp8 = tmp6 + tmp7
tmp9 = tl.sigmoid(tmp8)
tmp11 = tmp9 * tmp10
tmp12 = tmp5 + tmp11
tmp13 = 1.0
tmp14 = tmp13 - tmp9
tmp15 = tmp9 * tmp14
tmp16 = tmp13 - tmp3
tmp17 = tmp3 * tmp16
tl.store(out_ptr0 + x2, tmp12, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
tl.store(out_ptr2 + x2, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 8), (8, 1))
assert_size_stride(primals_3, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_ones_tril_0[grid(16)](buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (0, 4, 1), 0
), primals_1, out=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_1[grid(128)](primals_1, buf1, buf2, 128,
XBLOCK=128, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((16, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_2, (8, 8), (1, 8), 0), out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2[grid(64)](buf3,
primals_3, primals_1, buf1, buf4, buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf3
del primals_3
return buf4, buf1, primals_1, buf1, reinterpret_tensor(buf2, (16, 8), (
8, 1), 0), buf5, buf6
class ActivationFunction(object):
relu = 'relu'
gelu = 'gelu'
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
d_ff (int): the hidden layer size of the second-layer
of the FFN.
dropout (float): dropout probability in :math:`[0, 1)`.
activation_fn (ActivationFunction): activation function used.
"""
def __init__(self, d_model, d_ff, dropout=0.1, activation_fn=
ActivationFunction.relu):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
self.dropout_1 = nn.Dropout(dropout)
self.activation = ACTIVATION_FUNCTIONS[activation_fn]
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
"""Layer definition.
Args:
x: ``(batch_size, input_len, model_dim)``
Returns:
(FloatTensor): Output ``(batch_size, input_len, model_dim)``.
"""
inter = self.dropout_1(self.activation(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x
def update_dropout(self, dropout):
self.dropout_1.p = dropout
self.dropout_2.p = dropout
class AverageAttentionNew(nn.Module):
"""
Average Attention module from
"Accelerating Neural Transformer via an Average Attention Network"
:cite:`DBLP:journals/corr/abs-1805-00631`.
Args:
model_dim (int): the dimension of keys/values/queries,
must be divisible by head_count
dropout (float): dropout parameter
pos_ffn_activation_fn (ActivationFunction):
activation function choice for PositionwiseFeedForward layer
"""
def __init__(self, model_dim, dropout=0.1, aan_useffn=False,
pos_ffn_activation_fn=ActivationFunction.relu):
self.model_dim = model_dim
self.aan_useffn = aan_useffn
super(AverageAttentionNew, self).__init__()
if aan_useffn:
self.average_layer = PositionwiseFeedForward(model_dim,
model_dim, dropout, pos_ffn_activation_fn)
self.gating_layer = nn.Linear(model_dim * 2, model_dim * 2)
def cumulative_average_mask(self, batch_size, inputs_len, device):
"""
Builds the mask to compute the cumulative average as described in
:cite:`DBLP:journals/corr/abs-1805-00631` -- Figure 3
Args:
batch_size (int): batch size
inputs_len (int): length of the inputs
Returns:
(FloatTensor):
* A Tensor of shape ``(batch_size, input_len, input_len)``
"""
triangle = torch.tril(torch.ones(inputs_len, inputs_len, dtype=
torch.float, device=device))
weights = torch.ones(1, inputs_len, dtype=torch.float, device=device
) / torch.arange(1, inputs_len + 1, dtype=torch.float, device=
device)
mask = triangle * weights.transpose(0, 1)
return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len)
def cumulative_average(self, inputs, mask_or_step, layer_cache=None,
step=None):
"""
Computes the cumulative average as described in
:cite:`DBLP:journals/corr/abs-1805-00631` -- Equations (1) (5) (6)
Args:
inputs (FloatTensor): sequence to average
``(batch_size, input_len, dimension)``
mask_or_step: if cache is set, this is assumed
to be the current step of the
dynamic decoding. Otherwise, it is the mask matrix
used to compute the cumulative average.
layer_cache: a dictionary containing the cumulative average
of the previous step.
Returns:
a tensor of the same shape and type as ``inputs``.
"""
if layer_cache is not None:
step = mask_or_step
average_attention = (inputs + step * layer_cache['prev_g']) / (step
+ 1)
layer_cache['prev_g'] = average_attention
return average_attention
else:
mask = mask_or_step
return torch.matmul(mask, inputs)
def forward(self, input_0):
primals_2 = self.gating_layer.weight
primals_3 = self.gating_layer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
| MaxatTezekbayev/OpenNMT-py-lexical | AverageAttention | false | 5,589 | ["MIT"] | 1 | 44182999b863fc4074d67e0281c5bdab19abddfe | https://github.com/MaxatTezekbayev/OpenNMT-py-lexical/tree/44182999b863fc4074d67e0281c5bdab19abddfe |
SRNet
|
import torch
import torch.nn as nn
import torch.optim
class SRNet(nn.Module):
def __init__(self):
super(SRNet, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.Conv1 = nn.Conv2d(3, 64, 3, 1, 1, bias=True)
self.Conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
self.Conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
self.Conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
self.Conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
self.Conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
self.Conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
self.Conv8 = nn.Conv2d(64, 3, 3, 1, 1, bias=True)
def forward(self, LR_img):
x = self.relu(self.Conv1(LR_img))
x = self.relu(self.Conv2(x))
x = self.relu(self.Conv3(x))
x = self.relu(self.Conv4(x))
x = self.relu(self.Conv5(x))
x = self.relu(self.Conv6(x))
x = self.relu(self.Conv7(x))
x = self.Conv8(x)
SR_img = LR_img + x
return SR_img
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
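# Editorial note: SRNet is a plain 8-layer VDSR-style CNN that predicts a
# residual and adds it back to the low-resolution input (SR = LR + f(LR)); the
# compiled version below accordingly reuses one fused conv+bias+ReLU kernel for
# the hidden layers and folds the final bias add together with the global skip
# connection.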
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_out_ptr0 + x3, None)
tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (3, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(1048576)](buf5, primals_7,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_0[grid(1048576)](buf7, primals_9,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_0[grid(1048576)](buf9, primals_11,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_0[grid(1048576)](buf11,
primals_13, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_0[grid(1048576)](buf13,
primals_15, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf15 = buf14
del buf14
triton_poi_fused_add_convolution_1[grid(49152)](buf15, primals_3,
primals_17, 49152, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, buf1, buf3, buf5,
buf7, buf9, buf11, buf13)
class SRNetNew(nn.Module):
def __init__(self):
super(SRNetNew, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.Conv1 = nn.Conv2d(3, 64, 3, 1, 1, bias=True)
self.Conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
self.Conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
self.Conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
self.Conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
self.Conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
self.Conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
self.Conv8 = nn.Conv2d(64, 3, 3, 1, 1, bias=True)
def forward(self, input_0):
primals_1 = self.Conv1.weight
primals_2 = self.Conv1.bias
primals_4 = self.Conv2.weight
primals_5 = self.Conv2.bias
primals_6 = self.Conv3.weight
primals_7 = self.Conv3.bias
primals_8 = self.Conv4.weight
primals_9 = self.Conv4.bias
primals_10 = self.Conv5.weight
primals_11 = self.Conv5.bias
primals_12 = self.Conv6.weight
primals_13 = self.Conv6.bias
primals_14 = self.Conv7.weight
primals_15 = self.Conv7.bias
primals_16 = self.Conv8.weight
primals_17 = self.Conv8.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
| MayankSingal/PyTorch-Zero-Shot-Super-Resolution | SRNet | false | 5,590 | ["MIT"] | 1 | 3521b02fd338fc90eef88c551a8bed4afc54c8c6 | https://github.com/MayankSingal/PyTorch-Zero-Shot-Super-Resolution/tree/3521b02fd338fc90eef88c551a8bed4afc54c8c6 |
Sparsemax
|
import torch
import torch.nn as nn
class Sparsemax(nn.Module):
"""Sparsemax function."""
def __init__(self, dim=None):
"""Initialize sparsemax activation
Args:
dim (int, optional): The dimension over which to apply the sparsemax function.
"""
super(Sparsemax, self).__init__()
self.dim = -1 if dim is None else dim
def forward(self, input):
"""Forward function.
Args:
input (torch.Tensor): Input tensor. First dimension should be the batch size
Returns:
torch.Tensor: [batch_size x number_of_logits] Output tensor
"""
original_size = input.size()
input = input.view(-1, input.size(self.dim))
dim = 1
number_of_logits = input.size(dim)
input = input - torch.max(input, dim=dim, keepdim=True)[0].expand_as(
input)
zs = torch.sort(input=input, dim=dim, descending=True)[0]
range = torch.range(start=1, end=number_of_logits, device=input.device
).view(1, -1)
range = range.expand_as(zs)
bound = 1 + range * zs
cumulative_sum_zs = torch.cumsum(zs, dim)
is_gt = torch.gt(bound, cumulative_sum_zs).type(input.type())
k = torch.max(is_gt * range, dim, keepdim=True)[0]
zs_sparse = is_gt * zs
taus = (torch.sum(zs_sparse, dim, keepdim=True) - 1) / k
taus = taus.expand_as(input)
self.output = torch.max(torch.zeros_like(input), input - taus)
output = self.output.view(original_size)
return output
def backward(self, grad_output):
"""Backward function."""
dim = 1
nonzeros = torch.ne(self.output, 0)
sum = torch.sum(grad_output * nonzeros, dim=dim) / torch.sum(nonzeros,
dim=dim)
self.grad_input = nonzeros * (grad_output - sum.expand_as(grad_output))
return self.grad_input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
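# Worked example (illustrative): for logits [1.0, 0.5, -1.0] the sorted support
# is the first two entries, tau = ((1.0 + 0.5) - 1) / 2 = 0.25, and sparsemax
# returns [0.75, 0.25, 0.0] -- a genuinely sparse distribution that still sums
# to 1 (the max-subtraction in forward only shifts z and tau together, so the
# result is unchanged). Implementation note: torch.range is deprecated (its end
# point is inclusive); torch.arange(1, number_of_logits + 1) is the modern
# equivalent, and the compiled graph below still lowers this call to
# torch.ops.aten.range.step.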
|
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused_cumsum_sort_sub_0(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = r1
tmp10 = tmp9.to(tl.int16)
tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13, _tmp14 = triton_helpers.sort_with_index(tmp11, tmp12, None, 1,
stable=False, descending=True)
tmp15 = tmp13.to(tl.float32)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp17, = tl.associative_scan((tmp16,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp8, xmask)
tl.store(out_ptr1 + (r1 + 4 * x0), tmp13, xmask)
tl.store(out_ptr2 + (r1 + 4 * x0), tmp17, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_gt_max_mul_sum_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + 2)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp22 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp30 = tl.load(in_ptr0 + 3)
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp32 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp35 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 + tmp4
tmp7 = tmp5 > tmp6
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp8 * tmp1
tmp13 = tmp11 * tmp12
tmp14 = tmp13 + tmp4
tmp16 = tmp14 > tmp15
tmp17 = tmp16.to(tl.float32)
tmp18 = tmp17 * tmp11
tmp19 = triton_helpers.maximum(tmp9, tmp18)
tmp23 = tmp21 * tmp22
tmp24 = tmp23 + tmp4
tmp26 = tmp24 > tmp25
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp27 * tmp21
tmp29 = triton_helpers.maximum(tmp19, tmp28)
tmp33 = tmp31 * tmp32
tmp34 = tmp33 + tmp4
tmp36 = tmp34 > tmp35
tmp37 = tmp36.to(tl.float32)
tmp38 = tmp37 * tmp31
tmp39 = triton_helpers.maximum(tmp29, tmp38)
tmp40 = tmp8 * tmp2
tmp41 = tmp17 * tmp12
tmp42 = tmp40 + tmp41
tmp43 = tmp27 * tmp22
tmp44 = tmp42 + tmp43
tmp45 = tmp37 * tmp32
tmp46 = tmp44 + tmp45
tl.store(out_ptr0 + x0, tmp39, xmask)
tl.store(out_ptr1 + x0, tmp46, xmask)
@triton.jit
def triton_poi_fused_maximum_sub_zeros_like_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 - tmp2
tmp5 = tmp3 / tmp4
tmp6 = tmp0 - tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tl.store(out_ptr0 + x2, tmp8, xmask)
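# The kernel above applies the final sparsemax projection: with in_ptr1 holding
# sum(zs_sparse) and in_ptr2 holding k, it computes tau = (sum(zs_sparse) - 1) / k
# per row and stores max(0, z - tau), matching the last two lines of the eager
# forward.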
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_cumsum_sort_sub_0[grid(64)](arg0_1, buf0, buf1,
buf5, 64, 4, XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
buf3 = torch.ops.aten.range.step(1, 4, dtype=torch.float32, layout=
torch.strided, device=device(type='cuda', index=0))
buf4 = buf3
del buf3
buf6 = empty_strided_cuda((64, 1), (1, 64), torch.float32)
buf7 = empty_strided_cuda((64, 1), (1, 64), torch.float32)
triton_poi_fused__to_copy_add_gt_max_mul_sum_1[grid(64)](buf4, buf1,
buf5, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
del buf4
buf8 = buf5
del buf5
triton_poi_fused_maximum_sub_zeros_like_2[grid(256)](buf0, buf7,
buf6, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del buf6
del buf7
return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf8
class SparsemaxNew(nn.Module):
"""Sparsemax function."""
def __init__(self, dim=None):
"""Initialize sparsemax activation
Args:
dim (int, optional): The dimension over which to apply the sparsemax function.
"""
super(SparsemaxNew, self).__init__()
self.dim = -1 if dim is None else dim
def backward(self, grad_output):
"""Backward function."""
dim = 1
nonzeros = torch.ne(self.output, 0)
sum = torch.sum(grad_output * nonzeros, dim=dim) / torch.sum(nonzeros,
dim=dim)
self.grad_input = nonzeros * (grad_output - sum.expand_as(grad_output))
return self.grad_input
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Max-luo-song/fs-map-project | Sparsemax | false | 5,591 | ["Apache-2.0"] | 1 | 4e9d86e182d9a4b969e86b12d72f227e4fd4fd09 | https://github.com/Max-luo-song/fs-map-project/tree/4e9d86e182d9a4b969e86b12d72f227e4fd4fd09 |
NTXent
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class NTXent(nn.Module):
def __init__(self, metric: 'str'='CosineSimilarity', temperature:
'float'=0.5, reduction: 'str'='mean'):
super().__init__()
if metric not in ['CosineSimilarity']:
raise ValueError('Undefined metric!')
if reduction not in ['AvgNonInf', 'mean', 'sum', 'none']:
raise ValueError('Undefined reduction!')
self.metric = metric
self.t = temperature
self.reduction = reduction
def forward(self, embedding, label):
if self.metric == 'CosineSimilarity':
emb = F.normalize(embedding, dim=1)
sim_mat = F.cosine_similarity(emb.unsqueeze(1), emb.unsqueeze(0
), dim=2)
sim_mat = torch.exp(sim_mat / self.t)
pos = torch.sum(sim_mat * label, dim=1)
inner = pos / torch.sum(sim_mat, dim=1)
loss = -torch.log(inner)
if self.reduction == 'AvgNonInf':
non_inf = inner > 0
loss = loss * non_inf
loss = loss.sum() / torch.sum(non_inf) if torch.sum(non_inf
) > 0 else loss.mean()
elif self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
elif self.reduction == 'none':
pass
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
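# Editorial note: per anchor i the loss above is
# -log( sum_j exp(sim_ij / t) * label_ij / sum_j exp(sim_ij / t) ),
# i.e. the negative log of the similarity mass assigned to positive pairs, so
# `label` is expected to be a {0, 1} positive-pair indicator matrix aligned
# with the pairwise similarity matrix (the 4x4x4x4 tensors in get_inputs are
# only shape placeholders).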
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 256
x4 = xindex % 64
x0 = xindex % 16
x6 = xindex % 256
x2 = xindex // 64 % 4
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x4 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr0 + x6, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp17 * tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp13)
tmp30 = tmp16 / tmp29
tmp31 = tmp15 * tmp30
tl.store(out_ptr0 + x7, tmp31, xmask)
@triton.jit
def triton_per_fused_div_exp_log_mean_mul_neg_sum_2(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 256 * r1), None)
tmp1 = tl.load(in_ptr0 + (16 + r0 + 256 * r1), None)
tmp3 = tl.load(in_ptr0 + (32 + r0 + 256 * r1), None)
tmp5 = tl.load(in_ptr0 + (48 + r0 + 256 * r1), None)
tmp10 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp12 = tl.load(in_ptr0 + (64 + r0 + 256 * r1), None)
tmp13 = tl.load(in_ptr0 + (80 + r0 + 256 * r1), None)
tmp15 = tl.load(in_ptr0 + (96 + r0 + 256 * r1), None)
tmp17 = tl.load(in_ptr0 + (112 + r0 + 256 * r1), None)
tmp21 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp24 = tl.load(in_ptr0 + (128 + r0 + 256 * r1), None)
tmp25 = tl.load(in_ptr0 + (144 + r0 + 256 * r1), None)
tmp27 = tl.load(in_ptr0 + (160 + r0 + 256 * r1), None)
tmp29 = tl.load(in_ptr0 + (176 + r0 + 256 * r1), None)
tmp33 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp36 = tl.load(in_ptr0 + (192 + r0 + 256 * r1), None)
tmp37 = tl.load(in_ptr0 + (208 + r0 + 256 * r1), None)
tmp39 = tl.load(in_ptr0 + (224 + r0 + 256 * r1), None)
tmp41 = tl.load(in_ptr0 + (240 + r0 + 256 * r1), None)
tmp45 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 2.0
tmp8 = tmp6 * tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 * tmp10
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp18 * tmp7
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp20 * tmp21
tmp23 = tmp11 + tmp22
tmp26 = tmp24 + tmp25
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp31 = tmp30 * tmp7
tmp32 = tl_math.exp(tmp31)
tmp34 = tmp32 * tmp33
tmp35 = tmp23 + tmp34
tmp38 = tmp36 + tmp37
tmp40 = tmp38 + tmp39
tmp42 = tmp40 + tmp41
tmp43 = tmp42 * tmp7
tmp44 = tl_math.exp(tmp43)
tmp46 = tmp44 * tmp45
tmp47 = tmp35 + tmp46
tmp48 = tmp9 + tmp20
tmp49 = tmp48 + tmp32
tmp50 = tmp49 + tmp44
tmp51 = tmp47 / tmp50
tmp52 = tl_math.log(tmp51)
tmp53 = -tmp52
tmp54 = tl.broadcast_to(tmp53, [XBLOCK, RBLOCK])
tmp56 = tl.sum(tmp54, 1)[:, None]
tmp57 = 64.0
tmp58 = tmp56 / tmp57
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp58, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1[grid(1024)](
buf0, buf1, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
triton_per_fused_div_exp_log_mean_mul_neg_sum_2[grid(1)](buf5, buf1,
arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf1
return buf5,
class NTXentNew(nn.Module):
def __init__(self, metric: 'str'='CosineSimilarity', temperature:
'float'=0.5, reduction: 'str'='mean'):
super().__init__()
if metric not in ['CosineSimilarity']:
raise ValueError('Undefined metric!')
if reduction not in ['AvgNonInf', 'mean', 'sum', 'none']:
raise ValueError('Undefined reduction!')
self.metric = metric
self.t = temperature
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Meteor-han/ReLMole
|
NTXent
| false
| 5,592
|
[
"MIT"
] | 1
|
ec8f2d3ec7b8edb6cd34aede36a980bab3dc35c2
|
https://github.com/Meteor-han/ReLMole/tree/ec8f2d3ec7b8edb6cd34aede36a980bab3dc35c2
|
GlobalAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda
import torch.distributed
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments
), 'Not all arguments have the same value: ' + str(args)
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len, device=lengths.device).type_as(lengths
).repeat(batch_size, 1).lt(lengths.unsqueeze(1))
class GlobalAttention(nn.Module):
"""
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
Constructs a unit mapping a query `q` of size `dim`
and a source matrix `H` of size `n x dim`, to an output
of size `dim`.
.. mermaid::
graph BT
A[Query]
subgraph RNN
C[H 1]
D[H 2]
E[H N]
end
F[Attn]
G[Output]
A --> F
C --> F
D --> F
E --> F
C -.-> G
D -.-> G
E -.-> G
F --> G
All models compute the output as
:math:`c = \\sum_{j=1}^{\\text{SeqLength}} a_j H_j` where
:math:`a_j` is the softmax of a score function.
    They then apply a projection layer to [q, c].
    However, they differ in how they compute the attention score.
* Luong Attention (dot, general):
* dot: :math:`\\text{score}(H_j,q) = H_j^T q`
* general: :math:`\\text{score}(H_j, q) = H_j^T W_a q`
* Bahdanau Attention (mlp):
* :math:`\\text{score}(H_j, q) = v_a^T \\text{tanh}(W_a q + U_a h_j)`
Args:
dim (int): dimensionality of query and key
coverage (bool): use coverage term
attn_type (str): type of attention to use, options [dot,general,mlp]
attn_func (str): attention function to use, options [softmax,sparsemax]
"""
def __init__(self, dim, coverage=False, attn_type='dot', attn_func=
'softmax'):
super(GlobalAttention, self).__init__()
self.dim = dim
assert attn_type in ['dot', 'general', 'mlp'
], 'Please select a valid attention type (got {:s}).'.format(
attn_type)
self.attn_type = attn_type
assert attn_func in ['softmax', 'sparsemax'
], 'Please select a valid attention function.'
self.attn_func = attn_func
if self.attn_type == 'general':
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == 'mlp':
self.linear_context = nn.Linear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = nn.Linear(dim, 1, bias=False)
out_bias = self.attn_type == 'mlp'
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
if coverage:
self.linear_cover = nn.Linear(1, dim, bias=False)
def score(self, h_t, h_s):
"""
Args:
h_t (FloatTensor): sequence of queries ``(batch, tgt_len, dim)``
            h_s (FloatTensor): sequence of sources ``(batch, src_len, dim)``
Returns:
FloatTensor: raw attention scores (unnormalized) for each src index
``(batch, tgt_len, src_len)``
"""
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ['general', 'dot']:
if self.attn_type == 'general':
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = torch.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, source, memory_bank, memory_lengths=None, coverage=None):
"""
Args:
source (FloatTensor): query vectors ``(batch, tgt_len, dim)``
memory_bank (FloatTensor): source vectors ``(batch, src_len, dim)``
memory_lengths (LongTensor): the source context lengths ``(batch,)``
coverage (FloatTensor): None (not supported yet)
Returns:
(FloatTensor, FloatTensor):
* Computed vector ``(tgt_len, batch, dim)``
            * Attention distributions for each query
``(tgt_len, batch, src_len)``
"""
if source.dim() == 2:
one_step = True
source = source.unsqueeze(1)
else:
one_step = False
batch, source_l, dim = memory_bank.size()
batch_, target_l, dim_ = source.size()
aeq(batch, batch_)
aeq(dim, dim_)
aeq(self.dim, dim)
if coverage is not None:
batch_, source_l_ = coverage.size()
aeq(batch, batch_)
aeq(source_l, source_l_)
if coverage is not None:
cover = coverage.view(-1).unsqueeze(1)
memory_bank += self.linear_cover(cover).view_as(memory_bank)
memory_bank = torch.tanh(memory_bank)
align = self.score(source, memory_bank)
if memory_lengths is not None:
mask = sequence_mask(memory_lengths, max_len=align.size(-1))
mask = mask.unsqueeze(1)
align.masked_fill_(~mask, -float('inf'))
if self.attn_func == 'softmax':
align_vectors = F.softmax(align.view(batch * target_l, source_l
), -1)
else:
align_vectors = sparsemax(align.view(batch * target_l, source_l
), -1)
align_vectors = align_vectors.view(batch, target_l, source_l)
c = torch.bmm(align_vectors, memory_bank)
concat_c = torch.cat([c, source], 2).view(batch * target_l, dim * 2)
attn_h = self.linear_out(concat_c).view(batch, target_l, dim)
if self.attn_type in ['general', 'dot']:
attn_h = torch.tanh(attn_h)
if one_step:
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
batch_, dim_ = attn_h.size()
aeq(batch, batch_)
aeq(dim, dim_)
batch_, source_l_ = align_vectors.size()
aeq(batch, batch_)
aeq(source_l, source_l_)
else:
attn_h = attn_h.transpose(0, 1).contiguous()
align_vectors = align_vectors.transpose(0, 1).contiguous()
target_l_, batch_, dim_ = attn_h.size()
aeq(target_l, target_l_)
aeq(batch, batch_)
aeq(dim, dim_)
target_l_, batch_, source_l_ = align_vectors.size()
aeq(target_l, target_l_)
aeq(batch, batch_)
aeq(source_l, source_l_)
return attn_h, align_vectors
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
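# Minimal usage sketch (illustrative; the helper below is not part of the repo code):
# with the default attn_type='dot', the returned alignment is simply a row-wise
# softmax of bmm(query, memory_bank.transpose(1, 2)).
def _example_dot_attention():
    attn = GlobalAttention(dim=4)
    query = torch.rand(4, 4, 4)
    memory_bank = torch.rand(4, 4, 4)
    output, align = attn(query, memory_bank)
    manual = torch.softmax(torch.bmm(query, memory_bank.transpose(1, 2)), dim=-1)
    # align is returned as (tgt_len, batch, src_len); compare in batch-first layout
    assert torch.allclose(align.transpose(0, 1), manual, atol=1e-06)
    return output.shape, align.shape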
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.cuda
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
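    # Pass 1 of a numerically stable softmax over rows of length 4: subtract the
    # per-row maximum and exponentiate; the companion kernel below divides by the row sum.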
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + x3, tmp1, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4,
4), (16, 1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1),
0), primals_2, out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0)
del buf3
extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5)
del primals_3
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(64)](buf2, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
return buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments
), 'Not all arguments have the same value: ' + str(args)
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len, device=lengths.device).type_as(lengths
).repeat(batch_size, 1).lt(lengths.unsqueeze(1))
class GlobalAttentionNew(nn.Module):
"""
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
Constructs a unit mapping a query `q` of size `dim`
and a source matrix `H` of size `n x dim`, to an output
of size `dim`.
.. mermaid::
graph BT
A[Query]
subgraph RNN
C[H 1]
D[H 2]
E[H N]
end
F[Attn]
G[Output]
A --> F
C --> F
D --> F
E --> F
C -.-> G
D -.-> G
E -.-> G
F --> G
All models compute the output as
:math:`c = \\sum_{j=1}^{\\text{SeqLength}} a_j H_j` where
:math:`a_j` is the softmax of a score function.
    They then apply a projection layer to [q, c].
    However, they differ in how they compute the attention score.
* Luong Attention (dot, general):
* dot: :math:`\\text{score}(H_j,q) = H_j^T q`
* general: :math:`\\text{score}(H_j, q) = H_j^T W_a q`
* Bahdanau Attention (mlp):
* :math:`\\text{score}(H_j, q) = v_a^T \\text{tanh}(W_a q + U_a h_j)`
Args:
dim (int): dimensionality of query and key
coverage (bool): use coverage term
attn_type (str): type of attention to use, options [dot,general,mlp]
attn_func (str): attention function to use, options [softmax,sparsemax]
"""
def __init__(self, dim, coverage=False, attn_type='dot', attn_func=
'softmax'):
super(GlobalAttentionNew, self).__init__()
self.dim = dim
assert attn_type in ['dot', 'general', 'mlp'
], 'Please select a valid attention type (got {:s}).'.format(
attn_type)
self.attn_type = attn_type
assert attn_func in ['softmax', 'sparsemax'
], 'Please select a valid attention function.'
self.attn_func = attn_func
if self.attn_type == 'general':
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == 'mlp':
self.linear_context = nn.Linear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = nn.Linear(dim, 1, bias=False)
out_bias = self.attn_type == 'mlp'
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
if coverage:
self.linear_cover = nn.Linear(1, dim, bias=False)
def score(self, h_t, h_s):
"""
Args:
h_t (FloatTensor): sequence of queries ``(batch, tgt_len, dim)``
            h_s (FloatTensor): sequence of sources ``(batch, src_len, dim)``
Returns:
FloatTensor: raw attention scores (unnormalized) for each src index
``(batch, tgt_len, src_len)``
"""
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ['general', 'dot']:
if self.attn_type == 'general':
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = torch.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, input_0, input_1):
primals_3 = self.linear_out.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
|
MaxatTezekbayev/OpenNMT-py-lexical
|
GlobalAttention
| false
| 5,593
|
[
"MIT"
] | 1
|
44182999b863fc4074d67e0281c5bdab19abddfe
|
https://github.com/MaxatTezekbayev/OpenNMT-py-lexical/tree/44182999b863fc4074d67e0281c5bdab19abddfe
|
NetModel
|
import torch
import torch.nn.functional as F
import torch.utils.data.dataloader
class NetModel(torch.nn.Module):
def __init__(self):
super(NetModel, self).__init__()
self.hidden = torch.nn.Linear(28 * 28, 300)
self.output = torch.nn.Linear(300, 10)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = self.hidden(x)
x = F.relu(x)
x = self.output(x)
return x
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data.dataloader
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 300
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (300, 784), (784, 1))
assert_size_stride(primals_3, (300,), (1,))
assert_size_stride(primals_4, (10, 300), (300, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 300), (300, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
300), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1200)](buf1, primals_3, 1200, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(300, 10), (1, 300), 0), alpha=1, beta=1, out=buf2)
del primals_5
return buf2, primals_1, buf1, primals_4
class NetModelNew(torch.nn.Module):
def __init__(self):
super(NetModelNew, self).__init__()
self.hidden = torch.nn.Linear(28 * 28, 300)
self.output = torch.nn.Linear(300, 10)
def forward(self, input_0):
primals_2 = self.hidden.weight
primals_3 = self.hidden.bias
primals_4 = self.output.weight
primals_5 = self.output.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Michaelzhouisnotwhite/Learning-Gan
|
NetModel
| false
| 5,594
|
[
"MIT"
] | 1
|
cf1cff1f2afba296489db55f5de9ebb8405feb0e
|
https://github.com/Michaelzhouisnotwhite/Learning-Gan/tree/cf1cff1f2afba296489db55f5de9ebb8405feb0e
|
PairwiseLoss
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.parallel
class PairwiseLoss(nn.Module):
def __init__(self):
super(PairwiseLoss, self).__init__()
def forward(self, x, y):
diff = x - y
return torch.sum(diff * diff)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sub_sum_0[grid(1)](arg0_1, arg1_1, buf0, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class PairwiseLossNew(nn.Module):
def __init__(self):
super(PairwiseLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
MinesNicaicai/large-scale-pointcloud-matching
|
PairwiseLoss
| false
| 5,595
|
[
"MIT"
] | 1
|
cfe140f2be1110ed75b6edd27538021e513a31c9
|
https://github.com/MinesNicaicai/large-scale-pointcloud-matching/tree/cfe140f2be1110ed75b6edd27538021e513a31c9
|
MLP
|
import torch
from torch import nn
import torch.utils
class MLP(torch.nn.Module):
def __init__(self, input_dim, output_dim):
super(MLP, self).__init__()
self.d1 = torch.nn.Linear(input_dim, 32)
self.d2 = torch.nn.Linear(32, 16)
self.d3 = torch.nn.Linear(16, output_dim)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
x = self.d1(x)
x = self.relu(x)
x = self.d2(x)
x = self.relu(x)
x = self.d3(x)
x = self.softmax(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 32), (32, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (4, 16), (16, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1,
primals_2, buf8, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0),
reinterpret_tensor(primals_4, (32, 16), (1, 32), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(1024)](buf3,
primals_5, buf7, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_3[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(
buf3, (64, 16), (16, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class MLPNew(torch.nn.Module):
def __init__(self, input_dim, output_dim):
super(MLPNew, self).__init__()
self.d1 = torch.nn.Linear(input_dim, 32)
self.d2 = torch.nn.Linear(32, 16)
self.d3 = torch.nn.Linear(16, output_dim)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0):
primals_1 = self.d1.weight
primals_2 = self.d1.bias
primals_4 = self.d2.weight
primals_5 = self.d2.bias
primals_6 = self.d3.weight
primals_7 = self.d3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
MichaelLee-ceo/FedSAUC
|
MLP
| false
| 5,596
|
[
"Apache-2.0"
] | 1
|
8c00008772213562ff6a07bf9fa92c3831713118
|
https://github.com/MichaelLee-ceo/FedSAUC/tree/8c00008772213562ff6a07bf9fa92c3831713118
|
CNN_DropOut
|
import torch
from torch import nn
import torch.utils
class CNN_DropOut(torch.nn.Module):
"""
Recommended model by "Adaptive Federated Optimization" (https://arxiv.org/pdf/2003.00295.pdf)
Used for EMNIST experiments.
    When `only_digits=True`, the summary of the returned model is
```
Model:
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
reshape (Reshape) (None, 28, 28, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 26, 26, 32) 320
_________________________________________________________________
conv2d_1 (Conv2D) (None, 24, 24, 64) 18496
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 9216) 0
_________________________________________________________________
dense (Dense) (None, 128) 1179776
_________________________________________________________________
dropout_1 (Dropout) (None, 128) 0
_________________________________________________________________
dense_1 (Dense) (None, 10) 1290
=================================================================
Total params: 1,199,882
Trainable params: 1,199,882
Non-trainable params: 0
```
Args:
only_digits: If True, uses a final layer with 10 outputs, for use with the
digits only MNIST dataset (http://yann.lecun.com/exdb/mnist/).
If False, uses 62 outputs for Federated Extended MNIST (FEMNIST)
EMNIST: Extending MNIST to handwritten letters: https://arxiv.org/abs/1702.05373.
Returns:
A `torch.nn.Module`.
"""
def __init__(self, only_digits=True):
super(CNN_DropOut, self).__init__()
self.conv2d_1 = torch.nn.Conv2d(1, 8, kernel_size=3)
self.max_pooling = nn.MaxPool2d(2, stride=2)
self.conv2d_2 = torch.nn.Conv2d(8, 16, kernel_size=3)
self.dropout_1 = nn.Dropout(0.25)
self.flatten = nn.Flatten()
self.linear_1 = nn.Linear(16 * 5 * 5, 64)
self.dropout_2 = nn.Dropout(0.25)
self.linear_2 = nn.Linear(64, 10)
self.relu = nn.ReLU()
def forward(self, x):
x = x.view(x.shape[0], 28, 28)
x = torch.unsqueeze(x, 1)
x = self.conv2d_1(x)
x = self.relu(x)
x = self.max_pooling(x)
x = self.conv2d_2(x)
x = self.relu(x)
x = self.max_pooling(x)
x = self.dropout_1(x)
x = self.flatten(x)
x = self.linear_1(x)
x = self.relu(x)
x = self.dropout_2(x)
x = self.linear_2(x)
return x
def get_inputs():
return [torch.rand([4, 28, 28])]
def get_init_inputs():
return [[], {}]
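# Shape sketch (illustrative) for the 16 * 5 * 5 = 400 features flattened above:
# 28 -> conv3x3 (no padding) -> 26 -> maxpool(2) -> 13 -> conv3x3 -> 11 -> maxpool(2) -> 5,
# with 16 channels after the second convolution.
def _example_flatten_size():
    model = CNN_DropOut()
    x = torch.unsqueeze(torch.rand(2, 28, 28), 1)
    x = model.max_pooling(model.relu(model.conv2d_1(x)))
    x = model.max_pooling(model.relu(model.conv2d_2(x)))
    assert model.flatten(x).shape == (2, 16 * 5 * 5)
    return model.flatten(x).shape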
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 21632
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 676 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 5408
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 13
x3 = xindex // 13
x2 = xindex // 1352
x4 = xindex % 1352
tmp0 = tl.load(in_ptr0 + (2 * x0 + 52 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 52 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (26 + 2 * x0 + 52 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (27 + 2 * x0 + 52 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1376 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1408 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 7744
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 121 % 16
x2 = xindex // 1936
x4 = xindex % 1936
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + 1952 * x2), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5 % 5
x2 = xindex // 25 % 16
x3 = xindex // 400
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 22 * x1 + 121 * x2 + 1952 * x3),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 22 * x1 + 121 * x2 + 1952 * x3),
xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (11 + 2 * x0 + 22 * x1 + 121 * x2 + 1952 * x3),
xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (12 + 2 * x0 + 22 * x1 + 121 * x2 + 1952 * x3
), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x4, tmp15, xmask)
tl.store(out_ptr1 + x4, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 28, 28), (784, 28, 1))
assert_size_stride(primals_2, (8, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (64, 400), (400, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (10, 64), (64, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 28, 28), (784, 784, 28, 1), 0), primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 26, 26), (5408, 676, 26, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(21632)](buf1, primals_3,
21632, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 8, 13, 13), (1376, 169, 13, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 8, 13, 13), (1408, 169, 13, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(5408)](buf1, buf2,
buf3, 5408, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 11, 11), (1936, 121, 11, 1))
buf5 = empty_strided_cuda((4, 16, 11, 11), (1952, 121, 11, 1),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(7744)](buf4, primals_5,
buf5, 7744, XBLOCK=256, num_warps=4, num_stages=1)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0),
reinterpret_tensor(primals_6, (400, 64), (1, 400), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(256)](buf9, primals_7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(64, 10), (1, 64), 0), alpha=1, beta=1, out=buf10)
del primals_9
return buf10, primals_2, primals_4, reinterpret_tensor(primals_1, (4, 1,
28, 28), (784, 784, 28, 1), 0
), buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400),
(400, 1), 0), buf9, primals_8, primals_6
class CNN_DropOutNew(torch.nn.Module):
"""
Recommended model by "Adaptive Federated Optimization" (https://arxiv.org/pdf/2003.00295.pdf)
Used for EMNIST experiments.
    When `only_digits=True`, the summary of the returned model is
```
Model:
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
reshape (Reshape) (None, 28, 28, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 26, 26, 32) 320
_________________________________________________________________
conv2d_1 (Conv2D) (None, 24, 24, 64) 18496
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 9216) 0
_________________________________________________________________
dense (Dense) (None, 128) 1179776
_________________________________________________________________
dropout_1 (Dropout) (None, 128) 0
_________________________________________________________________
dense_1 (Dense) (None, 10) 1290
=================================================================
Total params: 1,199,882
Trainable params: 1,199,882
Non-trainable params: 0
```
Args:
only_digits: If True, uses a final layer with 10 outputs, for use with the
digits only MNIST dataset (http://yann.lecun.com/exdb/mnist/).
If False, uses 62 outputs for Federated Extended MNIST (FEMNIST)
EMNIST: Extending MNIST to handwritten letters: https://arxiv.org/abs/1702.05373.
Returns:
A `torch.nn.Module`.
"""
def __init__(self, only_digits=True):
super(CNN_DropOutNew, self).__init__()
self.conv2d_1 = torch.nn.Conv2d(1, 8, kernel_size=3)
self.max_pooling = nn.MaxPool2d(2, stride=2)
self.conv2d_2 = torch.nn.Conv2d(8, 16, kernel_size=3)
self.dropout_1 = nn.Dropout(0.25)
self.flatten = nn.Flatten()
self.linear_1 = nn.Linear(16 * 5 * 5, 64)
self.dropout_2 = nn.Dropout(0.25)
self.linear_2 = nn.Linear(64, 10)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_2 = self.conv2d_1.weight
primals_3 = self.conv2d_1.bias
primals_4 = self.conv2d_2.weight
primals_5 = self.conv2d_2.bias
primals_6 = self.linear_1.weight
primals_7 = self.linear_1.bias
primals_8 = self.linear_2.weight
primals_9 = self.linear_2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
MichaelLee-ceo/FedSAUC
|
CNN_DropOut
| false
| 5,597
|
[
"Apache-2.0"
] | 1
|
8c00008772213562ff6a07bf9fa92c3831713118
|
https://github.com/MichaelLee-ceo/FedSAUC/tree/8c00008772213562ff6a07bf9fa92c3831713118
|
ContrastiveLoss
|
import torch
import torch.utils.data
import torch.nn.functional as F
import torch.nn.parallel
class ContrastiveLoss(torch.nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=2.0):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, output1, output2, is_negative):
euclidean_distance = F.pairwise_distance(output1, output2)
loss_contrastive = torch.mean((1 - is_negative) * torch.pow(
euclidean_distance, 2) + is_negative * torch.pow(torch.clamp(
self.margin - euclidean_distance, min=0.0), 2))
return loss_contrastive
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
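# Behaviour sketch (illustrative, 2-D inputs): with is_negative == 0 the loss is the
# squared pairwise distance, with is_negative == 1 it is clamp(margin - d, 0)^2, so an
# identical pair costs ~0 as a positive and ~margin^2 = 4 as a negative.
def _example_contrastive():
    criterion = ContrastiveLoss(margin=2.0)
    a = torch.ones(4, 8)
    positive = criterion(a, a, torch.zeros(4))
    negative = criterion(a, a, torch.ones(4))
    return positive.item(), negative.item()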
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.store(out_ptr0 + x0, tmp24, xmask)
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = 2.0
tmp7 = tmp6 - tmp3
tmp8 = 0.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp9 * tmp9
tmp11 = tmp0 * tmp10
tmp12 = tmp5 + tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2,
arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf2,
class ContrastiveLossNew(torch.nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=2.0):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
MinesNicaicai/large-scale-pointcloud-matching
|
ContrastiveLoss
| false
| 5,598
|
[
"MIT"
] | 1
|
cfe140f2be1110ed75b6edd27538021e513a31c9
|
https://github.com/MinesNicaicai/large-scale-pointcloud-matching/tree/cfe140f2be1110ed75b6edd27538021e513a31c9
|
Attention
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
def forward(self, query, key, value, mask=None, dropout=None):
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1000000000.0)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
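# Equivalence sketch (illustrative): the module computes softmax(Q K^T / sqrt(d_k)) V,
# so it should match the manual computation below for any shapes matmul accepts.
def _example_scaled_dot_product():
    q, k, v = torch.rand(2, 3, 5), torch.rand(2, 3, 5), torch.rand(2, 3, 5)
    out, p_attn = Attention()(q, k, v)
    scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(q.size(-1))
    ref = torch.matmul(F.softmax(scores, dim=-1), v)
    assert torch.allclose(out, ref, atol=1e-06)
    return out.shape, p_attn.shape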
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3
)
del arg2_1
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2
class AttentionNew(nn.Module):
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
Moymix/BERT-pytorch
|
Attention
| false
| 5,599
|
[
"Apache-2.0"
] | 1
|
f0b9c3ae53e05c00adcc761e0422e4222d8b5619
|
https://github.com/Moymix/BERT-pytorch/tree/f0b9c3ae53e05c00adcc761e0422e4222d8b5619
|
VDSR_F64B6
|
import torch
import torch.nn as nn
def load_param(model1_path, model2):
dict_param1 = torch.load(model1_path)
dict_param2 = dict(model2.named_parameters())
for name2 in dict_param2:
if name2 in dict_param1:
dict_param2[name2].data.copy_(dict_param1[name2].data)
model2.load_state_dict(dict_param2)
return model2
class VDSR_F64B6(nn.Module):
def __init__(self, model=False, fixed=False):
super(VDSR_F64B6, self).__init__()
self.fixed = fixed
self.conv1 = nn.Conv2d(1, 64, 3, 1, 1, bias=False)
self.conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv8 = nn.Conv2d(64, 1, 3, 1, 1, bias=False)
self.relu = nn.ReLU(inplace=True)
if model:
load_param(model, self)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, y):
y = self.relu(self.conv1(y))
y = self.relu(self.conv2(y))
y = self.relu(self.conv3(y))
y = self.relu(self.conv4(y))
y = self.relu(self.conv5(y))
y = self.relu(self.conv6(y))
y = self.relu(self.conv7(y))
y = self.conv8(y)
return y
def forward_stem(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.conv8(y)
out8 = y
return out1, out2, out3, out4, out5, out6, out7, out8
def forward_dense(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.conv8(y)
out8 = y
return out1, out2, out3, out4, out5, out6, out7, out8
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_3, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (1, 64, 3, 3), (576, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1048576)](buf1, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(1048576)](buf3, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_relu_0[grid(1048576)](buf5, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf7 = buf6
del buf6
triton_poi_fused_relu_0[grid(1048576)](buf7, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf9 = buf8
del buf8
triton_poi_fused_relu_0[grid(1048576)](buf9, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf11 = buf10
del buf10
triton_poi_fused_relu_0[grid(1048576)](buf11, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_relu_0[grid(1048576)](buf13, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 1, 64, 64), (4096, 4096, 64, 1))
return (buf14, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, primals_7, primals_8, primals_9, buf1, buf3, buf5, buf7,
buf9, buf11, buf13)
def load_param(model1_path, model2):
dict_param1 = torch.load(model1_path)
dict_param2 = dict(model2.named_parameters())
for name2 in dict_param2:
if name2 in dict_param1:
dict_param2[name2].data.copy_(dict_param1[name2].data)
model2.load_state_dict(dict_param2)
return model2
class VDSR_F64B6New(nn.Module):
def __init__(self, model=False, fixed=False):
super(VDSR_F64B6New, self).__init__()
self.fixed = fixed
self.conv1 = nn.Conv2d(1, 64, 3, 1, 1, bias=False)
self.conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv8 = nn.Conv2d(64, 1, 3, 1, 1, bias=False)
self.relu = nn.ReLU(inplace=True)
if model:
load_param(model, self)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward_stem(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.conv8(y)
out8 = y
return out1, out2, out3, out4, out5, out6, out7, out8
def forward_dense(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.conv8(y)
out8 = y
return out1, out2, out3, out4, out5, out6, out7, out8
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_6 = self.conv5.weight
primals_7 = self.conv6.weight
primals_8 = self.conv7.weight
primals_9 = self.conv8.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
MingSun-Tse/pytorch-vdsr
|
VDSR_F64B6
| false
| 5,600
|
[
"MIT"
] | 1
|
597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
|
https://github.com/MingSun-Tse/pytorch-vdsr/tree/597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
|
SmallVDSR_16x
|
import torch
import torch.nn as nn
def load_param(model1_path, model2):
dict_param1 = torch.load(model1_path)
dict_param2 = dict(model2.named_parameters())
for name2 in dict_param2:
if name2 in dict_param1:
dict_param2[name2].data.copy_(dict_param1[name2].data)
model2.load_state_dict(dict_param2)
return model2
class SmallVDSR_16x(nn.Module):
def __init__(self, model=False, fixed=False):
super(SmallVDSR_16x, self).__init__()
self.fixed = fixed
self.conv1 = nn.Conv2d(1, 16, 3, 1, 1, bias=False)
self.conv2 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv3 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv4 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv5 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv6 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv7 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv8 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv9 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv10 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv11 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv12 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv13 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv14 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv15 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv16 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv17 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv18 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv19 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv20 = nn.Conv2d(16, 1, 3, 1, 1, bias=False)
self.relu = nn.ReLU()
if model:
load_param(model, self)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward_stem(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.relu(self.conv8(y))
out8 = y
y = self.relu(self.conv9(y))
out9 = y
y = self.relu(self.conv10(y))
out10 = y
y = self.relu(self.conv11(y))
out11 = y
y = self.relu(self.conv12(y))
out12 = y
y = self.relu(self.conv13(y))
out13 = y
y = self.relu(self.conv14(y))
out14 = y
y = self.relu(self.conv15(y))
out15 = y
y = self.relu(self.conv16(y))
out16 = y
y = self.relu(self.conv17(y))
out17 = y
y = self.relu(self.conv18(y))
out18 = y
y = self.relu(self.conv19(y))
out19 = y
y = self.conv20(y)
return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10,
out11, out12, out13, out14, out15, out16, out17, out18, out19, y)
def forward_dense(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.relu(self.conv8(y))
out8 = y
y = self.relu(self.conv9(y))
out9 = y
y = self.relu(self.conv10(y))
out10 = y
y = self.relu(self.conv11(y))
out11 = y
y = self.relu(self.conv12(y))
out12 = y
y = self.relu(self.conv13(y))
out13 = y
y = self.relu(self.conv14(y))
out14 = y
y = self.relu(self.conv15(y))
out15 = y
y = self.relu(self.conv16(y))
out16 = y
y = self.relu(self.conv17(y))
out17 = y
y = self.relu(self.conv18(y))
out18 = y
y = self.relu(self.conv19(y))
out19 = y
y = self.conv20(y)
out20 = y
return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10,
out11, out12, out13, out14, out15, out16, out17, out18, out19,
out20)
def forward(self, y):
y = self.relu(self.conv1(y))
y = self.relu(self.conv2(y))
y = self.relu(self.conv3(y))
y = self.relu(self.conv4(y))
y = self.relu(self.conv5(y))
y = self.relu(self.conv6(y))
y = self.relu(self.conv7(y))
y = self.relu(self.conv8(y))
y = self.relu(self.conv9(y))
y = self.relu(self.conv10(y))
y = self.relu(self.conv11(y))
y = self.relu(self.conv12(y))
y = self.relu(self.conv13(y))
y = self.relu(self.conv14(y))
y = self.relu(self.conv15(y))
y = self.relu(self.conv16(y))
y = self.relu(self.conv17(y))
y = self.relu(self.conv18(y))
y = self.relu(self.conv19(y))
y = self.conv20(y)
return y
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_3, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_10, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_11, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_12, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_13, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_14, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_15, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_16, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_17, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_18, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_19, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_20, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_21, (1, 16, 3, 3), (144, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(262144)](buf1, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(262144)](buf3, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_relu_0[grid(262144)](buf5, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf7 = buf6
del buf6
triton_poi_fused_relu_0[grid(262144)](buf7, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf9 = buf8
del buf8
triton_poi_fused_relu_0[grid(262144)](buf9, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf11 = buf10
del buf10
triton_poi_fused_relu_0[grid(262144)](buf11, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_relu_0[grid(262144)](buf13, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf15 = buf14
del buf14
triton_poi_fused_relu_0[grid(262144)](buf15, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf17 = buf16
del buf16
triton_poi_fused_relu_0[grid(262144)](buf17, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf18 = extern_kernels.convolution(buf17, primals_11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf19 = buf18
del buf18
triton_poi_fused_relu_0[grid(262144)](buf19, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf20 = extern_kernels.convolution(buf19, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_relu_0[grid(262144)](buf21, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf22 = extern_kernels.convolution(buf21, primals_13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf23 = buf22
del buf22
triton_poi_fused_relu_0[grid(262144)](buf23, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf24 = extern_kernels.convolution(buf23, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf25 = buf24
del buf24
triton_poi_fused_relu_0[grid(262144)](buf25, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf26 = extern_kernels.convolution(buf25, primals_15, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf27 = buf26
del buf26
triton_poi_fused_relu_0[grid(262144)](buf27, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf29 = buf28
del buf28
triton_poi_fused_relu_0[grid(262144)](buf29, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf30 = extern_kernels.convolution(buf29, primals_17, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf31 = buf30
del buf30
triton_poi_fused_relu_0[grid(262144)](buf31, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf32 = extern_kernels.convolution(buf31, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf33 = buf32
del buf32
triton_poi_fused_relu_0[grid(262144)](buf33, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf34 = extern_kernels.convolution(buf33, primals_19, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf35 = buf34
del buf34
triton_poi_fused_relu_0[grid(262144)](buf35, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf36 = extern_kernels.convolution(buf35, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf37 = buf36
del buf36
triton_poi_fused_relu_0[grid(262144)](buf37, 262144, XBLOCK=512,
num_warps=8, num_stages=1)
buf38 = extern_kernels.convolution(buf37, primals_21, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 1, 64, 64), (4096, 4096, 64, 1))
return (buf38, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, primals_7, primals_8, primals_9, primals_10, primals_11,
primals_12, primals_13, primals_14, primals_15, primals_16,
primals_17, primals_18, primals_19, primals_20, primals_21, buf1,
buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21,
buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37)
def load_param(model1_path, model2):
dict_param1 = torch.load(model1_path)
dict_param2 = dict(model2.named_parameters())
for name2 in dict_param2:
if name2 in dict_param1:
dict_param2[name2].data.copy_(dict_param1[name2].data)
model2.load_state_dict(dict_param2)
return model2
class SmallVDSR_16xNew(nn.Module):
def __init__(self, model=False, fixed=False):
super(SmallVDSR_16xNew, self).__init__()
self.fixed = fixed
self.conv1 = nn.Conv2d(1, 16, 3, 1, 1, bias=False)
self.conv2 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv3 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv4 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv5 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv6 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv7 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv8 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv9 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv10 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv11 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv12 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv13 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv14 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv15 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv16 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv17 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv18 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv19 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
self.conv20 = nn.Conv2d(16, 1, 3, 1, 1, bias=False)
self.relu = nn.ReLU()
if model:
load_param(model, self)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward_stem(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.relu(self.conv8(y))
out8 = y
y = self.relu(self.conv9(y))
out9 = y
y = self.relu(self.conv10(y))
out10 = y
y = self.relu(self.conv11(y))
out11 = y
y = self.relu(self.conv12(y))
out12 = y
y = self.relu(self.conv13(y))
out13 = y
y = self.relu(self.conv14(y))
out14 = y
y = self.relu(self.conv15(y))
out15 = y
y = self.relu(self.conv16(y))
out16 = y
y = self.relu(self.conv17(y))
out17 = y
y = self.relu(self.conv18(y))
out18 = y
y = self.relu(self.conv19(y))
out19 = y
y = self.conv20(y)
return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10,
out11, out12, out13, out14, out15, out16, out17, out18, out19, y)
def forward_dense(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.relu(self.conv8(y))
out8 = y
y = self.relu(self.conv9(y))
out9 = y
y = self.relu(self.conv10(y))
out10 = y
y = self.relu(self.conv11(y))
out11 = y
y = self.relu(self.conv12(y))
out12 = y
y = self.relu(self.conv13(y))
out13 = y
y = self.relu(self.conv14(y))
out14 = y
y = self.relu(self.conv15(y))
out15 = y
y = self.relu(self.conv16(y))
out16 = y
y = self.relu(self.conv17(y))
out17 = y
y = self.relu(self.conv18(y))
out18 = y
y = self.relu(self.conv19(y))
out19 = y
y = self.conv20(y)
out20 = y
return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10,
out11, out12, out13, out14, out15, out16, out17, out18, out19,
out20)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_6 = self.conv5.weight
primals_7 = self.conv6.weight
primals_8 = self.conv7.weight
primals_9 = self.conv8.weight
primals_10 = self.conv9.weight
primals_11 = self.conv10.weight
primals_12 = self.conv11.weight
primals_13 = self.conv12.weight
primals_14 = self.conv13.weight
primals_15 = self.conv14.weight
primals_16 = self.conv15.weight
primals_17 = self.conv16.weight
primals_18 = self.conv17.weight
primals_19 = self.conv18.weight
primals_20 = self.conv19.weight
primals_21 = self.conv20.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0]
|
MingSun-Tse/pytorch-vdsr
|
SmallVDSR_16x
| false
| 5,601
|
[
"MIT"
] | 1
|
597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
|
https://github.com/MingSun-Tse/pytorch-vdsr/tree/597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
|
Squash
|
import torch
import torch.nn as nn
class Squash(nn.Module):
def forward(self, x, dim=-1):
squared_norm = (x ** 2).sum(dim=dim, keepdim=True)
scale = squared_norm / (1 + squared_norm)
return scale * x / (squared_norm.sqrt() + 1e-08)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
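    # Fused capsule "squash" along the last dim (size 4):
    # s = sum(x**2); out = (s / (1 + s)) * x / (sqrt(s) + 1e-08).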
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = tmp13 * tmp14
tmp16 = libdevice.sqrt(tmp10)
tmp17 = 1e-08
tmp18 = tmp16 + tmp17
tmp19 = tmp15 / tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SquashNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
MobtgZhang/MWMLNet
|
Squash
| false
| 5,602
|
[
"MIT"
] | 1
|
125bb39935916b6b4be505c51cb6a04eb49b96d0
|
https://github.com/MobtgZhang/MWMLNet/tree/125bb39935916b6b4be505c51cb6a04eb49b96d0
|
ResConnectionLayer
|
import math
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class GELU(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
class FeedForwardNetwork(nn.Module):
def __init__(self, in_dim, hid_dim) ->None:
super().__init__()
self.lin1 = nn.Linear(in_dim, hid_dim)
self.lin2 = nn.Linear(hid_dim, in_dim)
self.gleu = GELU()
self.dropout = nn.Dropout()
def forward(self, inputs):
hid = self.gleu(self.lin1(inputs))
return self.lin2(self.dropout(hid))
class ResConnectionLayer(nn.Module):
def __init__(self, in_dim, dropout):
super(ResConnectionLayer, self).__init__()
self.norm = LayerNorm(in_dim)
self.dropout = nn.Dropout(dropout)
self.ffn = FeedForwardNetwork(in_dim, in_dim)
def forward(self, x):
"""Apply residual connection to any sublayer with the same size."""
return x + self.dropout(self.ffn(self.norm(x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
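    # Fused LayerNorm over the last dim (size 4): mean, unbiased std
    # (sum of squared deviations / 3), then a_2 * (x - mean) / (std + 1e-06) + b_2.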
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
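    # Tanh-approximation GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))),
    # with sqrt(2/pi) baked in as the constant 0.7978845608028654.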
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
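    # Residual add: adds the lin2 bias to the matmul output in place, then adds
    # the original input x of the sublayer.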
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_pow_tanh_1[grid(256)](buf1, buf2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_add_2[grid(256)](buf4, primals_1, primals_7, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return buf4, primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), primals_6, primals_4
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class GELU(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
class FeedForwardNetwork(nn.Module):
def __init__(self, in_dim, hid_dim) ->None:
super().__init__()
self.lin1 = nn.Linear(in_dim, hid_dim)
self.lin2 = nn.Linear(hid_dim, in_dim)
self.gleu = GELU()
self.dropout = nn.Dropout()
def forward(self, inputs):
hid = self.gleu(self.lin1(inputs))
return self.lin2(self.dropout(hid))
class ResConnectionLayerNew(nn.Module):
def __init__(self, in_dim, dropout):
super(ResConnectionLayerNew, self).__init__()
self.norm = LayerNorm(in_dim)
self.dropout = nn.Dropout(dropout)
self.ffn = FeedForwardNetwork(in_dim, in_dim)
def forward(self, input_0):
primals_2 = self.norm.a_2
primals_3 = self.norm.b_2
primals_4 = self.ffn.lin1.weight
primals_5 = self.ffn.lin1.bias
primals_6 = self.ffn.lin2.weight
primals_7 = self.ffn.lin2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
MobtgZhang/MWMLNet
|
ResConnectionLayer
| false
| 5,603
|
[
"MIT"
] | 1
|
125bb39935916b6b4be505c51cb6a04eb49b96d0
|
https://github.com/MobtgZhang/MWMLNet/tree/125bb39935916b6b4be505c51cb6a04eb49b96d0
|
AE
|
import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
class AE(nn.Module):
def __init__(self):
super(AE, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=19, padding=9)
self.conv2 = nn.Conv2d(16, 4, kernel_size=15, padding=7)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.t_conv1 = nn.ConvTranspose2d(4, 16, kernel_size=15, padding=7)
self.t_conv2 = nn.ConvTranspose2d(16, 1, kernel_size=19, padding=9)
self.unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.relu(self.conv1(x))
x, pool1 = self.pool(x)
x = self.relu(self.conv2(x))
x, pool2 = self.pool(x)
x = self.unpool(x, pool2)
x = self.relu(self.t_conv1(x))
x = self.unpool(x, pool1)
x = self.relu(self.t_conv2(x))
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
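    # Adds the per-channel conv1 bias (channel index x1) and applies ReLU in place.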
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
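    # 2x2 / stride-2 max pooling over the 64x64 maps: writes the pooled value and
    # the flat argmax position within the input plane (row * 64 + col), which
    # max_unpool2d consumes later.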
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
x2 = xindex // 32 % 32
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tmp17 = tl.full([1], 2, tl.int32)
tmp18 = tl.where((tmp16 < 0) != (tmp17 < 0), tl.where(tmp16 % tmp17 !=
0, tmp16 // tmp17 - 1, tmp16 // tmp17), tmp16 // tmp17)
tmp19 = tmp18 * tmp17
tmp20 = tmp16 - tmp19
tmp21 = 2 * x2
tmp22 = tmp21 + tmp18
tmp23 = 2 * x0
tmp24 = tmp23 + tmp20
tmp25 = tl.full([1], 64, tl.int64)
tmp26 = tmp22 * tmp25
tmp27 = tmp26 + tmp24
tl.store(out_ptr0 + x4, tmp6, None)
tl.store(out_ptr1 + x4, tmp27, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
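    # Same 2x2 / stride-2 pooling for the 32x32 maps; here the flat argmax indices
    # go to out_ptr0 (int64) and the pooled values to out_ptr1.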
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x3 = xindex // 16
x1 = xindex // 16 % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x3), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x3), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x3), None, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x3), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 2, tl.int32)
tmp18 = tl.where((tmp15 < 0) != (tmp17 < 0), tl.where(tmp15 % tmp17 !=
0, tmp15 // tmp17 - 1, tmp15 // tmp17), tmp15 // tmp17)
tmp19 = tmp18 * tmp17
tmp20 = tmp15 - tmp19
tmp21 = 2 * x1
tmp22 = tmp21 + tmp18
tmp23 = 2 * x0
tmp24 = tmp23 + tmp20
tmp25 = tl.full([1], 32, tl.int64)
tmp26 = tmp22 * tmp25
tmp27 = tmp26 + tmp24
tl.store(out_ptr0 + x4, tmp27, None)
tl.store(out_ptr1 + x4, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
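    # Adds the t_conv1 bias and applies ReLU in place, additionally storing a
    # boolean mask of the clamped (<= 0) positions for the backward pass.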
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, None)
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_5(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
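    # Single-channel variant of the kernel above for the t_conv2 output:
    # bias + ReLU in place, plus the <= 0 mask for the backward pass.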
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, None)
tl.store(out_ptr0 + x0, tmp7, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (16, 1, 19, 19), (361, 361, 19, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (4, 16, 15, 15), (3600, 225, 15, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 16, 15, 15), (3600, 225, 15, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 1, 19, 19), (361, 361, 19, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(9, 9), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.int64)
triton_poi_fused_max_pool2d_with_indices_1[grid(65536)](buf1, buf2,
buf3, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(7, 7), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 32, 32), (4096, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(16384)](buf5, primals_5,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.int64)
buf7 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(4096)](buf5, buf6,
buf7, 4096, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = torch.ops.aten.max_unpool2d.default(buf7, buf6, [32, 32])
del buf7
buf9 = buf8
del buf8
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
padding=(7, 7), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf11 = buf10
del buf10
buf17 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_4[grid(65536)](
buf11, primals_7, buf17, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_7
buf12 = torch.ops.aten.max_unpool2d.default(buf11, buf3, [64, 64])
del buf11
buf13 = buf12
del buf12
buf14 = extern_kernels.convolution(buf13, primals_8, stride=(1, 1),
padding=(9, 9), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf15 = buf14
del buf14
buf16 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_5[grid(16384)](
buf15, primals_9, buf16, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_9
return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf2, buf3, buf5, buf6, buf9, buf13, buf16, buf17)
class AENew(nn.Module):
def __init__(self):
super(AENew, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=19, padding=9)
self.conv2 = nn.Conv2d(16, 4, kernel_size=15, padding=7)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.t_conv1 = nn.ConvTranspose2d(4, 16, kernel_size=15, padding=7)
self.t_conv2 = nn.ConvTranspose2d(16, 1, kernel_size=19, padding=9)
self.unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.t_conv1.weight
primals_7 = self.t_conv1.bias
primals_8 = self.t_conv2.weight
primals_9 = self.t_conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
Minauras/deepdefresneling
|
AE
| false
| 5,604
|
[
"BSD-2-Clause"
] | 1
|
e17168e9a8d322201998c73da54efbd334b0ffb9
|
https://github.com/Minauras/deepdefresneling/tree/e17168e9a8d322201998c73da54efbd334b0ffb9
|
Lookahead
|
import torch
import torch.utils.data.distributed
import torch.nn as nn
import torch.nn.functional as F
class Lookahead(nn.Module):
def __init__(self, n_features, context):
super(Lookahead, self).__init__()
assert context > 0
self.context = context
self.n_features = n_features
self.pad = 0, self.context - 1
self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size
=self.context, stride=1, groups=self.n_features, padding=0,
bias=None)
def forward(self, x):
x = x.transpose(0, 1).transpose(1, 2)
x = F.pad(x, pad=self.pad, value=0)
x = self.conv(x)
x = x.transpose(1, 2).transpose(0, 1).contiguous()
return x
def __repr__(self):
return self.__class__.__name__ + '(' + 'n_features=' + str(self.
n_features) + ', context=' + str(self.context) + ')'
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4, 'context': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data.distributed
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
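    # Rearranges the (T, N, H) input into the (N, H, T) layout expected by the
    # grouped Conv1d and right-pads the time dim from 4 to 7 (context - 1 zeros).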
ynumel = 16
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = x1
tmp1 = tl.full([1, 1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (y0 + 16 * x1), tmp2 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x1 + 7 * y0), tmp3, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
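    # Transposes the (N, H, T) convolution output back into a contiguous
    # (T, N, H) tensor, matching the original transpose + contiguous() sequence.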
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(16, 7)](primals_1, buf0, 16,
7, XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(4, 16)](buf1, buf2, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
del buf1
return buf2, primals_2, buf0
class LookaheadNew(nn.Module):
def __init__(self, n_features, context):
super(LookaheadNew, self).__init__()
assert context > 0
self.context = context
self.n_features = n_features
self.pad = 0, self.context - 1
self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size
=self.context, stride=1, groups=self.n_features, padding=0,
bias=None)
def __repr__(self):
return self.__class__.__name__ + '(' + 'n_features=' + str(self.
n_features) + ', context=' + str(self.context) + ')'
def forward(self, input_0):
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
MrXJC/deepspeech.pytorch
|
Lookahead
| false
| 5,605
|
[
"MIT"
] | 1
|
6379c18d3f56cad8896a51d45166ea979423e0bf
|
https://github.com/MrXJC/deepspeech.pytorch/tree/6379c18d3f56cad8896a51d45166ea979423e0bf
|
VDSR
|
import torch
import torch.nn as nn
def load_param(model1_path, model2):
dict_param1 = torch.load(model1_path)
dict_param2 = dict(model2.named_parameters())
for name2 in dict_param2:
if name2 in dict_param1:
dict_param2[name2].data.copy_(dict_param1[name2].data)
model2.load_state_dict(dict_param2)
return model2
class VDSR(nn.Module):
def __init__(self, model=False, fixed=False):
super(VDSR, self).__init__()
self.fixed = fixed
self.conv1 = nn.Conv2d(1, 64, 3, 1, 1, bias=False)
self.conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv8 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv9 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv10 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv11 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv12 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv13 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv14 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv15 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv16 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv17 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv18 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv19 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv20 = nn.Conv2d(64, 1, 3, 1, 1, bias=False)
self.relu = nn.ReLU(inplace=True)
if model:
load_param(model, self)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, y):
y = self.relu(self.conv1(y))
y = self.relu(self.conv2(y))
y = self.relu(self.conv3(y))
y = self.relu(self.conv4(y))
y = self.relu(self.conv5(y))
y = self.relu(self.conv6(y))
y = self.relu(self.conv7(y))
y = self.relu(self.conv8(y))
y = self.relu(self.conv9(y))
y = self.relu(self.conv10(y))
y = self.relu(self.conv11(y))
y = self.relu(self.conv12(y))
y = self.relu(self.conv13(y))
y = self.relu(self.conv14(y))
y = self.relu(self.conv15(y))
y = self.relu(self.conv16(y))
y = self.relu(self.conv17(y))
y = self.relu(self.conv18(y))
y = self.relu(self.conv19(y))
y = self.conv20(y)
return y
def forward_stem(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.relu(self.conv8(y))
out8 = y
y = self.relu(self.conv9(y))
out9 = y
y = self.relu(self.conv10(y))
out10 = y
y = self.relu(self.conv11(y))
out11 = y
y = self.relu(self.conv12(y))
out12 = y
y = self.relu(self.conv13(y))
out13 = y
y = self.relu(self.conv14(y))
out14 = y
y = self.relu(self.conv15(y))
out15 = y
y = self.relu(self.conv16(y))
out16 = y
y = self.relu(self.conv17(y))
out17 = y
y = self.relu(self.conv18(y))
out18 = y
y = self.relu(self.conv19(y))
out19 = y
y = self.conv20(y)
return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10,
out11, out12, out13, out14, out15, out16, out17, out18, out19, y)
def forward_dense(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.relu(self.conv8(y))
out8 = y
y = self.relu(self.conv9(y))
out9 = y
y = self.relu(self.conv10(y))
out10 = y
y = self.relu(self.conv11(y))
out11 = y
y = self.relu(self.conv12(y))
out12 = y
y = self.relu(self.conv13(y))
out13 = y
y = self.relu(self.conv14(y))
out14 = y
y = self.relu(self.conv15(y))
out15 = y
y = self.relu(self.conv16(y))
out16 = y
y = self.relu(self.conv17(y))
out17 = y
y = self.relu(self.conv18(y))
out18 = y
y = self.relu(self.conv19(y))
out19 = y
y = self.conv20(y)
out20 = y
return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10,
out11, out12, out13, out14, out15, out16, out17, out18, out19,
out20)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_3, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_20, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_21, (1, 64, 3, 3), (576, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1048576)](buf1, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(1048576)](buf3, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_relu_0[grid(1048576)](buf5, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf7 = buf6
del buf6
triton_poi_fused_relu_0[grid(1048576)](buf7, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf9 = buf8
del buf8
triton_poi_fused_relu_0[grid(1048576)](buf9, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf11 = buf10
del buf10
triton_poi_fused_relu_0[grid(1048576)](buf11, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_relu_0[grid(1048576)](buf13, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf15 = buf14
del buf14
triton_poi_fused_relu_0[grid(1048576)](buf15, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf17 = buf16
del buf16
triton_poi_fused_relu_0[grid(1048576)](buf17, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf18 = extern_kernels.convolution(buf17, primals_11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf19 = buf18
del buf18
triton_poi_fused_relu_0[grid(1048576)](buf19, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf20 = extern_kernels.convolution(buf19, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_relu_0[grid(1048576)](buf21, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf22 = extern_kernels.convolution(buf21, primals_13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf23 = buf22
del buf22
triton_poi_fused_relu_0[grid(1048576)](buf23, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf24 = extern_kernels.convolution(buf23, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf25 = buf24
del buf24
triton_poi_fused_relu_0[grid(1048576)](buf25, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf26 = extern_kernels.convolution(buf25, primals_15, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf27 = buf26
del buf26
triton_poi_fused_relu_0[grid(1048576)](buf27, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf29 = buf28
del buf28
triton_poi_fused_relu_0[grid(1048576)](buf29, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf30 = extern_kernels.convolution(buf29, primals_17, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf31 = buf30
del buf30
triton_poi_fused_relu_0[grid(1048576)](buf31, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf32 = extern_kernels.convolution(buf31, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf33 = buf32
del buf32
triton_poi_fused_relu_0[grid(1048576)](buf33, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf34 = extern_kernels.convolution(buf33, primals_19, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf35 = buf34
del buf34
triton_poi_fused_relu_0[grid(1048576)](buf35, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf36 = extern_kernels.convolution(buf35, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf37 = buf36
del buf36
triton_poi_fused_relu_0[grid(1048576)](buf37, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
buf38 = extern_kernels.convolution(buf37, primals_21, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 1, 64, 64), (4096, 4096, 64, 1))
return (buf38, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, primals_7, primals_8, primals_9, primals_10, primals_11,
primals_12, primals_13, primals_14, primals_15, primals_16,
primals_17, primals_18, primals_19, primals_20, primals_21, buf1,
buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21,
buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37)
def load_param(model1_path, model2):
dict_param1 = torch.load(model1_path)
dict_param2 = dict(model2.named_parameters())
for name2 in dict_param2:
if name2 in dict_param1:
dict_param2[name2].data.copy_(dict_param1[name2].data)
model2.load_state_dict(dict_param2)
return model2
class VDSRNew(nn.Module):
def __init__(self, model=False, fixed=False):
super(VDSRNew, self).__init__()
self.fixed = fixed
self.conv1 = nn.Conv2d(1, 64, 3, 1, 1, bias=False)
self.conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv8 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv9 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv10 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv11 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv12 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv13 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv14 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv15 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv16 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv17 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv18 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv19 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv20 = nn.Conv2d(64, 1, 3, 1, 1, bias=False)
self.relu = nn.ReLU(inplace=True)
if model:
load_param(model, self)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward_stem(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.relu(self.conv8(y))
out8 = y
y = self.relu(self.conv9(y))
out9 = y
y = self.relu(self.conv10(y))
out10 = y
y = self.relu(self.conv11(y))
out11 = y
y = self.relu(self.conv12(y))
out12 = y
y = self.relu(self.conv13(y))
out13 = y
y = self.relu(self.conv14(y))
out14 = y
y = self.relu(self.conv15(y))
out15 = y
y = self.relu(self.conv16(y))
out16 = y
y = self.relu(self.conv17(y))
out17 = y
y = self.relu(self.conv18(y))
out18 = y
y = self.relu(self.conv19(y))
out19 = y
y = self.conv20(y)
return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10,
out11, out12, out13, out14, out15, out16, out17, out18, out19, y)
def forward_dense(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.relu(self.conv8(y))
out8 = y
y = self.relu(self.conv9(y))
out9 = y
y = self.relu(self.conv10(y))
out10 = y
y = self.relu(self.conv11(y))
out11 = y
y = self.relu(self.conv12(y))
out12 = y
y = self.relu(self.conv13(y))
out13 = y
y = self.relu(self.conv14(y))
out14 = y
y = self.relu(self.conv15(y))
out15 = y
y = self.relu(self.conv16(y))
out16 = y
y = self.relu(self.conv17(y))
out17 = y
y = self.relu(self.conv18(y))
out18 = y
y = self.relu(self.conv19(y))
out19 = y
y = self.conv20(y)
out20 = y
return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10,
out11, out12, out13, out14, out15, out16, out17, out18, out19,
out20)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_6 = self.conv5.weight
primals_7 = self.conv6.weight
primals_8 = self.conv7.weight
primals_9 = self.conv8.weight
primals_10 = self.conv9.weight
primals_11 = self.conv10.weight
primals_12 = self.conv11.weight
primals_13 = self.conv12.weight
primals_14 = self.conv13.weight
primals_15 = self.conv14.weight
primals_16 = self.conv15.weight
primals_17 = self.conv16.weight
primals_18 = self.conv17.weight
primals_19 = self.conv18.weight
primals_20 = self.conv19.weight
primals_21 = self.conv20.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0]
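# --- Usage sketch (editor's addition, not part of the original repo) ---
# A minimal smoke test for the drop-in module above. The inductor-generated
# call() pins CUDA device 0, so this sketch assumes a GPU is available; the
# input shape matches the (N, 1, 64, 64) luminance batches VDSR expects.
if __name__ == '__main__':
    net = VDSRNew().cuda()
    x = torch.rand(4, 1, 64, 64, device='cuda')
    y = net(x)
    print(y.shape)  # torch.Size([4, 1, 64, 64])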
|
MingSun-Tse/pytorch-vdsr
|
VDSR
| false
| 5,606
|
[
"MIT"
] | 1
|
597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
|
https://github.com/MingSun-Tse/pytorch-vdsr/tree/597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
|
DepthwiseSeparableConv
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class DepthwiseSeparableConv(nn.Module):
def __init__(self, in_ch, out_ch, k, bias=True):
super().__init__()
self.depthwise_conv = nn.Conv1d(in_channels=in_ch, out_channels=
in_ch, kernel_size=k, groups=in_ch, padding=k // 2, bias=False)
self.pointwise_conv = nn.Conv1d(in_channels=in_ch, out_channels=
out_ch, kernel_size=1, padding=0, bias=bias)
def forward(self, x):
return F.relu(self.pointwise_conv(self.depthwise_conv(x)))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'out_ch': 4, 'k': 4}]
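# --- Usage sketch (editor's addition, not part of the original repo) ---
# Quick shape check of the depthwise-separable factorisation: the grouped
# depthwise stage filters each channel independently, then the 1x1 pointwise
# stage mixes channels. The sizes below are illustrative (not the toy values
# from get_init_inputs above) and use an explicit batched (N, C, L) input.
if __name__ == '__main__':
    conv = DepthwiseSeparableConv(in_ch=4, out_ch=8, k=3)
    x = torch.rand(2, 4, 16)   # (batch, channels, length)
    y = conv(x)
    print(y.shape)             # torch.Size([2, 8, 16]); padding=k//2 keeps L for odd k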
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 5
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(2,),
dilation=(1,), transposed=False, output_padding=(0,), groups=4,
bias=None)
assert_size_stride(buf0, (1, 4, 5), (20, 5, 1))
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (1, 4, 5), (20, 5, 1))
buf2 = reinterpret_tensor(buf1, (4, 5), (5, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 5), (5, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(20)](buf2,
primals_4, buf3, 20, XBLOCK=32, num_warps=1, num_stages=1)
del primals_4
return buf2, primals_1, primals_3, reinterpret_tensor(primals_2, (1, 4,
4), (16, 4, 1), 0), buf0, buf3
class DepthwiseSeparableConvNew(nn.Module):
def __init__(self, in_ch, out_ch, k, bias=True):
super().__init__()
self.depthwise_conv = nn.Conv1d(in_channels=in_ch, out_channels=
in_ch, kernel_size=k, groups=in_ch, padding=k // 2, bias=False)
self.pointwise_conv = nn.Conv1d(in_channels=in_ch, out_channels=
out_ch, kernel_size=1, padding=0, bias=bias)
def forward(self, input_0):
primals_1 = self.depthwise_conv.weight
primals_3 = self.pointwise_conv.weight
primals_4 = self.pointwise_conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
MobtgZhang/MWMLNet
|
DepthwiseSeparableConv
| false
| 5,607
|
[
"MIT"
] | 1
|
125bb39935916b6b4be505c51cb6a04eb49b96d0
|
https://github.com/MobtgZhang/MWMLNet/tree/125bb39935916b6b4be505c51cb6a04eb49b96d0
|
SFU
|
import torch
import torch.nn as nn
class SFU(nn.Module):
"""Semantic Fusion Unit
    The output vector is expected to not only retrieve correlative information from the fusion vectors,
    but also remain partly unchanged, like the input vector.
"""
def __init__(self, input_size, fusion_size):
super(SFU, self).__init__()
self.linear_r = nn.Linear(input_size + fusion_size, input_size)
self.linear_g = nn.Linear(input_size + fusion_size, input_size)
def forward(self, x, fusions):
r_f = torch.cat([x, fusions], 2)
r = torch.tanh(self.linear_r(r_f))
g = torch.sigmoid(self.linear_g(r_f))
o = g * r + (1 - g) * x
return o
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'fusion_size': 4}]
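# --- Usage sketch (editor's addition, not part of the original repo) ---
# The gate g interpolates between the fused representation r and the raw
# input x, o = g * r + (1 - g) * x, so the output keeps part of x unchanged
# and has the same shape as x. Uses the toy tensors from get_inputs above.
if __name__ == '__main__':
    sfu = SFU(input_size=4, fusion_size=4)
    x, fusions = get_inputs()      # both (batch=4, seq=4, dim=4)
    o = sfu(x, fusions)
    print(o.shape)                 # torch.Size([4, 4, 4]) -- same shape as x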
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), (
8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (16, 8), (
8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(64)](buf2, buf1,
primals_1, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf3, primals_1, reinterpret_tensor(buf0, (16, 8), (8, 1), 0
), buf1, buf2
class SFUNew(nn.Module):
"""Semantic Fusion Unit
    The output vector is expected to not only retrieve correlative information from the fusion vectors,
    but also remain partly unchanged, like the input vector.
"""
def __init__(self, input_size, fusion_size):
super(SFUNew, self).__init__()
self.linear_r = nn.Linear(input_size + fusion_size, input_size)
self.linear_g = nn.Linear(input_size + fusion_size, input_size)
def forward(self, input_0, input_1):
primals_3 = self.linear_r.weight
primals_4 = self.linear_r.bias
primals_5 = self.linear_g.weight
primals_6 = self.linear_g.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
MobtgZhang/MWMLNet
|
SFU
| false
| 5,608
|
[
"MIT"
] | 1
|
125bb39935916b6b4be505c51cb6a04eb49b96d0
|
https://github.com/MobtgZhang/MWMLNet/tree/125bb39935916b6b4be505c51cb6a04eb49b96d0
|
Attention
|
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
class Attention(nn.Module):
"""
    Compute 'Scaled Dot Product Attention'
"""
def forward(self, query, key, value, mask=None, dropout=None):
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(query
.size(-1))
if mask is not None:
scores = scores.masked_fill(mask == 0, -1000000000.0)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
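# --- Usage sketch (editor's addition, not part of the original repo) ---
# Scaled dot-product attention over the toy tensors from get_inputs above.
# The optional mask argument fills positions where mask == 0 with a large
# negative score before the softmax; it is omitted here.
if __name__ == '__main__':
    attn = Attention()
    q, k, v = get_inputs()             # each (4, 4, 4, 4)
    out, p_attn = attn(q, k, v)
    print(out.shape, p_attn.shape)     # both torch.Size([4, 4, 4, 4])
    print(p_attn.sum(-1))              # each attention row sums to 1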
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3
)
del arg2_1
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2
class AttentionNew(nn.Module):
"""
    Compute 'Scaled Dot Product Attention'
"""
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
MobtgZhang/MWMLNet
|
Attention
| false
| 5,609
|
[
"MIT"
] | 1
|
125bb39935916b6b4be505c51cb6a04eb49b96d0
|
https://github.com/MobtgZhang/MWMLNet/tree/125bb39935916b6b4be505c51cb6a04eb49b96d0
|
Sharpen_Block
|
import torch
import numpy as np
import torch.nn as nn
class Sharpen_Block(nn.Module):
def __init__(self):
super(Sharpen_Block, self).__init__()
self.pad = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv = nn.Conv2d(1, 1, 3, 1, 0, bias=False)
self.conv.weight = nn.Parameter(torch.from_numpy(np.array([[[[0, -
0.4, 0], [0, 2.6, 0], [0, -0.4, 0]]]])).float())
self.conv.weight.requires_grad = False
def forward(self, x):
return self.conv(self.pad(x))
def get_inputs():
return [torch.rand([4, 1, 4, 4])]
def get_init_inputs():
return [[], {}]
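# --- Usage sketch (editor's addition, not part of the original repo) ---
# The fixed 3x3 kernel [[0, -0.4, 0], [0, 2.6, 0], [0, -0.4, 0]] sharpens
# along the vertical axis only; reflection padding keeps the spatial size
# unchanged and the frozen weights make the block a deterministic filter.
if __name__ == '__main__':
    sharpen = Sharpen_Block()
    x, = get_inputs()            # (4, 1, 4, 4) single-channel images
    y = sharpen(x)
    print(y.shape)               # torch.Size([4, 1, 4, 4]) -- same size as the input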
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 6, 6), (36, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(144)](arg0_1, buf0, 144,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
del arg1_1
del buf0
return buf1,
class Sharpen_BlockNew(nn.Module):
def __init__(self):
super(Sharpen_BlockNew, self).__init__()
self.pad = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv = nn.Conv2d(1, 1, 3, 1, 0, bias=False)
self.conv.weight = nn.Parameter(torch.from_numpy(np.array([[[[0, -
0.4, 0], [0, 2.6, 0], [0, -0.4, 0]]]])).float())
self.conv.weight.requires_grad = False
def forward(self, input_0):
arg1_1 = self.conv.weight
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
|
MingSun-Tse/pytorch-vdsr
|
Sharpen_Block
| false
| 5,610
|
[
"MIT"
] | 1
|
597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
|
https://github.com/MingSun-Tse/pytorch-vdsr/tree/597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
|
SmallVDSR_F8
|
import torch
import torch.nn as nn
def load_param(model1_path, model2):
dict_param1 = torch.load(model1_path)
dict_param2 = dict(model2.named_parameters())
for name2 in dict_param2:
if name2 in dict_param1:
dict_param2[name2].data.copy_(dict_param1[name2].data)
model2.load_state_dict(dict_param2)
return model2
class SmallVDSR_F8(nn.Module):
def __init__(self, model=False, fixed=False):
super(SmallVDSR_F8, self).__init__()
self.fixed = fixed
self.conv1 = nn.Conv2d(1, 8, 3, 1, 1, bias=False)
self.conv2 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv3 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv4 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv5 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv6 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv7 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv8 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv9 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv10 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv11 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv12 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv13 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv14 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv15 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv16 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv17 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv18 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv19 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv20 = nn.Conv2d(8, 1, 3, 1, 1, bias=False)
self.relu = nn.ReLU()
if model:
load_param(model, self)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward_stem(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.relu(self.conv8(y))
out8 = y
y = self.relu(self.conv9(y))
out9 = y
y = self.relu(self.conv10(y))
out10 = y
y = self.relu(self.conv11(y))
out11 = y
y = self.relu(self.conv12(y))
out12 = y
y = self.relu(self.conv13(y))
out13 = y
y = self.relu(self.conv14(y))
out14 = y
y = self.relu(self.conv15(y))
out15 = y
y = self.relu(self.conv16(y))
out16 = y
y = self.relu(self.conv17(y))
out17 = y
y = self.relu(self.conv18(y))
out18 = y
y = self.relu(self.conv19(y))
out19 = y
y = self.conv20(y)
return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10,
out11, out12, out13, out14, out15, out16, out17, out18, out19, y)
def forward_dense(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.relu(self.conv8(y))
out8 = y
y = self.relu(self.conv9(y))
out9 = y
y = self.relu(self.conv10(y))
out10 = y
y = self.relu(self.conv11(y))
out11 = y
y = self.relu(self.conv12(y))
out12 = y
y = self.relu(self.conv13(y))
out13 = y
y = self.relu(self.conv14(y))
out14 = y
y = self.relu(self.conv15(y))
out15 = y
y = self.relu(self.conv16(y))
out16 = y
y = self.relu(self.conv17(y))
out17 = y
y = self.relu(self.conv18(y))
out18 = y
y = self.relu(self.conv19(y))
out19 = y
y = self.conv20(y)
out20 = y
return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10,
out11, out12, out13, out14, out15, out16, out17, out18, out19,
out20)
def forward(self, y):
y = self.relu(self.conv1(y))
y = self.relu(self.conv2(y))
y = self.relu(self.conv3(y))
y = self.relu(self.conv4(y))
y = self.relu(self.conv5(y))
y = self.relu(self.conv6(y))
y = self.relu(self.conv7(y))
y = self.relu(self.conv8(y))
y = self.relu(self.conv9(y))
y = self.relu(self.conv10(y))
y = self.relu(self.conv11(y))
y = self.relu(self.conv12(y))
y = self.relu(self.conv13(y))
y = self.relu(self.conv14(y))
y = self.relu(self.conv15(y))
y = self.relu(self.conv16(y))
y = self.relu(self.conv17(y))
y = self.relu(self.conv18(y))
y = self.relu(self.conv19(y))
y = self.conv20(y)
return y
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
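# --- Usage sketch (editor's addition, not part of the original repo) ---
# All 20 convolutions use stride 1 and padding 1, so spatial resolution is
# preserved end to end; the network maps a single-channel image to an output
# of the same size, with 8 feature maps per layer instead of VDSR's 64.
# forward_dense additionally exposes every intermediate activation.
if __name__ == '__main__':
    net = SmallVDSR_F8()
    x, = get_inputs()                    # (4, 1, 64, 64)
    y = net(x)
    print(y.shape)                       # torch.Size([4, 1, 64, 64])
    feats = net.forward_dense(x)
    print(len(feats), feats[0].shape)    # 20 tensors, first is (4, 8, 64, 64)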
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (8, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_3, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_4, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_6, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_7, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_8, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_9, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_10, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_11, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_12, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_13, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_14, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_15, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_16, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_17, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_18, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_19, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_20, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_21, (1, 8, 3, 3), (72, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(131072)](buf1, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(131072)](buf3, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_relu_0[grid(131072)](buf5, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf7 = buf6
del buf6
triton_poi_fused_relu_0[grid(131072)](buf7, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf9 = buf8
del buf8
triton_poi_fused_relu_0[grid(131072)](buf9, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf11 = buf10
del buf10
triton_poi_fused_relu_0[grid(131072)](buf11, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_relu_0[grid(131072)](buf13, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf15 = buf14
del buf14
triton_poi_fused_relu_0[grid(131072)](buf15, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf17 = buf16
del buf16
triton_poi_fused_relu_0[grid(131072)](buf17, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf18 = extern_kernels.convolution(buf17, primals_11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf19 = buf18
del buf18
triton_poi_fused_relu_0[grid(131072)](buf19, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf20 = extern_kernels.convolution(buf19, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_relu_0[grid(131072)](buf21, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf22 = extern_kernels.convolution(buf21, primals_13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf23 = buf22
del buf22
triton_poi_fused_relu_0[grid(131072)](buf23, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf24 = extern_kernels.convolution(buf23, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf25 = buf24
del buf24
triton_poi_fused_relu_0[grid(131072)](buf25, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf26 = extern_kernels.convolution(buf25, primals_15, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf27 = buf26
del buf26
triton_poi_fused_relu_0[grid(131072)](buf27, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf29 = buf28
del buf28
triton_poi_fused_relu_0[grid(131072)](buf29, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf30 = extern_kernels.convolution(buf29, primals_17, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf31 = buf30
del buf30
triton_poi_fused_relu_0[grid(131072)](buf31, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf32 = extern_kernels.convolution(buf31, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf33 = buf32
del buf32
triton_poi_fused_relu_0[grid(131072)](buf33, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf34 = extern_kernels.convolution(buf33, primals_19, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf35 = buf34
del buf34
triton_poi_fused_relu_0[grid(131072)](buf35, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf36 = extern_kernels.convolution(buf35, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf37 = buf36
del buf36
triton_poi_fused_relu_0[grid(131072)](buf37, 131072, XBLOCK=512,
num_warps=8, num_stages=1)
buf38 = extern_kernels.convolution(buf37, primals_21, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 1, 64, 64), (4096, 4096, 64, 1))
return (buf38, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, primals_7, primals_8, primals_9, primals_10, primals_11,
primals_12, primals_13, primals_14, primals_15, primals_16,
primals_17, primals_18, primals_19, primals_20, primals_21, buf1,
buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21,
buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37)
def load_param(model1_path, model2):
dict_param1 = torch.load(model1_path)
dict_param2 = dict(model2.named_parameters())
for name2 in dict_param2:
if name2 in dict_param1:
dict_param2[name2].data.copy_(dict_param1[name2].data)
model2.load_state_dict(dict_param2)
return model2
class SmallVDSR_F8New(nn.Module):
def __init__(self, model=False, fixed=False):
super(SmallVDSR_F8New, self).__init__()
self.fixed = fixed
self.conv1 = nn.Conv2d(1, 8, 3, 1, 1, bias=False)
self.conv2 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv3 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv4 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv5 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv6 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv7 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv8 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv9 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv10 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv11 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv12 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv13 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv14 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv15 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv16 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv17 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv18 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv19 = nn.Conv2d(8, 8, 3, 1, 1, bias=False)
self.conv20 = nn.Conv2d(8, 1, 3, 1, 1, bias=False)
self.relu = nn.ReLU()
if model:
load_param(model, self)
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward_stem(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.relu(self.conv8(y))
out8 = y
y = self.relu(self.conv9(y))
out9 = y
y = self.relu(self.conv10(y))
out10 = y
y = self.relu(self.conv11(y))
out11 = y
y = self.relu(self.conv12(y))
out12 = y
y = self.relu(self.conv13(y))
out13 = y
y = self.relu(self.conv14(y))
out14 = y
y = self.relu(self.conv15(y))
out15 = y
y = self.relu(self.conv16(y))
out16 = y
y = self.relu(self.conv17(y))
out17 = y
y = self.relu(self.conv18(y))
out18 = y
y = self.relu(self.conv19(y))
out19 = y
y = self.conv20(y)
return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10,
out11, out12, out13, out14, out15, out16, out17, out18, out19, y)
def forward_dense(self, y):
y = self.relu(self.conv1(y))
out1 = y
y = self.relu(self.conv2(y))
out2 = y
y = self.relu(self.conv3(y))
out3 = y
y = self.relu(self.conv4(y))
out4 = y
y = self.relu(self.conv5(y))
out5 = y
y = self.relu(self.conv6(y))
out6 = y
y = self.relu(self.conv7(y))
out7 = y
y = self.relu(self.conv8(y))
out8 = y
y = self.relu(self.conv9(y))
out9 = y
y = self.relu(self.conv10(y))
out10 = y
y = self.relu(self.conv11(y))
out11 = y
y = self.relu(self.conv12(y))
out12 = y
y = self.relu(self.conv13(y))
out13 = y
y = self.relu(self.conv14(y))
out14 = y
y = self.relu(self.conv15(y))
out15 = y
y = self.relu(self.conv16(y))
out16 = y
y = self.relu(self.conv17(y))
out17 = y
y = self.relu(self.conv18(y))
out18 = y
y = self.relu(self.conv19(y))
out19 = y
y = self.conv20(y)
out20 = y
return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10,
out11, out12, out13, out14, out15, out16, out17, out18, out19,
out20)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_6 = self.conv5.weight
primals_7 = self.conv6.weight
primals_8 = self.conv7.weight
primals_9 = self.conv8.weight
primals_10 = self.conv9.weight
primals_11 = self.conv10.weight
primals_12 = self.conv11.weight
primals_13 = self.conv12.weight
primals_14 = self.conv13.weight
primals_15 = self.conv14.weight
primals_16 = self.conv15.weight
primals_17 = self.conv16.weight
primals_18 = self.conv17.weight
primals_19 = self.conv18.weight
primals_20 = self.conv19.weight
primals_21 = self.conv20.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0]
|
MingSun-Tse/pytorch-vdsr
|
SmallVDSR_F8
| false
| 5,611
|
[
"MIT"
] | 1
|
597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
|
https://github.com/MingSun-Tse/pytorch-vdsr/tree/597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
|
Dense_block
|
import torch
import torch.nn as nn
class Dense_block(nn.Module):
""" This is the initial dense block as in the paper """
def __init__(self, in_channels, out_channels):
super(Dense_block, self).__init__()
self.Dense = torch.nn.Linear(in_channels, out_channels)
nn.init.xavier_uniform(self.Dense.weight.data, 1.0)
self.activation = torch.nn.LeakyReLU(0.2)
def forward(self, x):
return self.activation(self.Dense(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
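# --- Usage sketch (editor's addition, not part of the original repo) ---
# The block is just Linear followed by LeakyReLU(0.2) applied over the last
# dimension, so any leading batch dimensions pass through unchanged.
if __name__ == '__main__':
    block = Dense_block(in_channels=4, out_channels=4)
    x, = get_inputs()        # (4, 4, 4, 4)
    y = block(x)
    print(y.shape)           # torch.Size([4, 4, 4, 4])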
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class Dense_blockNew(nn.Module):
""" This is the initial dense block as in the paper """
def __init__(self, in_channels, out_channels):
super(Dense_blockNew, self).__init__()
self.Dense = torch.nn.Linear(in_channels, out_channels)
nn.init.xavier_uniform(self.Dense.weight.data, 1.0)
self.activation = torch.nn.LeakyReLU(0.2)
def forward(self, input_0):
primals_1 = self.Dense.weight
primals_2 = self.Dense.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Mohanned-Elkholy/ResNet-GAN
|
Dense_block
| false
| 5,612
|
[
"MIT"
] | 1
|
81b01294d8b5035131aee24d486e2cb879030832
|
https://github.com/Mohanned-Elkholy/ResNet-GAN/tree/81b01294d8b5035131aee24d486e2cb879030832
|
MatrixTree
|
import torch
import torch.nn as nn
import torch.cuda
import torch.distributed
class MatrixTree(nn.Module):
"""Implementation of the matrix-tree theorem for computing marginals
of non-projective dependency parsing. This attention layer is used
in the paper "Learning Structured Text Representations"
:cite:`DBLP:journals/corr/LiuL17d`.
"""
def __init__(self, eps=1e-05):
self.eps = eps
super(MatrixTree, self).__init__()
def forward(self, input):
laplacian = input.exp() + self.eps
output = input.clone()
for b in range(input.size(0)):
lap = laplacian[b].masked_fill(torch.eye(input.size(1), device=
input.device).ne(0), 0)
lap = -lap + torch.diag(lap.sum(0))
lap[0] = input[b].diag().exp()
inv_laplacian = lap.inverse()
factor = inv_laplacian.diag().unsqueeze(1).expand_as(input[b]
).transpose(0, 1)
term1 = input[b].exp().mul(factor).clone()
term2 = input[b].exp().mul(inv_laplacian.transpose(0, 1)).clone()
term1[:, 0] = 0
term2[0] = 0
output[b] = term1 - term2
roots_output = input[b].diag().exp().mul(inv_laplacian.
transpose(0, 1)[0])
output[b] = output[b] + torch.diag(roots_output)
return output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
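# --- Usage sketch (editor's addition, not part of the original repo) ---
# For a batch of arc-score matrices, the layer returns marginal probabilities
# of head/dependent arcs in non-projective dependency trees (root marginals
# sit on the diagonal). Since every token picks exactly one head, summing the
# marginals over the head dimension should give values close to 1.
if __name__ == '__main__':
    mt = MatrixTree()
    scores, = get_inputs()       # (batch=4, n=4, n=4) arc scores
    marg = mt(scores)
    print(marg.shape)            # torch.Size([4, 4, 4])
    print(marg.sum(1))           # approximately all ones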
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.cuda
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp7 = tl.load(in_ptr0 + x0, xmask)
tmp16 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp25 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp34 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp0 = tl.full([1], 0, tl.int64)
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tmp5 != tmp4
tmp8 = tl_math.exp(tmp7)
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = tl.where(tmp6, tmp4, tmp10)
tmp12 = tl.full([1], 1, tl.int64)
tmp13 = tmp12 == tmp1
tmp14 = tl.where(tmp13, tmp3, tmp4)
tmp15 = tmp14 != tmp4
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 + tmp9
tmp19 = tl.where(tmp15, tmp4, tmp18)
tmp20 = tmp11 + tmp19
tmp21 = tl.full([1], 2, tl.int64)
tmp22 = tmp21 == tmp1
tmp23 = tl.where(tmp22, tmp3, tmp4)
tmp24 = tmp23 != tmp4
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp26 + tmp9
tmp28 = tl.where(tmp24, tmp4, tmp27)
tmp29 = tmp20 + tmp28
tmp30 = tl.full([1], 3, tl.int64)
tmp31 = tmp30 == tmp1
tmp32 = tl.where(tmp31, tmp3, tmp4)
tmp33 = tmp32 != tmp4
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp35 + tmp9
tmp37 = tl.where(tmp33, tmp4, tmp36)
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1(
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + x2, xmask)
tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl_math.exp(tmp3)
tmp5 = x0
tmp6 = tmp0 == tmp5
tmp7 = 1.0
tmp8 = 0.0
tmp9 = tl.where(tmp6, tmp7, tmp8)
tmp10 = tmp9 != tmp8
tmp12 = tl_math.exp(tmp11)
tmp13 = 1e-05
tmp14 = tmp12 + tmp13
tmp15 = tl.where(tmp10, tmp8, tmp14)
tmp16 = -tmp15
tmp17 = tmp5 == tmp0
tmp19 = tl.where(tmp17, tmp18, tmp8)
tmp20 = tmp16 + tmp19
tmp21 = tl.where(tmp2, tmp4, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp7 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp16 = tl.load(in_ptr0 + (20 + x0), xmask)
tmp25 = tl.load(in_ptr0 + (24 + x0), xmask)
tmp34 = tl.load(in_ptr0 + (28 + x0), xmask)
tmp0 = tl.full([1], 0, tl.int64)
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tmp5 != tmp4
tmp8 = tl_math.exp(tmp7)
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = tl.where(tmp6, tmp4, tmp10)
tmp12 = tl.full([1], 1, tl.int64)
tmp13 = tmp12 == tmp1
tmp14 = tl.where(tmp13, tmp3, tmp4)
tmp15 = tmp14 != tmp4
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 + tmp9
tmp19 = tl.where(tmp15, tmp4, tmp18)
tmp20 = tmp11 + tmp19
tmp21 = tl.full([1], 2, tl.int64)
tmp22 = tmp21 == tmp1
tmp23 = tl.where(tmp22, tmp3, tmp4)
tmp24 = tmp23 != tmp4
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp26 + tmp9
tmp28 = tl.where(tmp24, tmp4, tmp27)
tmp29 = tmp20 + tmp28
tmp30 = tl.full([1], 3, tl.int64)
tmp31 = tmp30 == tmp1
tmp32 = tl.where(tmp31, tmp3, tmp4)
tmp33 = tmp32 != tmp4
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp35 + tmp9
tmp37 = tl.where(tmp33, tmp4, tmp36)
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3(
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (16 + 5 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl_math.exp(tmp3)
tmp5 = x0
tmp6 = tmp0 == tmp5
tmp7 = 1.0
tmp8 = 0.0
tmp9 = tl.where(tmp6, tmp7, tmp8)
tmp10 = tmp9 != tmp8
tmp12 = tl_math.exp(tmp11)
tmp13 = 1e-05
tmp14 = tmp12 + tmp13
tmp15 = tl.where(tmp10, tmp8, tmp14)
tmp16 = -tmp15
tmp17 = tmp5 == tmp0
tmp19 = tl.where(tmp17, tmp18, tmp8)
tmp20 = tmp16 + tmp19
tmp21 = tl.where(tmp2, tmp4, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_4(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp4 = tl.load(in_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + x2, xmask)
tmp18 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp0 = tl.full([1], 0, tl.int32)
tmp1 = tmp0 == tmp0
tmp2 = x0
tmp3 = tmp2 == tmp0
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 * tmp6
tmp8 = 0.0
tmp9 = tl.where(tmp3, tmp8, tmp7)
tmp10 = x1
tmp11 = tmp10 == tmp0
tmp13 = tmp5 * tmp12
tmp14 = tl.where(tmp11, tmp8, tmp13)
tmp15 = tmp9 - tmp14
tmp16 = tl.where(tmp1, tmp15, tmp4)
tmp17 = tmp2 == tmp10
tmp19 = tl_math.exp(tmp18)
tmp21 = tmp19 * tmp20
tmp22 = tl.where(tmp17, tmp21, tmp8)
tmp23 = tmp16 + tmp22
tl.store(out_ptr0 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x5 = xindex
tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + x5, xmask)
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = x0
tmp5 = tmp4 == tmp1
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 * tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp5, tmp10, tmp9)
tmp12 = x1
tmp13 = tmp12 == tmp1
tmp15 = tmp7 * tmp14
tmp16 = tl.where(tmp13, tmp10, tmp15)
tmp17 = tmp11 - tmp16
tmp19 = tl.where(tmp2, tmp17, tmp18)
tmp20 = tl.where(tmp2, tmp3, tmp19)
tl.store(out_ptr0 + x5, tmp20, xmask)
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_6(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp7 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp16 = tl.load(in_ptr0 + (36 + x0), xmask)
tmp25 = tl.load(in_ptr0 + (40 + x0), xmask)
tmp34 = tl.load(in_ptr0 + (44 + x0), xmask)
tmp0 = tl.full([1], 0, tl.int64)
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tmp5 != tmp4
tmp8 = tl_math.exp(tmp7)
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = tl.where(tmp6, tmp4, tmp10)
tmp12 = tl.full([1], 1, tl.int64)
tmp13 = tmp12 == tmp1
tmp14 = tl.where(tmp13, tmp3, tmp4)
tmp15 = tmp14 != tmp4
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 + tmp9
tmp19 = tl.where(tmp15, tmp4, tmp18)
tmp20 = tmp11 + tmp19
tmp21 = tl.full([1], 2, tl.int64)
tmp22 = tmp21 == tmp1
tmp23 = tl.where(tmp22, tmp3, tmp4)
tmp24 = tmp23 != tmp4
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp26 + tmp9
tmp28 = tl.where(tmp24, tmp4, tmp27)
tmp29 = tmp20 + tmp28
tmp30 = tl.full([1], 3, tl.int64)
tmp31 = tmp30 == tmp1
tmp32 = tl.where(tmp31, tmp3, tmp4)
tmp33 = tmp32 != tmp4
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp35 + tmp9
tmp37 = tl.where(tmp33, tmp4, tmp36)
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7(
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (32 + 5 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl_math.exp(tmp3)
tmp5 = x0
tmp6 = tmp0 == tmp5
tmp7 = 1.0
tmp8 = 0.0
tmp9 = tl.where(tmp6, tmp7, tmp8)
tmp10 = tmp9 != tmp8
tmp12 = tl_math.exp(tmp11)
tmp13 = 1e-05
tmp14 = tmp12 + tmp13
tmp15 = tl.where(tmp10, tmp8, tmp14)
tmp16 = -tmp15
tmp17 = tmp5 == tmp0
tmp19 = tl.where(tmp17, tmp18, tmp8)
tmp20 = tmp16 + tmp19
tmp21 = tl.where(tmp2, tmp4, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp5 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp7 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + x2, xmask)
tmp17 = tl.load(in_ptr2 + (16 + x2), xmask)
tmp20 = tl.load(in_ptr0 + (16 + 5 * x0), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp0 = tl.full([1], 1, tl.int32)
tmp1 = tmp0 == tmp0
tmp2 = x0
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = tmp2 == tmp3
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = 0.0
tmp10 = tl.where(tmp4, tmp9, tmp8)
tmp11 = x1
tmp12 = tmp11 == tmp3
tmp14 = tmp6 * tmp13
tmp15 = tl.where(tmp12, tmp9, tmp14)
tmp16 = tmp10 - tmp15
tmp18 = tl.where(tmp1, tmp16, tmp17)
tmp19 = tmp2 == tmp11
tmp21 = tl_math.exp(tmp20)
tmp23 = tmp21 * tmp22
tmp24 = tl.where(tmp19, tmp23, tmp9)
tmp25 = tmp18 + tmp24
tl.store(out_ptr0 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x5 = xindex
tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (16 + x3), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_out_ptr0 + x5, xmask)
tmp0 = x2
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = x0
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = tmp4 == tmp5
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 * tmp9
tmp11 = 0.0
tmp12 = tl.where(tmp6, tmp11, tmp10)
tmp13 = x1
tmp14 = tmp13 == tmp5
tmp16 = tmp8 * tmp15
tmp17 = tl.where(tmp14, tmp11, tmp16)
tmp18 = tmp12 - tmp17
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp21 = tl.where(tmp2, tmp3, tmp20)
tl.store(in_out_ptr0 + x5, tmp21, xmask)
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_10(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp7 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp16 = tl.load(in_ptr0 + (52 + x0), xmask)
tmp25 = tl.load(in_ptr0 + (56 + x0), xmask)
tmp34 = tl.load(in_ptr0 + (60 + x0), xmask)
tmp0 = tl.full([1], 0, tl.int64)
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tmp5 != tmp4
tmp8 = tl_math.exp(tmp7)
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = tl.where(tmp6, tmp4, tmp10)
tmp12 = tl.full([1], 1, tl.int64)
tmp13 = tmp12 == tmp1
tmp14 = tl.where(tmp13, tmp3, tmp4)
tmp15 = tmp14 != tmp4
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 + tmp9
tmp19 = tl.where(tmp15, tmp4, tmp18)
tmp20 = tmp11 + tmp19
tmp21 = tl.full([1], 2, tl.int64)
tmp22 = tmp21 == tmp1
tmp23 = tl.where(tmp22, tmp3, tmp4)
tmp24 = tmp23 != tmp4
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp26 + tmp9
tmp28 = tl.where(tmp24, tmp4, tmp27)
tmp29 = tmp20 + tmp28
tmp30 = tl.full([1], 3, tl.int64)
tmp31 = tmp30 == tmp1
tmp32 = tl.where(tmp31, tmp3, tmp4)
tmp33 = tmp32 != tmp4
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp35 + tmp9
tmp37 = tl.where(tmp33, tmp4, tmp36)
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11(
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (48 + 5 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl_math.exp(tmp3)
tmp5 = x0
tmp6 = tmp0 == tmp5
tmp7 = 1.0
tmp8 = 0.0
tmp9 = tl.where(tmp6, tmp7, tmp8)
tmp10 = tmp9 != tmp8
tmp12 = tl_math.exp(tmp11)
tmp13 = 1e-05
tmp14 = tmp12 + tmp13
tmp15 = tl.where(tmp10, tmp8, tmp14)
tmp16 = -tmp15
tmp17 = tmp5 == tmp0
tmp19 = tl.where(tmp17, tmp18, tmp8)
tmp20 = tmp16 + tmp19
tmp21 = tl.where(tmp2, tmp4, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp5 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp7 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + x2, xmask)
tmp17 = tl.load(in_ptr2 + (32 + x2), xmask)
tmp20 = tl.load(in_ptr0 + (32 + 5 * x0), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp0 = tl.full([1], 2, tl.int32)
tmp1 = tmp0 == tmp0
tmp2 = x0
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = tmp2 == tmp3
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = 0.0
tmp10 = tl.where(tmp4, tmp9, tmp8)
tmp11 = x1
tmp12 = tmp11 == tmp3
tmp14 = tmp6 * tmp13
tmp15 = tl.where(tmp12, tmp9, tmp14)
tmp16 = tmp10 - tmp15
tmp18 = tl.where(tmp1, tmp16, tmp17)
tmp19 = tmp2 == tmp11
tmp21 = tl_math.exp(tmp20)
tmp23 = tmp21 * tmp22
tmp24 = tl.where(tmp19, tmp23, tmp9)
tmp25 = tmp18 + tmp24
tl.store(out_ptr0 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x5 = xindex
tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (32 + x3), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_out_ptr0 + x5, xmask)
tmp0 = x2
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = x0
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = tmp4 == tmp5
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 * tmp9
tmp11 = 0.0
tmp12 = tl.where(tmp6, tmp11, tmp10)
tmp13 = x1
tmp14 = tmp13 == tmp5
tmp16 = tmp8 * tmp15
tmp17 = tl.where(tmp14, tmp11, tmp16)
tmp18 = tmp12 - tmp17
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp21 = tl.where(tmp2, tmp3, tmp20)
tl.store(in_out_ptr0 + x5, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp5 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp7 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + x2, xmask)
tmp17 = tl.load(in_ptr2 + (48 + x2), xmask)
tmp20 = tl.load(in_ptr0 + (48 + 5 * x0), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp0 = tl.full([1], 3, tl.int32)
tmp1 = tmp0 == tmp0
tmp2 = x0
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = tmp2 == tmp3
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = 0.0
tmp10 = tl.where(tmp4, tmp9, tmp8)
tmp11 = x1
tmp12 = tmp11 == tmp3
tmp14 = tmp6 * tmp13
tmp15 = tl.where(tmp12, tmp9, tmp14)
tmp16 = tmp10 - tmp15
tmp18 = tl.where(tmp1, tmp16, tmp17)
tmp19 = tmp2 == tmp11
tmp21 = tl_math.exp(tmp20)
tmp23 = tmp21 * tmp22
tmp24 = tl.where(tmp19, tmp23, tmp9)
tmp25 = tmp18 + tmp24
tl.store(out_ptr0 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x5 = xindex
tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (48 + x3), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_out_ptr0 + x5, xmask)
tmp0 = x2
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = x0
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = tmp4 == tmp5
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 * tmp9
tmp11 = 0.0
tmp12 = tl.where(tmp6, tmp11, tmp10)
tmp13 = x1
tmp14 = tmp13 == tmp5
tmp16 = tmp8 * tmp15
tmp17 = tl.where(tmp14, tmp11, tmp16)
tmp18 = tmp12 - tmp17
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp21 = tl.where(tmp2, tmp3, tmp20)
tl.store(in_out_ptr0 + x5, tmp21, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_eye_masked_fill_ne_sum_0[grid(4)](arg0_1, buf0, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1[
grid(16)](arg0_1, buf0, buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = torch.ops.aten.linalg_inv_ex.default(buf1)
buf3 = buf2[0]
del buf2
buf5 = buf0
del buf0
triton_poi_fused_eye_masked_fill_ne_sum_2[grid(4)](arg0_1, buf5, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf6 = buf1
del buf1
triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3[
grid(16)](arg0_1, buf5, buf6, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf7 = torch.ops.aten.linalg_inv_ex.default(buf6)
buf8 = buf7[0]
del buf7
buf10 = buf6
del buf6
triton_poi_fused_add_diag_embed_4[grid(16)](arg0_1, buf3, buf10, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5[grid(64)
](buf10, arg0_1, buf3, buf11, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf10
buf12 = buf5
del buf5
triton_poi_fused_eye_masked_fill_ne_sum_6[grid(4)](arg0_1, buf12, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0)
del buf3
triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7[
grid(16)](arg0_1, buf12, buf13, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf14 = torch.ops.aten.linalg_inv_ex.default(buf13)
buf15 = buf14[0]
del buf14
buf17 = buf13
del buf13
triton_poi_fused_add_diag_embed_8[grid(16)](arg0_1, buf8, buf11,
buf17, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf18 = buf11
del buf11
triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9[grid(64)
](buf18, buf17, arg0_1, buf8, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf17
buf19 = buf12
del buf12
triton_poi_fused_eye_masked_fill_ne_sum_10[grid(4)](arg0_1, buf19,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf20 = reinterpret_tensor(buf8, (4, 4), (4, 1), 0)
del buf8
triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11[
grid(16)](arg0_1, buf19, buf20, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf19
buf21 = torch.ops.aten.linalg_inv_ex.default(buf20)
buf22 = buf21[0]
del buf21
buf24 = buf20
del buf20
triton_poi_fused_add_diag_embed_12[grid(16)](arg0_1, buf15, buf18,
buf24, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf25 = buf18
del buf18
triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13[grid(64)
](buf25, buf24, arg0_1, buf15, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf15
buf26 = buf24
del buf24
triton_poi_fused_add_diag_embed_14[grid(16)](arg0_1, buf22, buf25,
buf26, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf27 = buf25
del buf25
triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15[grid(64)
](buf27, buf26, arg0_1, buf22, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del arg0_1
del buf22
del buf26
return buf27,
class MatrixTreeNew(nn.Module):
"""Implementation of the matrix-tree theorem for computing marginals
of non-projective dependency parsing. This attention layer is used
in the paper "Learning Structured Text Representations"
:cite:`DBLP:journals/corr/LiuL17d`.
"""
def __init__(self, eps=1e-05):
self.eps = eps
super(MatrixTreeNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
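# --- Usage sketch (editor addition, not part of the original module) ---
# Minimal demonstration of the compiled forward path. The call() above asserts
# an exact (4, 4, 4) CUDA layout, so this sketch mirrors that and is guarded to
# avoid running on import or on CPU-only machines.
if __name__ == "__main__" and torch.cuda.is_available():
    scores = torch.rand(4, 4, 4, device="cuda")   # batch of 4x4 edge-score matrices
    marginals = MatrixTreeNew()(scores)           # marginals have the same shape as the input
    print(marginals.shape)                        # torch.Size([4, 4, 4])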
| repo_name: MaxatTezekbayev/OpenNMT-py-lexical
| module_name: MatrixTree
| synthetic: false
| uuid: 5613
| licenses: ["MIT"]
| stars: 1
| sha: 44182999b863fc4074d67e0281c5bdab19abddfe
| repo_link: https://github.com/MaxatTezekbayev/OpenNMT-py-lexical/tree/44182999b863fc4074d67e0281c5bdab19abddfe
|
RandomShiftsAug
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class RandomShiftsAug(nn.Module):
def __init__(self, pad):
super().__init__()
self.pad = pad
def forward(self, x):
x = x.float()
n, _c, h, w = x.size()
assert h == w
padding = tuple([self.pad] * 4)
x = F.pad(x, padding, 'replicate')
eps = 1.0 / (h + 2 * self.pad)
arange = torch.linspace(-1.0 + eps, 1.0 - eps, h + 2 * self.pad,
device=x.device, dtype=x.dtype)[:h]
arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2)
base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2)
base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1)
shift = torch.randint(0, 2 * self.pad + 1, size=(n, 1, 1, 2),
device=x.device, dtype=x.dtype)
shift *= 2.0 / (h + 2 * self.pad)
grid = base_grid + shift
return F.grid_sample(x, grid, padding_mode='zeros', align_corners=False
)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'pad': 4}]
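# --- Usage sketch (editor addition; shapes follow get_inputs()/get_init_inputs() above) ---
# The augmentation replicate-pads the image by `pad` pixels and resamples it at a
# randomly shifted grid, so the output keeps the input's (N, C, H, W) shape.
if __name__ == "__main__":
    aug = RandomShiftsAug(pad=4)
    imgs = torch.rand(4, 4, 4, 4)          # (N, C, H, W) with H == W
    shifted = aug(imgs)
    assert shifted.shape == imgs.shape     # random shift preserves the shape
    print(shifted.shape)                   # torch.Size([4, 4, 4, 4])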
|
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2 % 4
x2 = xindex // 8
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x1
tmp6 = tmp5.to(tl.float32)
tmp7 = 6.0
tmp8 = tmp6 < tmp7
tmp9 = 0.16666666666666666
tmp10 = tmp6 * tmp9
tmp11 = -0.9166666666666666
tmp12 = tmp10 + tmp11
tmp13 = 11 + -1 * x1
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 * tmp9
tmp16 = 0.9166666666666666
tmp17 = tmp16 - tmp15
tmp18 = tl.where(tmp8, tmp12, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp24 = x2
tmp25 = tmp24.to(tl.float32)
tmp26 = tmp25 < tmp7
tmp27 = tmp25 * tmp9
tmp28 = tmp27 + tmp11
tmp29 = 11 + -1 * x2
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp30 * tmp9
tmp32 = tmp16 - tmp31
tmp33 = tl.where(tmp26, tmp28, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp21, tmp33, tmp34)
tmp36 = tl.where(tmp4, tmp20, tmp35)
tl.store(out_ptr0 + x4, tmp36, xmask)
@triton.jit
def triton_poi_fused_grid_sampler_2d_replication_pad2d_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x5 = xindex
x3 = xindex // 16
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 2 * x2, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (1 + 2 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = 0.16666666666666666
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp5 = 6.0
tmp6 = tmp4 * tmp5
tmp7 = 5.5
tmp8 = tmp6 + tmp7
tmp9 = libdevice.floor(tmp8)
tmp10 = 0.0
tmp11 = tmp9 >= tmp10
tmp12 = 12.0
tmp13 = tmp9 < tmp12
tmp16 = tmp15 * tmp2
tmp17 = tmp14 + tmp16
tmp18 = tmp17 * tmp5
tmp19 = tmp18 + tmp7
tmp20 = libdevice.floor(tmp19)
tmp21 = tmp20 >= tmp10
tmp22 = tmp20 < tmp12
tmp23 = tmp21 & tmp22
tmp24 = tmp13 & tmp23
tmp25 = tmp11 & tmp24
tmp26 = 1.0
tmp27 = tmp9 + tmp26
tmp28 = tmp27 - tmp8
tmp29 = tmp20 + tmp26
tmp30 = tmp29 - tmp19
tmp31 = tmp28 * tmp30
tmp32 = tl.where(tmp25, tmp31, tmp10)
tmp33 = tmp27 >= tmp10
tmp34 = tmp27 < tmp12
tmp35 = tmp34 & tmp23
tmp36 = tmp33 & tmp35
tmp37 = tmp8 - tmp9
tmp38 = tmp37 * tmp30
tmp39 = tl.where(tmp36, tmp38, tmp10)
tmp40 = tmp29 >= tmp10
tmp41 = tmp29 < tmp12
tmp42 = tmp40 & tmp41
tmp43 = tmp34 & tmp42
tmp44 = tmp33 & tmp43
tmp45 = tmp19 - tmp20
tmp46 = tmp37 * tmp45
tmp47 = tl.where(tmp44, tmp46, tmp10)
tmp48 = tmp13 & tmp42
tmp49 = tmp11 & tmp48
tmp50 = tmp28 * tmp45
tmp51 = tl.where(tmp49, tmp50, tmp10)
tmp52 = tmp20.to(tl.int64)
tmp53 = tl.full([1], 0, tl.int64)
tmp54 = tl.where(tmp25, tmp52, tmp53)
tmp55 = tl.full([XBLOCK], 12, tl.int32)
tmp56 = tmp54 + tmp55
tmp57 = tmp54 < 0
tmp58 = tl.where(tmp57, tmp56, tmp54)
tl.device_assert((0 <= tmp58) & (tmp58 < 12) | ~xmask,
'index out of bounds: 0 <= tmp58 < 12')
tmp60 = tmp9.to(tl.int64)
tmp61 = tl.where(tmp25, tmp60, tmp53)
tmp62 = tmp61 + tmp55
tmp63 = tmp61 < 0
tmp64 = tl.where(tmp63, tmp62, tmp61)
tl.device_assert((0 <= tmp64) & (tmp64 < 12) | ~xmask,
'index out of bounds: 0 <= tmp64 < 12')
tmp66 = tl.load(in_ptr2 + (4 * (3 * (3 <= 0 * (0 >= -4 + tmp58) + (-4 +
tmp58) * (-4 + tmp58 > 0)) + (0 * (0 >= -4 + tmp58) + (-4 + tmp58) *
(-4 + tmp58 > 0)) * (0 * (0 >= -4 + tmp58) + (-4 + tmp58) * (-4 +
tmp58 > 0) < 3)) + 16 * x3 + (3 * (3 <= 0 * (0 >= -4 + tmp64) + (-4 +
tmp64) * (-4 + tmp64 > 0)) + (0 * (0 >= -4 + tmp64) + (-4 + tmp64) *
(-4 + tmp64 > 0)) * (0 * (0 >= -4 + tmp64) + (-4 + tmp64) * (-4 +
tmp64 > 0) < 3))), xmask, eviction_policy='evict_last')
tmp67 = tl.where(tmp36, tmp52, tmp53)
tmp68 = tmp27.to(tl.int64)
tmp69 = tl.where(tmp36, tmp68, tmp53)
tmp70 = tmp29.to(tl.int64)
tmp71 = tl.where(tmp49, tmp70, tmp53)
tmp72 = tl.where(tmp49, tmp60, tmp53)
tmp73 = tmp66 * tmp32
tmp74 = tmp67 + tmp55
tmp75 = tmp67 < 0
tmp76 = tl.where(tmp75, tmp74, tmp67)
tl.device_assert((0 <= tmp76) & (tmp76 < 12) | ~xmask,
'index out of bounds: 0 <= tmp76 < 12')
tmp78 = tmp69 + tmp55
tmp79 = tmp69 < 0
tmp80 = tl.where(tmp79, tmp78, tmp69)
tl.device_assert((0 <= tmp80) & (tmp80 < 12) | ~xmask,
'index out of bounds: 0 <= tmp80 < 12')
tmp82 = tl.load(in_ptr2 + (4 * (3 * (3 <= 0 * (0 >= -4 + tmp76) + (-4 +
tmp76) * (-4 + tmp76 > 0)) + (0 * (0 >= -4 + tmp76) + (-4 + tmp76) *
(-4 + tmp76 > 0)) * (0 * (0 >= -4 + tmp76) + (-4 + tmp76) * (-4 +
tmp76 > 0) < 3)) + 16 * x3 + (3 * (3 <= 0 * (0 >= -4 + tmp80) + (-4 +
tmp80) * (-4 + tmp80 > 0)) + (0 * (0 >= -4 + tmp80) + (-4 + tmp80) *
(-4 + tmp80 > 0)) * (0 * (0 >= -4 + tmp80) + (-4 + tmp80) * (-4 +
tmp80 > 0) < 3))), xmask, eviction_policy='evict_last')
tmp83 = tmp82 * tmp39
tmp84 = tmp73 + tmp83
tmp85 = tmp71 + tmp55
tmp86 = tmp71 < 0
tmp87 = tl.where(tmp86, tmp85, tmp71)
tl.device_assert((0 <= tmp87) & (tmp87 < 12) | ~xmask,
'index out of bounds: 0 <= tmp87 < 12')
tmp89 = tmp72 + tmp55
tmp90 = tmp72 < 0
tmp91 = tl.where(tmp90, tmp89, tmp72)
tl.device_assert((0 <= tmp91) & (tmp91 < 12) | ~xmask,
'index out of bounds: 0 <= tmp91 < 12')
tmp93 = tl.load(in_ptr2 + (4 * (3 * (3 <= 0 * (0 >= -4 + tmp87) + (-4 +
tmp87) * (-4 + tmp87 > 0)) + (0 * (0 >= -4 + tmp87) + (-4 + tmp87) *
(-4 + tmp87 > 0)) * (0 * (0 >= -4 + tmp87) + (-4 + tmp87) * (-4 +
tmp87 > 0) < 3)) + 16 * x3 + (3 * (3 <= 0 * (0 >= -4 + tmp91) + (-4 +
tmp91) * (-4 + tmp91 > 0)) + (0 * (0 >= -4 + tmp91) + (-4 + tmp91) *
(-4 + tmp91 > 0)) * (0 * (0 >= -4 + tmp91) + (-4 + tmp91) * (-4 +
tmp91 > 0) < 3))), xmask, eviction_policy='evict_last')
tmp94 = tmp93 * tmp51
tmp95 = tmp84 + tmp94
tmp96 = tl.where(tmp44, tmp70, tmp53)
tmp97 = tl.where(tmp44, tmp68, tmp53)
tmp98 = tmp96 + tmp55
tmp99 = tmp96 < 0
tmp100 = tl.where(tmp99, tmp98, tmp96)
tl.device_assert((0 <= tmp100) & (tmp100 < 12) | ~xmask,
'index out of bounds: 0 <= tmp100 < 12')
tmp102 = tmp97 + tmp55
tmp103 = tmp97 < 0
tmp104 = tl.where(tmp103, tmp102, tmp97)
tl.device_assert((0 <= tmp104) & (tmp104 < 12) | ~xmask,
'index out of bounds: 0 <= tmp104 < 12')
tmp106 = tl.load(in_ptr2 + (4 * (3 * (3 <= 0 * (0 >= -4 + tmp100) + (-4 +
tmp100) * (-4 + tmp100 > 0)) + (0 * (0 >= -4 + tmp100) + (-4 +
tmp100) * (-4 + tmp100 > 0)) * (0 * (0 >= -4 + tmp100) + (-4 +
tmp100) * (-4 + tmp100 > 0) < 3)) + 16 * x3 + (3 * (3 <= 0 * (0 >=
-4 + tmp104) + (-4 + tmp104) * (-4 + tmp104 > 0)) + (0 * (0 >= -4 +
tmp104) + (-4 + tmp104) * (-4 + tmp104 > 0)) * (0 * (0 >= -4 +
tmp104) + (-4 + tmp104) * (-4 + tmp104 > 0) < 3))), xmask,
eviction_policy='evict_last')
tmp107 = tmp106 * tmp47
tmp108 = tmp95 + tmp107
tl.store(in_out_ptr0 + x5, tmp108, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](buf0, 32, XBLOCK=32, num_warps=1,
num_stages=1)
buf1 = torch.ops.aten.randint.low(0, 9, [4, 1, 1, 2], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf2 = buf1
del buf1
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf11 = buf10
del buf10
buf15 = buf11
del buf11
triton_poi_fused_grid_sampler_2d_replication_pad2d_1[grid(256)](buf15,
buf0, buf2, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf0
del buf2
return buf15,
class RandomShiftsAugNew(nn.Module):
def __init__(self, pad):
super().__init__()
self.pad = pad
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| repo_name: MishaLaskin/url_benchmark
| module_name: RandomShiftsAug
| synthetic: false
| uuid: 5614
| licenses: ["MIT"]
| stars: 1
| sha: a81aed0a0aec3a7dad83d930e54d480f97cf535d
| repo_link: https://github.com/MishaLaskin/url_benchmark/tree/a81aed0a0aec3a7dad83d930e54d480f97cf535d
|
FeedForwardNetwork
|
import math
import torch
import torch.nn as nn
class GELU(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
class FeedForwardNetwork(nn.Module):
def __init__(self, in_dim, hid_dim) ->None:
super().__init__()
self.lin1 = nn.Linear(in_dim, hid_dim)
self.lin2 = nn.Linear(hid_dim, in_dim)
self.gleu = GELU()
self.dropout = nn.Dropout()
def forward(self, inputs):
hid = self.gleu(self.lin1(inputs))
return self.lin2(self.dropout(hid))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'hid_dim': 4}]
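# --- Usage sketch (editor addition; a minimal check, not from the original repo) ---
# The GELU above is the tanh approximation; it should track the exact erf-based
# torch.nn.functional.gelu closely on inputs of this scale. F is imported inside
# the guard because this block only imports torch and torch.nn.
if __name__ == "__main__":
    import torch.nn.functional as F
    ffn = FeedForwardNetwork(in_dim=4, hid_dim=4).eval()   # eval() disables dropout
    x = torch.rand(4, 4, 4, 4)
    out = ffn(x)
    assert out.shape == x.shape
    gap = (GELU()(x) - F.gelu(x)).abs().max().item()
    print(f"max |tanh-approx - exact| GELU gap: {gap:.2e}")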
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
class GELU(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
class FeedForwardNetworkNew(nn.Module):
def __init__(self, in_dim, hid_dim) ->None:
super().__init__()
self.lin1 = nn.Linear(in_dim, hid_dim)
self.lin2 = nn.Linear(hid_dim, in_dim)
self.gleu = GELU()
self.dropout = nn.Dropout()
def forward(self, input_0):
primals_1 = self.lin1.weight
primals_2 = self.lin1.bias
primals_4 = self.lin2.weight
primals_5 = self.lin2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| repo_name: MobtgZhang/MWMLNet
| module_name: FeedForwardNetwork
| synthetic: false
| uuid: 5615
| licenses: ["MIT"]
| stars: 1
| sha: 125bb39935916b6b4be505c51cb6a04eb49b96d0
| repo_link: https://github.com/MobtgZhang/MWMLNet/tree/125bb39935916b6b4be505c51cb6a04eb49b96d0
|
BERTNextSentence
|
import torch
import torch.nn as nn
class BERTNextSentence(nn.Module):
def __init__(self, hidden):
super().__init__()
self.linear = nn.Linear(hidden, 2)
self.softmax = nn.LogSoftmax(dim=-1)
def forward(self, x):
return self.softmax(self.linear(x[:, 0]))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden': 4}]
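# --- Usage sketch (editor addition; shapes follow get_inputs()/get_init_inputs() above) ---
# The head reads the first token's hidden states (x[:, 0]) and returns
# log-probabilities over the two is-next / not-next classes.
if __name__ == "__main__":
    head = BERTNextSentence(hidden=4)
    hidden_states = torch.rand(4, 4, 4, 4)   # toy (batch, seq, ..., hidden) tensor
    log_probs = head(hidden_states)
    print(log_probs.shape)                   # torch.Size([4, 4, 2])
    print(log_probs.exp().sum(-1))           # each row sums to ~1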
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__log_softmax_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp6 = tmp3 + tmp5
tmp10 = tmp7 + tmp9
tmp11 = triton_helpers.maximum(tmp6, tmp10)
tmp12 = tmp2 - tmp11
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp6 = tl_math.log(tmp5)
tmp7 = tmp0 - tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused__log_softmax_add_1[grid(32)](buf1, primals_3, buf2,
32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_3
buf3 = reinterpret_tensor(buf1, (4, 4, 2), (8, 2, 1), 0)
del buf1
triton_poi_fused__log_softmax_2[grid(32)](buf2, buf3, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del buf2
return buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3
class BERTNextSentenceNew(nn.Module):
def __init__(self, hidden):
super().__init__()
self.linear = nn.Linear(hidden, 2)
self.softmax = nn.LogSoftmax(dim=-1)
def forward(self, input_0):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| repo_name: Moymix/BERT-pytorch
| module_name: BERTNextSentence
| synthetic: false
| uuid: 5616
| licenses: ["Apache-2.0"]
| stars: 1
| sha: f0b9c3ae53e05c00adcc761e0422e4222d8b5619
| repo_link: https://github.com/Moymix/BERT-pytorch/tree/f0b9c3ae53e05c00adcc761e0422e4222d8b5619
|
Upsample2d
|
from _paritybench_helpers import _mock_config
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def _setup_kernel(k):
k = np.asarray(k, dtype=np.float32)
if k.ndim == 1:
k = np.outer(k, k)
k /= np.sum(k)
assert k.ndim == 2
assert k.shape[0] == k.shape[1]
return k
class Upsample2d(nn.Module):
def __init__(self, opts, k=[1, 3, 3, 1], factor=2, down=1, gain=1):
"""
Upsample2d method in G_synthesis_stylegan2.
        :param k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
            The default `[1, 3, 3, 1]` is a separable binomial smoothing filter;
            `[1] * factor` would correspond to average pooling.
        :param factor: Integer upsampling factor (default: 2).
        :param gain: Scaling factor for signal magnitude (default: 1.0).
        Returns: Tensor of the shape `[N, C, H * factor, W * factor]`
"""
super().__init__()
        assert isinstance(factor, int
            ) and factor >= 1, 'factor must be an integer >= 1! (default: 2)'
self.gain = gain
self.factor = factor
self.opts = opts
self.k = _setup_kernel(k) * (self.gain * factor ** 2)
self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0)
self.k = nn.Parameter(self.k, requires_grad=False)
self.p = self.k.shape[0] - self.factor
self.padx0, self.pady0 = (self.p + 1) // 2 + factor - 1, (self.p + 1
) // 2 + factor - 1
self.padx1, self.pady1 = self.p // 2, self.p // 2
self.kernelH, self.kernelW = self.k.shape[2:]
self.down = down
def forward(self, x):
y = x.clone()
y = y.reshape([-1, x.shape[2], x.shape[3], 1])
inC, inH, inW = x.shape[1:]
y = torch.reshape(y, (-1, inH, 1, inW, 1, 1))
y = F.pad(y, (0, 0, self.factor - 1, 0, 0, 0, self.factor - 1, 0, 0,
0, 0, 0))
y = torch.reshape(y, (-1, 1, inH * self.factor, inW * self.factor))
y = F.pad(y, (0, 0, max(self.pady0, 0), max(self.pady1, 0), max(
self.padx0, 0), max(self.padx1, 0), 0, 0))
y = y[:, max(-self.pady0, 0):y.shape[1] - max(-self.pady1, 0), max(
-self.padx0, 0):y.shape[2] - max(-self.padx1, 0), :]
y = y.permute(0, 3, 1, 2)
y = y.reshape(-1, 1, inH * self.factor + self.pady0 + self.pady1,
inW * self.factor + self.padx0 + self.padx1)
y = F.conv2d(y, self.k)
y = y.view(-1, 1, inH * self.factor + self.pady0 + self.pady1 -
self.kernelH + 1, inW * self.factor + self.padx0 + self.padx1 -
self.kernelW + 1)
if inH * self.factor != y.shape[1]:
y = F.interpolate(y, size=(inH * self.factor, inW * self.factor))
y = y.permute(0, 2, 3, 1)
y = y.reshape(-1, inC, inH * self.factor, inW * self.factor)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'opts': _mock_config()}]
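# --- Usage sketch (editor addition; assumes the _paritybench_helpers import at the
#     top of this block resolves). `opts` is only stored, never used in forward, so
#     None stands in for the _mock_config() used by get_init_inputs above. ---
# Upsampling by factor=2 doubles the spatial resolution: (N, C, H, W) -> (N, C, 2H, 2W).
if __name__ == "__main__":
    up = Upsample2d(opts=None, k=[1, 3, 3, 1], factor=2)
    x = torch.rand(4, 4, 4, 4)
    y = up(x)
    print(y.shape)   # torch.Size([4, 4, 8, 8])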
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 72 % 2
x1 = xindex // 8 % 9
x0 = xindex % 8
x3 = xindex // 144
x7 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = -1 + x1
tmp4 = tmp3 >= tmp1
tmp5 = tmp2 & tmp4
tmp6 = -1 + (-1 + x1) % 2
tmp7 = tmp6 >= tmp1
tmp8 = -1 + x0 % 2
tmp9 = tmp8 >= tmp1
tmp10 = tmp7 & tmp9
tmp11 = tmp10 & tmp5
tmp12 = tl.load(in_ptr0 + (4 * ((-1 + x1) // 2 % 4) + 16 * x3 + x0 // 2
), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp5, tmp12, tmp13)
tl.store(out_ptr0 + x7, tmp14, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 8
y1 = yindex // 8
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 8 * x2 + 144 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 8 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__unsafe_index_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.625
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 5 * tmp4 + 25 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp9, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (1, 1, 4, 4), (16, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 2, 9, 8), (144, 72, 8, 1), torch.float32
)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(2304)](arg0_1, buf0, 2304,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((16, 1, 8, 8), (64, 1, 8, 1), torch.float32)
triton_poi_fused_convolution_1[grid(128, 8)](buf0, buf1, 128, 8,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del buf0
buf2 = extern_kernels.convolution(buf1, arg1_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (16, 1, 5, 5), (25, 1, 5, 1))
del arg1_1
buf3 = buf1
del buf1
triton_poi_fused__unsafe_index_2[grid(1024)](buf2, buf3, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del buf2
return reinterpret_tensor(buf3, (4, 4, 8, 8), (256, 64, 8, 1), 0),
def _setup_kernel(k):
k = np.asarray(k, dtype=np.float32)
if k.ndim == 1:
k = np.outer(k, k)
k /= np.sum(k)
assert k.ndim == 2
assert k.shape[0] == k.shape[1]
return k
class Upsample2dNew(nn.Module):
def __init__(self, opts, k=[1, 3, 3, 1], factor=2, down=1, gain=1):
"""
Upsample2d method in G_synthesis_stylegan2.
        :param k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
            The default `[1, 3, 3, 1]` is a separable binomial smoothing filter;
            `[1] * factor` would correspond to average pooling.
        :param factor: Integer upsampling factor (default: 2).
        :param gain: Scaling factor for signal magnitude (default: 1.0).
        Returns: Tensor of the shape `[N, C, H * factor, W * factor]`
"""
super().__init__()
        assert isinstance(factor, int
            ) and factor >= 1, 'factor must be an integer >= 1! (default: 2)'
self.gain = gain
self.factor = factor
self.opts = opts
self.k = _setup_kernel(k) * (self.gain * factor ** 2)
self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0)
self.k = nn.Parameter(self.k, requires_grad=False)
self.p = self.k.shape[0] - self.factor
self.padx0, self.pady0 = (self.p + 1) // 2 + factor - 1, (self.p + 1
) // 2 + factor - 1
self.padx1, self.pady1 = self.p // 2, self.p // 2
self.kernelH, self.kernelW = self.k.shape[2:]
self.down = down
def forward(self, input_0):
arg1_1 = self.k
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
| repo_name: Iceland-Leo/StyleGAN2_PyTorch
| module_name: Upsample2d
| synthetic: false
| uuid: 5617
| licenses: ["MIT"]
| stars: 1
| sha: 3621f5e4ba1c7fde7e2fae1f4700d050656a0b02
| repo_link: https://github.com/Iceland-Leo/StyleGAN2_PyTorch/tree/3621f5e4ba1c7fde7e2fae1f4700d050656a0b02
|
TanhGaussianDistParams
|
import torch
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from typing import Callable
from torch.distributions import Normal
def identity(x: 'torch.Tensor') ->torch.Tensor:
"""Return input without any change."""
return x
def init_layer_uniform(layer: 'nn.Linear', init_w: 'float'=0.003) ->nn.Linear:
"""Init uniform parameters on the single layer"""
layer.weight.data.uniform_(-init_w, init_w)
layer.bias.data.uniform_(-init_w, init_w)
return layer
class MLP(nn.Module):
"""Baseline of Multilayer perceptron.
Attributes:
input_size (int): size of input
output_size (int): size of output layer
hidden_sizes (list): sizes of hidden layers
hidden_activation (function): activation function of hidden layers
output_activation (function): activation function of output layer
hidden_layers (list): list containing linear layers
use_output_layer (bool): whether or not to use the last layer
n_category (int): category number (-1 if the action is continuous)
"""
def __init__(self, input_size: 'int', output_size: 'int', hidden_sizes:
'list', hidden_activation: 'Callable'=F.relu, output_activation:
'Callable'=identity, linear_layer: 'nn.Module'=nn.Linear,
use_output_layer: 'bool'=True, n_category: 'int'=-1, init_fn:
'Callable'=init_layer_uniform):
"""Initialize.
Args:
input_size (int): size of input
output_size (int): size of output layer
            hidden_sizes (list): sizes of hidden layers
hidden_activation (function): activation function of hidden layers
output_activation (function): activation function of output layer
linear_layer (nn.Module): linear layer of mlp
use_output_layer (bool): whether or not to use the last layer
n_category (int): category number (-1 if the action is continuous)
init_fn (Callable): weight initialization function bound for the last layer
"""
super(MLP, self).__init__()
self.hidden_sizes = hidden_sizes
self.input_size = input_size
self.output_size = output_size
self.hidden_activation = hidden_activation
self.output_activation = output_activation
self.linear_layer = linear_layer
self.use_output_layer = use_output_layer
self.n_category = n_category
self.hidden_layers: 'list' = []
in_size = self.input_size
for i, next_size in enumerate(hidden_sizes):
fc = self.linear_layer(in_size, next_size)
in_size = next_size
self.__setattr__('hidden_fc{}'.format(i), fc)
self.hidden_layers.append(fc)
if self.use_output_layer:
self.output_layer = self.linear_layer(in_size, output_size)
self.output_layer = init_fn(self.output_layer)
else:
self.output_layer = identity
self.output_activation = identity
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Forward method implementation."""
for hidden_layer in self.hidden_layers:
x = self.hidden_activation(hidden_layer(x))
x = self.output_activation(self.output_layer(x))
return x
class GaussianDist(MLP):
"""Multilayer perceptron with Gaussian distribution output.
Attributes:
mu_activation (function): bounding function for mean
log_std_min (float): lower bound of log std
log_std_max (float): upper bound of log std
mu_layer (nn.Linear): output layer for mean
log_std_layer (nn.Linear): output layer for log std
"""
def __init__(self, input_size: 'int', output_size: 'int', hidden_sizes:
'list', hidden_activation: 'Callable'=F.relu, mu_activation:
'Callable'=torch.tanh, log_std_min: 'float'=-20, log_std_max:
'float'=2, init_fn: 'Callable'=init_layer_uniform):
"""Initialize."""
super(GaussianDist, self).__init__(input_size=input_size,
output_size=output_size, hidden_sizes=hidden_sizes,
hidden_activation=hidden_activation, use_output_layer=False)
self.mu_activation = mu_activation
self.log_std_min = log_std_min
self.log_std_max = log_std_max
in_size = hidden_sizes[-1]
self.log_std_layer = nn.Linear(in_size, output_size)
self.log_std_layer = init_fn(self.log_std_layer)
self.mu_layer = nn.Linear(in_size, output_size)
self.mu_layer = init_fn(self.mu_layer)
def get_dist_params(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, ...]:
"""Return gausian distribution parameters."""
hidden = super(GaussianDist, self).forward(x)
mu = self.mu_activation(self.mu_layer(hidden))
log_std = torch.tanh(self.log_std_layer(hidden))
log_std = self.log_std_min + 0.5 * (self.log_std_max - self.log_std_min
) * (log_std + 1)
std = torch.exp(log_std)
return mu, log_std, std
def forward(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, ...]:
"""Forward method implementation."""
mu, _, std = self.get_dist_params(x)
dist = Normal(mu, std)
action = dist.sample()
return action, dist
class TanhGaussianDistParams(GaussianDist):
"""Multilayer perceptron with Gaussian distribution output."""
def __init__(self, **kwargs):
"""Initialize."""
super(TanhGaussianDistParams, self).__init__(**kwargs,
mu_activation=identity)
def forward(self, x: 'torch.Tensor', epsilon: 'float'=1e-06) ->Tuple[
torch.Tensor, ...]:
"""Forward method implementation."""
mu, _, std = super(TanhGaussianDistParams, self).get_dist_params(x)
dist = Normal(mu, std)
z = dist.rsample()
action = torch.tanh(z)
log_prob = dist.log_prob(z) - torch.log(1 - action.pow(2) + epsilon)
log_prob = log_prob.sum(-1, keepdim=True)
return action, log_prob, z, mu, std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4, 'hidden_sizes': [4, 4]}]
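# --- Usage sketch (editor addition; sizes follow get_init_inputs() above) ---
# The actor returns a tanh-squashed sample plus a log-probability that includes
# the change-of-variables correction -log(1 - tanh(z)^2 + eps) summed over the
# action dimension (the usual SAC-style bounded-action trick).
if __name__ == "__main__":
    policy = TanhGaussianDistParams(input_size=4, output_size=4, hidden_sizes=[4, 4])
    state = torch.rand(4, 4, 4, 4)
    action, log_prob, z, mu, std = policy(state)
    print(action.shape, log_prob.shape)       # torch.Size([4, 4, 4, 4]) torch.Size([4, 4, 4, 1])
    assert float(action.abs().max()) <= 1.0   # tanh squashes actions into [-1, 1]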
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from typing import Callable
from torch.distributions import Normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_exp_mul_sub_tanh_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp9 = tl.load(in_ptr1 + x0, xmask)
tmp10 = tl.load(in_ptr2 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = 11.0
tmp5 = tmp3 * tmp4
tmp6 = -20.0
tmp7 = tmp5 + tmp6
tmp8 = tl_math.exp(tmp7)
tmp11 = tmp10 * tmp8
tmp12 = tmp9 + tmp11
tmp13 = tmp12 - tmp9
tmp14 = libdevice.tanh(tmp12)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
tl.store(out_ptr2 + x0, tmp13, xmask)
tl.store(out_ptr3 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_add_div_log_mul_neg_pow_rsub_sub_sum_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp30 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp37 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp40 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp47 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp54 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp57 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp64 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tmp0 * tmp0
tmp2 = -tmp1
tmp4 = tmp3 * tmp3
tmp5 = 2.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 / tmp6
tmp8 = tl_math.log(tmp3)
tmp9 = tmp7 - tmp8
tmp10 = 0.9189385332046727
tmp11 = tmp9 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = 1.0
tmp15 = tmp14 - tmp13
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = tl_math.log(tmp17)
tmp19 = tmp11 - tmp18
tmp21 = tmp20 * tmp20
tmp22 = -tmp21
tmp24 = tmp23 * tmp23
tmp25 = tmp24 * tmp5
tmp26 = tmp22 / tmp25
tmp27 = tl_math.log(tmp23)
tmp28 = tmp26 - tmp27
tmp29 = tmp28 - tmp10
tmp31 = tmp30 * tmp30
tmp32 = tmp14 - tmp31
tmp33 = tmp32 + tmp16
tmp34 = tl_math.log(tmp33)
tmp35 = tmp29 - tmp34
tmp36 = tmp19 + tmp35
tmp38 = tmp37 * tmp37
tmp39 = -tmp38
tmp41 = tmp40 * tmp40
tmp42 = tmp41 * tmp5
tmp43 = tmp39 / tmp42
tmp44 = tl_math.log(tmp40)
tmp45 = tmp43 - tmp44
tmp46 = tmp45 - tmp10
tmp48 = tmp47 * tmp47
tmp49 = tmp14 - tmp48
tmp50 = tmp49 + tmp16
tmp51 = tl_math.log(tmp50)
tmp52 = tmp46 - tmp51
tmp53 = tmp36 + tmp52
tmp55 = tmp54 * tmp54
tmp56 = -tmp55
tmp58 = tmp57 * tmp57
tmp59 = tmp58 * tmp5
tmp60 = tmp56 / tmp59
tmp61 = tl_math.log(tmp57)
tmp62 = tmp60 - tmp61
tmp63 = tmp62 - tmp10
tmp65 = tmp64 * tmp64
tmp66 = tmp14 - tmp65
tmp67 = tmp66 + tmp16
tmp68 = tl_math.log(tmp67)
tmp69 = tmp63 - tmp68
tmp70 = tmp53 + tmp69
tl.store(out_ptr0 + x0, tmp70, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_9
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = torch.ops.aten.normal_functional.default(buf7)
buf9 = buf8
del buf8
buf6 = buf7
del buf7
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_exp_mul_sub_tanh_1[grid(256)](buf5, buf4, buf9,
buf6, buf10, buf12, buf11, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_add_div_log_mul_neg_pow_rsub_sub_sum_2[grid(64)](buf12
, buf6, buf11, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1)
return (buf11, buf13, buf10, reinterpret_tensor(buf4, (4, 4, 4, 4), (64,
16, 4, 1), 0), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1),
0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf5, buf6, buf9,
buf11, buf12, primals_8, primals_6, buf14, primals_4, buf15)
def identity(x: 'torch.Tensor') ->torch.Tensor:
"""Return input without any change."""
return x
def init_layer_uniform(layer: 'nn.Linear', init_w: 'float'=0.003) ->nn.Linear:
"""Init uniform parameters on the single layer"""
layer.weight.data.uniform_(-init_w, init_w)
layer.bias.data.uniform_(-init_w, init_w)
return layer
class MLP(nn.Module):
"""Baseline of Multilayer perceptron.
Attributes:
input_size (int): size of input
output_size (int): size of output layer
hidden_sizes (list): sizes of hidden layers
hidden_activation (function): activation function of hidden layers
output_activation (function): activation function of output layer
hidden_layers (list): list containing linear layers
use_output_layer (bool): whether or not to use the last layer
n_category (int): category number (-1 if the action is continuous)
"""
def __init__(self, input_size: 'int', output_size: 'int', hidden_sizes:
'list', hidden_activation: 'Callable'=F.relu, output_activation:
'Callable'=identity, linear_layer: 'nn.Module'=nn.Linear,
use_output_layer: 'bool'=True, n_category: 'int'=-1, init_fn:
'Callable'=init_layer_uniform):
"""Initialize.
Args:
input_size (int): size of input
output_size (int): size of output layer
            hidden_sizes (list): sizes of hidden layers
hidden_activation (function): activation function of hidden layers
output_activation (function): activation function of output layer
linear_layer (nn.Module): linear layer of mlp
use_output_layer (bool): whether or not to use the last layer
n_category (int): category number (-1 if the action is continuous)
init_fn (Callable): weight initialization function bound for the last layer
"""
super(MLP, self).__init__()
self.hidden_sizes = hidden_sizes
self.input_size = input_size
self.output_size = output_size
self.hidden_activation = hidden_activation
self.output_activation = output_activation
self.linear_layer = linear_layer
self.use_output_layer = use_output_layer
self.n_category = n_category
self.hidden_layers: 'list' = []
in_size = self.input_size
for i, next_size in enumerate(hidden_sizes):
fc = self.linear_layer(in_size, next_size)
in_size = next_size
self.__setattr__('hidden_fc{}'.format(i), fc)
self.hidden_layers.append(fc)
if self.use_output_layer:
self.output_layer = self.linear_layer(in_size, output_size)
self.output_layer = init_fn(self.output_layer)
else:
self.output_layer = identity
self.output_activation = identity
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Forward method implementation."""
for hidden_layer in self.hidden_layers:
x = self.hidden_activation(hidden_layer(x))
x = self.output_activation(self.output_layer(x))
return x
class GaussianDist(MLP):
"""Multilayer perceptron with Gaussian distribution output.
Attributes:
mu_activation (function): bounding function for mean
log_std_min (float): lower bound of log std
log_std_max (float): upper bound of log std
mu_layer (nn.Linear): output layer for mean
log_std_layer (nn.Linear): output layer for log std
"""
def __init__(self, input_size: 'int', output_size: 'int', hidden_sizes:
'list', hidden_activation: 'Callable'=F.relu, mu_activation:
'Callable'=torch.tanh, log_std_min: 'float'=-20, log_std_max:
'float'=2, init_fn: 'Callable'=init_layer_uniform):
"""Initialize."""
super(GaussianDist, self).__init__(input_size=input_size,
output_size=output_size, hidden_sizes=hidden_sizes,
hidden_activation=hidden_activation, use_output_layer=False)
self.mu_activation = mu_activation
self.log_std_min = log_std_min
self.log_std_max = log_std_max
in_size = hidden_sizes[-1]
self.log_std_layer = nn.Linear(in_size, output_size)
self.log_std_layer = init_fn(self.log_std_layer)
self.mu_layer = nn.Linear(in_size, output_size)
self.mu_layer = init_fn(self.mu_layer)
def get_dist_params(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, ...]:
"""Return gausian distribution parameters."""
hidden = super(GaussianDist, self).forward(x)
mu = self.mu_activation(self.mu_layer(hidden))
log_std = torch.tanh(self.log_std_layer(hidden))
log_std = self.log_std_min + 0.5 * (self.log_std_max - self.log_std_min
) * (log_std + 1)
std = torch.exp(log_std)
return mu, log_std, std
def forward(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, ...]:
"""Forward method implementation."""
mu, _, std = self.get_dist_params(x)
dist = Normal(mu, std)
action = dist.sample()
return action, dist
class TanhGaussianDistParamsNew(GaussianDist):
"""Multilayer perceptron with Gaussian distribution output."""
def __init__(self, **kwargs):
"""Initialize."""
super(TanhGaussianDistParamsNew, self).__init__(**kwargs,
mu_activation=identity)
def forward(self, input_0):
primals_1 = self.hidden_fc0.weight
primals_2 = self.hidden_fc0.bias
primals_4 = self.hidden_fc1.weight
primals_5 = self.hidden_fc1.bias
primals_6 = self.log_std_layer.weight
primals_7 = self.log_std_layer.bias
primals_8 = self.mu_layer.weight
primals_9 = self.mu_layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1], output[2], output[3], output[4]
| repo_name: MrSyee/rl_algorithms
| module_name: TanhGaussianDistParams
| synthetic: false
| uuid: 5618
| licenses: ["MIT"]
| stars: 1
| sha: 5b5276982032f8a8a614b9466849b7b3ef245b3e
| repo_link: https://github.com/MrSyee/rl_algorithms/tree/5b5276982032f8a8a614b9466849b7b3ef245b3e
|
DuelingMLP
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Callable
def identity(x: 'torch.Tensor') ->torch.Tensor:
"""Return input without any change."""
return x
def init_layer_uniform(layer: 'nn.Linear', init_w: 'float'=0.003) ->nn.Linear:
"""Init uniform parameters on the single layer"""
layer.weight.data.uniform_(-init_w, init_w)
layer.bias.data.uniform_(-init_w, init_w)
return layer
class MLP(nn.Module):
"""Baseline of Multilayer perceptron.
Attributes:
input_size (int): size of input
output_size (int): size of output layer
hidden_sizes (list): sizes of hidden layers
hidden_activation (function): activation function of hidden layers
output_activation (function): activation function of output layer
hidden_layers (list): list containing linear layers
use_output_layer (bool): whether or not to use the last layer
n_category (int): category number (-1 if the action is continuous)
"""
def __init__(self, input_size: 'int', output_size: 'int', hidden_sizes:
'list', hidden_activation: 'Callable'=F.relu, output_activation:
'Callable'=identity, linear_layer: 'nn.Module'=nn.Linear,
use_output_layer: 'bool'=True, n_category: 'int'=-1, init_fn:
'Callable'=init_layer_uniform):
"""Initialize.
Args:
input_size (int): size of input
output_size (int): size of output layer
            hidden_sizes (list): sizes of hidden layers
hidden_activation (function): activation function of hidden layers
output_activation (function): activation function of output layer
linear_layer (nn.Module): linear layer of mlp
use_output_layer (bool): whether or not to use the last layer
n_category (int): category number (-1 if the action is continuous)
            init_fn (Callable): weight initialization function used for the last layer
"""
super(MLP, self).__init__()
self.hidden_sizes = hidden_sizes
self.input_size = input_size
self.output_size = output_size
self.hidden_activation = hidden_activation
self.output_activation = output_activation
self.linear_layer = linear_layer
self.use_output_layer = use_output_layer
self.n_category = n_category
self.hidden_layers: 'list' = []
in_size = self.input_size
for i, next_size in enumerate(hidden_sizes):
fc = self.linear_layer(in_size, next_size)
in_size = next_size
self.__setattr__('hidden_fc{}'.format(i), fc)
self.hidden_layers.append(fc)
if self.use_output_layer:
self.output_layer = self.linear_layer(in_size, output_size)
self.output_layer = init_fn(self.output_layer)
else:
self.output_layer = identity
self.output_activation = identity
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Forward method implementation."""
for hidden_layer in self.hidden_layers:
x = self.hidden_activation(hidden_layer(x))
x = self.output_activation(self.output_layer(x))
return x
class NoisyMLPHandler:
"""Includes methods to handle noisy linear."""
def reset_noise(self):
"""Re-sample noise"""
for _, module in self.named_children():
module.reset_noise()
class DuelingMLP(MLP, NoisyMLPHandler):
"""Multilayer perceptron with dueling construction."""
def __init__(self, input_size: 'int', output_size: 'int', hidden_sizes:
'list', hidden_activation: 'Callable'=F.relu, linear_layer:
'nn.Module'=nn.Linear, init_fn: 'Callable'=init_layer_uniform):
"""Initialize."""
super(DuelingMLP, self).__init__(input_size=input_size, output_size
=output_size, hidden_sizes=hidden_sizes, hidden_activation=
hidden_activation, linear_layer=linear_layer, use_output_layer=
False)
in_size = hidden_sizes[-1]
self.advantage_hidden_layer = self.linear_layer(in_size, in_size)
self.advantage_layer = self.linear_layer(in_size, output_size)
self.advantage_layer = init_fn(self.advantage_layer)
self.value_hidden_layer = self.linear_layer(in_size, in_size)
self.value_layer = self.linear_layer(in_size, 1)
self.value_layer = init_fn(self.value_layer)
def _forward_dueling(self, x: 'torch.Tensor') ->torch.Tensor:
adv_x = self.hidden_activation(self.advantage_hidden_layer(x))
val_x = self.hidden_activation(self.value_hidden_layer(x))
advantage = self.advantage_layer(adv_x)
value = self.value_layer(val_x)
advantage_mean = advantage.mean(dim=-1, keepdim=True)
q = value + advantage - advantage_mean
return q
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Forward method implementation."""
x = super(DuelingMLP, self).forward(x)
x = self._forward_dueling(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4, 'hidden_sizes': [4, 4]}]
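# Editorial usage sketch (not from the original repo): DuelingMLP feeds the shared
# trunk into separate value and advantage streams and recombines them as
# Q = V + A - mean(A), which keeps the value/advantage split identifiable.
def _dueling_mlp_sketch():
    net = DuelingMLP(input_size=4, output_size=4, hidden_sizes=[4, 4])
    obs = torch.rand(8, 4)
    q_values = net(obs)  # shape (8, 4), one Q-value per action
    return q_values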
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
from typing import Callable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_mean_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp12 = tmp10 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = tmp5 - tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (1, 4), (4, 1))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf6,
primals_7, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf6, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf7)
del primals_11
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf8,
primals_9, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_12, (4, 1), (1, 4), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mean_sub_1[grid(256)](buf9, primals_13, buf7,
buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf7
del buf9
del primals_13
return (buf10, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf6, (64, 4), (4, 1),
0), reinterpret_tensor(buf8, (64, 4), (4, 1), 0), primals_12,
primals_10, buf11, primals_8, buf12, primals_6, buf13, primals_4, buf14
)
def identity(x: 'torch.Tensor') ->torch.Tensor:
"""Return input without any change."""
return x
def init_layer_uniform(layer: 'nn.Linear', init_w: 'float'=0.003) ->nn.Linear:
"""Init uniform parameters on the single layer"""
layer.weight.data.uniform_(-init_w, init_w)
layer.bias.data.uniform_(-init_w, init_w)
return layer
class MLP(nn.Module):
"""Baseline of Multilayer perceptron.
Attributes:
input_size (int): size of input
output_size (int): size of output layer
hidden_sizes (list): sizes of hidden layers
hidden_activation (function): activation function of hidden layers
output_activation (function): activation function of output layer
hidden_layers (list): list containing linear layers
use_output_layer (bool): whether or not to use the last layer
n_category (int): category number (-1 if the action is continuous)
"""
def __init__(self, input_size: 'int', output_size: 'int', hidden_sizes:
'list', hidden_activation: 'Callable'=F.relu, output_activation:
'Callable'=identity, linear_layer: 'nn.Module'=nn.Linear,
use_output_layer: 'bool'=True, n_category: 'int'=-1, init_fn:
'Callable'=init_layer_uniform):
"""Initialize.
Args:
input_size (int): size of input
output_size (int): size of output layer
            hidden_sizes (list): sizes of hidden layers
hidden_activation (function): activation function of hidden layers
output_activation (function): activation function of output layer
linear_layer (nn.Module): linear layer of mlp
use_output_layer (bool): whether or not to use the last layer
n_category (int): category number (-1 if the action is continuous)
            init_fn (Callable): weight initialization function used for the last layer
"""
super(MLP, self).__init__()
self.hidden_sizes = hidden_sizes
self.input_size = input_size
self.output_size = output_size
self.hidden_activation = hidden_activation
self.output_activation = output_activation
self.linear_layer = linear_layer
self.use_output_layer = use_output_layer
self.n_category = n_category
self.hidden_layers: 'list' = []
in_size = self.input_size
for i, next_size in enumerate(hidden_sizes):
fc = self.linear_layer(in_size, next_size)
in_size = next_size
self.__setattr__('hidden_fc{}'.format(i), fc)
self.hidden_layers.append(fc)
if self.use_output_layer:
self.output_layer = self.linear_layer(in_size, output_size)
self.output_layer = init_fn(self.output_layer)
else:
self.output_layer = identity
self.output_activation = identity
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Forward method implementation."""
for hidden_layer in self.hidden_layers:
x = self.hidden_activation(hidden_layer(x))
x = self.output_activation(self.output_layer(x))
return x
class NoisyMLPHandler:
"""Includes methods to handle noisy linear."""
def reset_noise(self):
"""Re-sample noise"""
for _, module in self.named_children():
module.reset_noise()
class DuelingMLPNew(MLP, NoisyMLPHandler):
"""Multilayer perceptron with dueling construction."""
def __init__(self, input_size: 'int', output_size: 'int', hidden_sizes:
'list', hidden_activation: 'Callable'=F.relu, linear_layer:
'nn.Module'=nn.Linear, init_fn: 'Callable'=init_layer_uniform):
"""Initialize."""
super(DuelingMLPNew, self).__init__(input_size=input_size,
output_size=output_size, hidden_sizes=hidden_sizes,
hidden_activation=hidden_activation, linear_layer=linear_layer,
use_output_layer=False)
in_size = hidden_sizes[-1]
self.advantage_hidden_layer = self.linear_layer(in_size, in_size)
self.advantage_layer = self.linear_layer(in_size, output_size)
self.advantage_layer = init_fn(self.advantage_layer)
self.value_hidden_layer = self.linear_layer(in_size, in_size)
self.value_layer = self.linear_layer(in_size, 1)
self.value_layer = init_fn(self.value_layer)
def _forward_dueling(self, x: 'torch.Tensor') ->torch.Tensor:
adv_x = self.hidden_activation(self.advantage_hidden_layer(x))
val_x = self.hidden_activation(self.value_hidden_layer(x))
advantage = self.advantage_layer(adv_x)
value = self.value_layer(val_x)
advantage_mean = advantage.mean(dim=-1, keepdim=True)
q = value + advantage - advantage_mean
return q
def forward(self, input_0):
primals_1 = self.hidden_fc0.weight
primals_2 = self.hidden_fc0.bias
primals_4 = self.hidden_fc1.weight
primals_5 = self.hidden_fc1.bias
primals_6 = self.advantage_hidden_layer.weight
primals_7 = self.advantage_hidden_layer.bias
primals_8 = self.advantage_layer.weight
primals_9 = self.advantage_layer.bias
primals_10 = self.value_hidden_layer.weight
primals_11 = self.value_hidden_layer.bias
primals_12 = self.value_layer.weight
primals_13 = self.value_layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
|
MrSyee/rl_algorithms
|
DuelingMLP
| false
| 5,619
|
[
"MIT"
] | 1
|
5b5276982032f8a8a614b9466849b7b3ef245b3e
|
https://github.com/MrSyee/rl_algorithms/tree/5b5276982032f8a8a614b9466849b7b3ef245b3e
|
RollLayer
|
import torch
class RollLayer(torch.nn.Module):
"""
    Layer which cyclically shifts (rolls) all but the last feature dimension,
    so that successive coupling layers operate on different dimensions
"""
def __init__(self, shift):
super(RollLayer, self).__init__()
self.shift = shift
def forward(self, x):
return torch.cat((torch.roll(x[:, :-1], self.shift, dims=-1), x[:,
-1:]), axis=-1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'shift': 4}]
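# Editorial sketch (not from the original repo): RollLayer rolls every feature except
# the last one by `shift`, so stacked coupling layers act on different dimensions.
def _roll_layer_sketch():
    layer = RollLayer(shift=1)
    x = torch.arange(8.0).reshape(2, 4)  # rows [0, 1, 2, 3] and [4, 5, 6, 7]
    y = layer(x)  # first three columns rolled by one position, last column kept
    return y  # first row becomes [2, 0, 1, 3]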
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + (2 + x0) % 3), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class RollLayerNew(torch.nn.Module):
"""
    Layer which cyclically shifts (rolls) all but the last feature dimension,
    so that successive coupling layers operate on different dimensions
"""
def __init__(self, shift):
super(RollLayerNew, self).__init__()
self.shift = shift
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
NGoetz/NF
|
RollLayer
| false
| 5,620
|
[
"MIT"
] | 1
|
935886db48f4675db1a2c42f7c264b12d5014ed8
|
https://github.com/NGoetz/NF/tree/935886db48f4675db1a2c42f7c264b12d5014ed8
|
CocoLinear
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CocoLinear(nn.Module):
"""Congenerous Cosine linear module (for CoCo loss)
Parameters
----------
nfeat : int
Embedding dimension
nclass : int
Number of classes
alpha : float
Scaling factor used in embedding L2-normalization
"""
def __init__(self, nfeat, nclass, alpha):
super(CocoLinear, self).__init__()
self.alpha = alpha
self.centers = nn.Parameter(torch.randn(nclass, nfeat))
def forward(self, x):
"""Apply the angular margin transformation
Parameters
----------
x : `torch.Tensor`
an embedding batch
Returns
-------
fX : `torch.Tensor`
logits after the congenerous cosine transformation
"""
cnorm = F.normalize(self.centers)
xnorm = self.alpha * F.normalize(x)
logits = torch.matmul(xnorm, torch.transpose(cnorm, 0, 1))
return logits
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nclass': 4, 'alpha': 4}]
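# Editorial usage sketch (not from the original repo): CocoLinear returns scaled
# cosine similarities between L2-normalized embeddings and L2-normalized class
# centers, i.e. logits[i, j] = alpha * cos(emb_i, center_j).
def _coco_linear_sketch():
    head = CocoLinear(nfeat=4, nclass=4, alpha=4)
    emb = torch.rand(8, 4)
    logits = head(emb)  # shape (8, 4)
    return logits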
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp16 = 4.0
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_0[grid(256)](primals_2, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_1, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class CocoLinearNew(nn.Module):
"""Congenerous Cosine linear module (for CoCo loss)
Parameters
----------
nfeat : int
Embedding dimension
nclass : int
Number of classes
alpha : float
Scaling factor used in embedding L2-normalization
"""
def __init__(self, nfeat, nclass, alpha):
super(CocoLinearNew, self).__init__()
self.alpha = alpha
self.centers = nn.Parameter(torch.randn(nclass, nfeat))
def forward(self, input_0):
primals_1 = self.centers
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Mymoza/pyannote-audio
|
CocoLinear
| false
| 5,621
|
[
"MIT"
] | 1
|
9ac612ee6b854a1a65c3d8992856550304969674
|
https://github.com/Mymoza/pyannote-audio/tree/9ac612ee6b854a1a65c3d8992856550304969674
|
SelfAttnMatch
|
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
class GELU(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
class SelfAttnMatch(nn.Module):
"""Given sequences X and Y, match sequence Y to each element in X.
* o_i = sum(alpha_j * x_j) for i in X
* alpha_j = softmax(x_j * x_i)
"""
def __init__(self, input_size, identity=False, diag=True):
super(SelfAttnMatch, self).__init__()
if not identity:
self.linear = nn.Linear(input_size, input_size)
else:
self.linear = None
self.diag = diag
self.gelu = GELU()
def forward(self, inputs):
"""
Args:
            inputs: batch * len1 * dim1
        Output:
            matched_seq: batch * len1 * dim1
            attention mass: batch * len1 (alpha summed over dim 1)
"""
if self.linear:
x_proj = self.linear(inputs)
x_proj = self.gelu(x_proj)
else:
x_proj = inputs
scores = x_proj.bmm(x_proj.transpose(2, 1))
if not self.diag:
x_len = inputs.size(1)
for i in range(x_len):
scores[:, i, i] = 0
alpha = F.softmax(scores, dim=2)
matched_seq = alpha.bmm(inputs)
return matched_seq, alpha.sum(dim=1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
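# Editorial usage sketch (not from the original repo): every position attends over the
# whole sequence via softmax(x_proj @ x_proj^T); the module returns the attention-
# weighted mix of the raw inputs plus the per-position attention mass.
def _self_attn_match_sketch():
    attn = SelfAttnMatch(input_size=4)
    x = torch.rand(2, 5, 4)  # batch * len1 * dim1
    matched_seq, alpha_sum = attn(x)  # (2, 5, 4) and (2, 5)
    return matched_seq, alpha_sum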
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0[grid(64)](buf0, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf1, reinterpret_tensor(buf1, (4, 4, 4), (16, 1,
4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
extern_kernels.bmm(buf4, primals_3, out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_sum_3[grid(16)](buf4, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf5, buf6, primals_3, buf0, reinterpret_tensor(buf1, (4, 4, 4),
(16, 1, 4), 0), buf4
class GELU(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
class SelfAttnMatchNew(nn.Module):
"""Given sequences X and Y, match sequence Y to each element in X.
* o_i = sum(alpha_j * x_j) for i in X
* alpha_j = softmax(x_j * x_i)
"""
def __init__(self, input_size, identity=False, diag=True):
super(SelfAttnMatchNew, self).__init__()
if not identity:
self.linear = nn.Linear(input_size, input_size)
else:
self.linear = None
self.diag = diag
self.gelu = GELU()
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
|
MobtgZhang/MWMLNet
|
SelfAttnMatch
| false
| 5,622
|
[
"MIT"
] | 1
|
125bb39935916b6b4be505c51cb6a04eb49b96d0
|
https://github.com/MobtgZhang/MWMLNet/tree/125bb39935916b6b4be505c51cb6a04eb49b96d0
|
Merge
|
import torch
import torch.nn as nn
import torch.optim
class Merge(nn.Module):
def __init__(self, hidden_size, embedding_size, dropout=0.5):
super(Merge, self).__init__()
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.em_dropout = nn.Dropout(dropout)
self.merge = nn.Linear(hidden_size * 2 + embedding_size, hidden_size)
self.merge_g = nn.Linear(hidden_size * 2 + embedding_size, hidden_size)
def forward(self, node_embedding, sub_tree_1, sub_tree_2):
sub_tree_1 = self.em_dropout(sub_tree_1)
sub_tree_2 = self.em_dropout(sub_tree_2)
node_embedding = self.em_dropout(node_embedding)
sub_tree = torch.tanh(self.merge(torch.cat((node_embedding,
sub_tree_1, sub_tree_2), 1)))
sub_tree_g = torch.sigmoid(self.merge_g(torch.cat((node_embedding,
sub_tree_1, sub_tree_2), 1)))
sub_tree = sub_tree * sub_tree_g
return sub_tree
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'embedding_size': 4}]
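# Editorial usage sketch (not from the original repo): Merge fuses a parent node
# embedding with two sub-tree embeddings through a tanh candidate that is gated by
# a sigmoid over the same concatenated features.
def _merge_sketch():
    merge = Merge(hidden_size=4, embedding_size=4)
    merge.eval()  # disable dropout so the sketch is deterministic
    node, left, right = torch.rand(3, 4), torch.rand(3, 4), torch.rand(3, 4)
    sub_tree = merge(node, left, right)  # shape (3, 4)
    return sub_tree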
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp1 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 12), (12, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 12), (12, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(48)](primals_3, primals_1, primals_2,
buf0, 48, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4,
(12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf0, reinterpret_tensor(primals_6,
(12, 4), (1, 12), 0), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_tanh_1[grid(16)](buf1, buf2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf3, buf0, buf1, buf2
class MergeNew(nn.Module):
def __init__(self, hidden_size, embedding_size, dropout=0.5):
super(MergeNew, self).__init__()
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.em_dropout = nn.Dropout(dropout)
self.merge = nn.Linear(hidden_size * 2 + embedding_size, hidden_size)
self.merge_g = nn.Linear(hidden_size * 2 + embedding_size, hidden_size)
def forward(self, input_0, input_1, input_2):
primals_4 = self.merge.weight
primals_5 = self.merge.bias
primals_6 = self.merge_g.weight
primals_7 = self.merge_g.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Myeongchan-Kim/SVAMP
|
Merge
| false
| 5,623
|
[
"MIT"
] | 1
|
9ff9ad471a61aa390199df4b99beb3b654f5c943
|
https://github.com/Myeongchan-Kim/SVAMP/tree/9ff9ad471a61aa390199df4b99beb3b654f5c943
|
MultiLabelSoftMarginLoss
|
import torch
from torch.nn.modules.loss import _WeightedLoss
import torch.nn.parallel
import torch.optim
import torch.utils.data
def binary_cross_entropy(input, target, eps=1e-10):
"""if not (target.size() == input.size()):
warnings.warn("Using a target size ({}) that is different to the input size ({}) is deprecated. "
"Please ensure they have the same size.".format(target.size(), input.size()))
if input.nelement() != target.nelement():
raise ValueError("Target and input must have the same number of elements. target nelement ({}) "
"!= input nelement ({})".format(target.nelement(), input.nelement()))
if weight is not None:
new_size = _infer_size(target.size(), weight.size())
weight = weight.expand(new_size)
if torch.is_tensor(weight):
weight = Variable(weight)"""
input = torch.sigmoid(input)
return -(target * torch.log(input + eps) + (1 - target) * torch.log(1 -
input + eps))
class MultiLabelSoftMarginLoss(_WeightedLoss):
def forward(self, input, target):
return binary_cross_entropy(input, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
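# Editorial sketch (not from the original repo): this loss applies a sigmoid to the
# input and returns the element-wise binary cross-entropy (no reduction); eps keeps
# both log terms away from log(0).
def _bce_sketch():
    criterion = MultiLabelSoftMarginLoss()
    logits = torch.randn(2, 3)
    targets = torch.randint(0, 2, (2, 3)).float()
    loss_map = criterion(logits, targets)  # same shape as the inputs, (2, 3)
    return loss_map.mean()  # reduce explicitly if a scalar loss is needed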
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn.modules.loss import _WeightedLoss
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_log_mul_neg_rsub_sigmoid_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = 1e-10
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp6 = tmp0 * tmp5
tmp7 = 1.0
tmp8 = tmp7 - tmp0
tmp9 = tmp7 - tmp2
tmp10 = tmp9 + tmp3
tmp11 = tl_math.log(tmp10)
tmp12 = tmp8 * tmp11
tmp13 = tmp6 + tmp12
tmp14 = -tmp13
tl.store(out_ptr0 + x0, tmp14, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_log_mul_neg_rsub_sigmoid_0[grid(256)](arg1_1,
arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
def binary_cross_entropy(input, target, eps=1e-10):
"""if not (target.size() == input.size()):
warnings.warn("Using a target size ({}) that is different to the input size ({}) is deprecated. "
"Please ensure they have the same size.".format(target.size(), input.size()))
if input.nelement() != target.nelement():
raise ValueError("Target and input must have the same number of elements. target nelement ({}) "
"!= input nelement ({})".format(target.nelement(), input.nelement()))
if weight is not None:
new_size = _infer_size(target.size(), weight.size())
weight = weight.expand(new_size)
if torch.is_tensor(weight):
weight = Variable(weight)"""
input = torch.sigmoid(input)
return -(target * torch.log(input + eps) + (1 - target) * torch.log(1 -
input + eps))
class MultiLabelSoftMarginLossNew(_WeightedLoss):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
NIRVANALAN/microscopy
|
MultiLabelSoftMarginLoss
| false
| 5,624
|
[
"MIT"
] | 1
|
4e48e51ebb11d8af44b71e8b497cc5da3b097c9b
|
https://github.com/NIRVANALAN/microscopy/tree/4e48e51ebb11d8af44b71e8b497cc5da3b097c9b
|
Reshape
|
import torch
class Reshape(torch.nn.Module):
"""
Reshaping layer
"""
def __init__(self, shapes1, shapes2):
super(Reshape, self).__init__()
self.shapes = shapes1, shapes2
def forward(self, tensor):
return torch.reshape(tensor.clone(), (tensor.shape[0], self.shapes[
0], self.shapes[1]))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'shapes1': 4, 'shapes2': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class ReshapeNew(torch.nn.Module):
"""
Reshaping layer
"""
def __init__(self, shapes1, shapes2):
super(ReshapeNew, self).__init__()
self.shapes = shapes1, shapes2
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
NGoetz/NF
|
Reshape
| false
| 5,625
|
[
"MIT"
] | 1
|
935886db48f4675db1a2c42f7c264b12d5014ed8
|
https://github.com/NGoetz/NF/tree/935886db48f4675db1a2c42f7c264b12d5014ed8
|
Lift
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import kaiming_normal
def ZeroInitializer(param):
shape = param.size()
init = np.zeros(shape).astype(np.float32)
param.data.set_(torch.from_numpy(init))
def Linear(initializer=kaiming_normal, bias_initializer=ZeroInitializer):
class CustomLinear(nn.Linear):
def reset_parameters(self):
initializer(self.weight)
if self.bias is not None:
bias_initializer(self.bias)
return CustomLinear
class Lift(nn.Module):
def __init__(self, in_features, out_features):
super(Lift, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.lift = Linear()(self.in_features, self.out_features * 2)
def forward(self, input):
return F.tanh(self.lift(input))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
from torch.nn.init import kaiming_normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(512)](buf1, primals_2, 512, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
def ZeroInitializer(param):
shape = param.size()
init = np.zeros(shape).astype(np.float32)
param.data.set_(torch.from_numpy(init))
def Linear(initializer=kaiming_normal, bias_initializer=ZeroInitializer):
class CustomLinear(nn.Linear):
def reset_parameters(self):
initializer(self.weight)
if self.bias is not None:
bias_initializer(self.bias)
return CustomLinear
class LiftNew(nn.Module):
def __init__(self, in_features, out_features):
super(LiftNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.lift = Linear()(self.in_features, self.out_features * 2)
def forward(self, input_0):
primals_1 = self.lift.weight
primals_2 = self.lift.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
NLP-Discourse-SoochowU/rst_dp2019Bottom2Up
|
Lift
| false
| 5,626
|
[
"MIT"
] | 1
|
ac1624127c9c8a3301685193ac8239357e01f6ca
|
https://github.com/NLP-Discourse-SoochowU/rst_dp2019Bottom2Up/tree/ac1624127c9c8a3301685193ac8239357e01f6ca
|
Attention
|
from torch.nn import Module
import torch
from torch.nn.modules import Module
from torch.nn.functional import softmax
from torch.nn import Linear
def neginf(dtype):
"""
Return a representable finite
number near -inf for a dtype.
"""
if dtype is torch.float16:
return -65504
else:
return -1e+20
class Attention(Module):
"""
Luong style general attention from
https://arxiv.org/pdf/1508.04025.pdf.
"""
def __init__(self, hidden_size):
super().__init__()
self.project = Linear(in_features=hidden_size, out_features=
hidden_size, bias=False)
self.combine = Linear(in_features=hidden_size * 2, out_features=
hidden_size, bias=False)
def forward(self, decoder_output, hidden_state, encoder_outputs,
attn_mask=None):
"""
Applies attention by creating the weighted
context vector. Implementation is based on
`IBM/pytorch-seq2seq`.
"""
hidden_state = self.project(hidden_state)
hidden_state = hidden_state.transpose(0, 1)
encoder_outputs_t = encoder_outputs.transpose(1, 2)
attn_scores = torch.bmm(hidden_state, encoder_outputs_t)
if attn_mask is not None:
attn_scores = attn_scores.squeeze(1)
attn_scores.masked_fill_(attn_mask, neginf(attn_scores.dtype))
attn_scores = attn_scores.unsqueeze(1)
attn_weights = softmax(attn_scores, dim=-1)
attn_applied = torch.bmm(attn_weights, encoder_outputs)
stacked = torch.cat([decoder_output, attn_applied], dim=-1)
outputs = self.combine(stacked)
return outputs, attn_weights
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'hidden_size': 4}]
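# Editorial usage sketch (not from the original repo): Luong "general" attention,
# scoring the projected hidden state against every encoder step, mixing encoder
# outputs into a context, and combining that context with the decoder output.
def _attention_sketch():
    attn = Attention(hidden_size=4)
    decoder_output = torch.rand(2, 1, 4)   # batch * 1 * hidden
    hidden_state = torch.rand(1, 2, 4)     # layers * batch * hidden
    encoder_outputs = torch.rand(2, 6, 4)  # batch * src_len * hidden
    outputs, weights = attn(decoder_output, hidden_state, encoder_outputs)
    return outputs, weights  # (2, 1, 4) and (2, 1, 6)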
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
from torch.nn.modules import Module
from torch.nn import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (4, 16, 1),
0), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0),
out=buf1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
extern_kernels.bmm(buf3, primals_3, out=buf4)
buf5 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](primals_4, buf4, buf5, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf6 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0)
del buf4
extern_kernels.mm(reinterpret_tensor(buf5, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf6)
return reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0
), buf3, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0
), buf3, reinterpret_tensor(buf5, (16, 8), (8, 1), 0), primals_5
def neginf(dtype):
"""
Return a representable finite
number near -inf for a dtype.
"""
if dtype is torch.float16:
return -65504
else:
return -1e+20
class AttentionNew(Module):
"""
Luong style general attention from
https://arxiv.org/pdf/1508.04025.pdf.
"""
def __init__(self, hidden_size):
super().__init__()
self.project = Linear(in_features=hidden_size, out_features=
hidden_size, bias=False)
self.combine = Linear(in_features=hidden_size * 2, out_features=
hidden_size, bias=False)
def forward(self, input_0, input_1, input_2):
primals_1 = self.project.weight
primals_5 = self.combine.weight
primals_2 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
|
Mrpatekful/supervised-translation
|
Attention
| false
| 5,627
|
[
"MIT"
] | 1
|
d03db6a0fc25900fd42b8057a12adad0b8d025f8
|
https://github.com/Mrpatekful/supervised-translation/tree/d03db6a0fc25900fd42b8057a12adad0b8d025f8
|
GCN
|
from torch.nn import Module
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.optim
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.matmul(input, self.weight)
output = torch.matmul(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCN(nn.Module):
def __init__(self, in_feat_dim, nhid, out_feat_dim, dropout):
super(GCN, self).__init__()
"""
## Inputs:
- graph_nodes (batch_size, K, in_feat_dim): input features
- adjacency matrix (batch_size, K, K)
## Returns:
- gcn_enhance_feature (batch_size, K, out_feat_dim)
"""
self.gc1 = GraphConvolution(in_feat_dim, nhid)
self.gc2 = GraphConvolution(nhid, out_feat_dim)
self.dropout = dropout
def forward(self, x, adj):
x = F.relu(self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc2(x, adj)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_feat_dim': 4, 'nhid': 4, 'out_feat_dim': 4, 'dropout':
0.5}]
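# Editorial usage sketch (not from the original repo): two GraphConvolution layers
# with ReLU and dropout in between; adj is applied as a plain batched matmul, so any
# adjacency normalization is left to the caller (an identity matrix is used here
# purely as a placeholder).
def _gcn_sketch():
    gcn = GCN(in_feat_dim=4, nhid=8, out_feat_dim=4, dropout=0.5)
    gcn.eval()  # disable dropout so the sketch is deterministic
    nodes = torch.rand(2, 5, 4)         # batch * K * in_feat_dim
    adj = torch.eye(5).expand(2, 5, 5)  # batch * K * K placeholder adjacency
    out = gcn(nodes, adj)               # shape (2, 5, 4)
    return out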
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_3, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0),
out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0[grid(256)](buf2,
primals_4, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
primals_5, out=buf3)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_3, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0),
out=buf4)
del buf3
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_add_1[grid(256)](buf5, primals_6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_6
return buf5, reinterpret_tensor(primals_3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf2, (4, 64), (1, 4), 0), reinterpret_tensor(
primals_5, (4, 4), (1, 4), 0), buf6, reinterpret_tensor(primals_2,
(4, 64), (1, 4), 0)
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.matmul(input, self.weight)
output = torch.matmul(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCNNew(nn.Module):
def __init__(self, in_feat_dim, nhid, out_feat_dim, dropout):
super(GCNNew, self).__init__()
"""
## Inputs:
- graph_nodes (batch_size, K, in_feat_dim): input features
- adjacency matrix (batch_size, K, K)
## Returns:
- gcn_enhance_feature (batch_size, K, out_feat_dim)
"""
self.gc1 = GraphConvolution(in_feat_dim, nhid)
self.gc2 = GraphConvolution(nhid, out_feat_dim)
self.dropout = dropout
def forward(self, input_0, input_1):
primals_1 = self.gc1.weight
primals_4 = self.gc1.bias
primals_5 = self.gc2.weight
primals_6 = self.gc2.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
Myeongchan-Kim/SVAMP
|
GCN
| false
| 5,628
|
[
"MIT"
] | 1
|
9ff9ad471a61aa390199df4b99beb3b654f5c943
|
https://github.com/Myeongchan-Kim/SVAMP/tree/9ff9ad471a61aa390199df4b99beb3b654f5c943
|
Autoencoder
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def Conv(in_channels, out_channels):
return nn.Conv2d(in_channels, out_channels, 3, padding=1)
def concat(a, b):
return torch.cat((a, b), 1)
def pool(x):
return F.max_pool2d(x, 2, 2)
def relu(x):
return F.relu(x, inplace=True)
def upsample(x):
return F.interpolate(x, scale_factor=2, mode='nearest')
class Autoencoder(nn.Module):
def __init__(self, in_channels=3, out_channels=3):
super(Autoencoder, self).__init__()
ic = in_channels
ec1 = 32
ec2 = 48
ec3 = 64
ec4 = 80
ec5 = 112
dc5 = 160
dc4 = 112
dc3 = 96
dc2 = 64
dc1a = 64
dc1b = 32
oc = out_channels
self.enc_conv0 = Conv(ic, ec1)
self.enc_conv1 = Conv(ec1, ec1)
self.enc_conv2 = Conv(ec1, ec2)
self.enc_conv3 = Conv(ec2, ec3)
self.enc_conv4 = Conv(ec3, ec4)
self.enc_conv5 = Conv(ec4, ec5)
self.dec_conv5a = Conv(ec5 + ec4, dc5)
self.dec_conv5b = Conv(dc5, dc5)
self.dec_conv4a = Conv(dc5 + ec3, dc4)
self.dec_conv4b = Conv(dc4, dc4)
self.dec_conv3a = Conv(dc4 + ec2, dc3)
self.dec_conv3b = Conv(dc3, dc3)
self.dec_conv2a = Conv(dc3 + ec1, dc2)
self.dec_conv2b = Conv(dc2, dc2)
self.dec_conv1a = Conv(dc2 + ic, dc1a)
self.dec_conv1b = Conv(dc1a, dc1b)
self.dec_conv0 = Conv(dc1b, oc)
def forward(self, input):
x = relu(self.enc_conv0(input))
x = relu(self.enc_conv1(x))
x = pool1 = pool(x)
x = relu(self.enc_conv2(x))
x = pool2 = pool(x)
x = relu(self.enc_conv3(x))
x = pool3 = pool(x)
x = relu(self.enc_conv4(x))
x = pool4 = pool(x)
x = relu(self.enc_conv5(x))
x = pool(x)
x = upsample(x)
x = concat(x, pool4)
x = relu(self.dec_conv5a(x))
x = relu(self.dec_conv5b(x))
x = upsample(x)
x = concat(x, pool3)
x = relu(self.dec_conv4a(x))
x = relu(self.dec_conv4b(x))
x = upsample(x)
x = concat(x, pool2)
x = relu(self.dec_conv3a(x))
x = relu(self.dec_conv3b(x))
x = upsample(x)
x = concat(x, pool1)
x = relu(self.dec_conv2a(x))
x = relu(self.dec_conv2b(x))
x = upsample(x)
x = concat(x, input)
x = relu(self.dec_conv1a(x))
x = relu(self.dec_conv1b(x))
x = self.dec_conv0(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
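# Illustrative note (assumption, not from the original repo): the encoder pools the
# spatial resolution five times, so inputs whose height and width are divisible by 32
# round-trip to the same size through the skip-connected decoder; a minimal call:
def _autoencoder_usage_example():
    model = Autoencoder(in_channels=3, out_channels=3)
    image = torch.rand(1, 3, 64, 64)
    return model(image)  # output keeps the input shape: (1, 3, 64, 64)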
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 48
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 80
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 5120
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 7168
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 112
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1792
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy
='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_10(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 16 % 192
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex // 3072
x4 = xindex % 16
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 112, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 2, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last', other=0.0
)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr1 + (2 * tmp13 + 8 * tmp9 + 16 * x2 + 1792 * x3),
tmp4, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (1 + 2 * tmp13 + 8 * tmp9 + 16 * x2 + 1792 *
x3), tmp4, eviction_policy='evict_last', other=0.0)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tl.load(in_ptr1 + (4 + 2 * tmp13 + 8 * tmp9 + 16 * x2 + 1792 *
x3), tmp4, eviction_policy='evict_last', other=0.0)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.load(in_ptr1 + (5 + 2 * tmp13 + 8 * tmp9 + 16 * x2 + 1792 *
x3), tmp4, eviction_policy='evict_last', other=0.0)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp4, tmp20, tmp21)
tmp23 = tmp0 >= tmp3
tl.full([1], 192, tl.int64)
tmp26 = tl.load(in_ptr2 + (x4 + 16 * (-112 + x2) + 1280 * x3), tmp23,
other=0.0)
tmp27 = tl.where(tmp4, tmp22, tmp26)
tl.store(out_ptr0 + x5, tmp27, None)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 160
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_13(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 64 % 224
x1 = xindex // 8 % 8
x0 = xindex % 8
x3 = xindex // 14336
x4 = xindex % 64
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 160, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last', other=0.0
)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr1 + (tmp13 + 4 * tmp9 + 16 * x2 + 2560 * x3),
tmp4, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last', other=0.0
)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tl.full([1], 224, tl.int64)
tmp24 = tl.load(in_ptr3 + (x4 + 64 * (-160 + x2) + 4096 * x3), tmp21,
other=0.0)
tmp25 = tl.where(tmp4, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 112
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_16(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 256 % 160
x1 = xindex // 16 % 16
x0 = xindex % 16
x3 = xindex // 40960
x4 = xindex % 256
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 112, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 8, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last', other=0.0
)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr1 + (tmp13 + 8 * tmp9 + 64 * x2 + 7168 * x3),
tmp4, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last', other=0.0
)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tl.full([1], 160, tl.int64)
tmp24 = tl.load(in_ptr3 + (x4 + 256 * (-112 + x2) + 12288 * x3), tmp21,
other=0.0)
tmp25 = tl.where(tmp4, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 96
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_19(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_20(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 1024 % 128
x1 = xindex // 32 % 32
x0 = xindex % 32
x3 = xindex // 131072
x4 = xindex % 1024
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 96, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 16, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last', other=0.0
)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr1 + (tmp13 + 16 * tmp9 + 256 * x2 + 24576 * x3),
tmp4, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last', other=0.0
)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp24 = tl.load(in_ptr3 + (x4 + 1024 * (-96 + x2) + 32768 * x3), tmp21,
other=0.0)
tmp25 = tl.where(tmp4, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused_convolution_relu_21(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_22(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 4096 % 67
x1 = xindex // 64 % 64
x0 = xindex % 64
x3 = xindex // 274432
x4 = xindex % 4096
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([XBLOCK], 32, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tmp10 = tl.load(in_ptr0 + x0, tmp4, eviction_policy='evict_last', other=0.0
)
tmp11 = tmp10 + tmp6
tmp12 = tmp10 < 0
tmp13 = tl.where(tmp12, tmp11, tmp10)
tmp14 = tl.load(in_ptr1 + (tmp13 + 32 * tmp9 + 1024 * x2 + 65536 * x3),
tmp4, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp4, eviction_policy='evict_last', other=0.0
)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp4, tmp18, tmp19)
tmp21 = tmp0 >= tmp3
tl.full([1], 67, tl.int64)
tmp24 = tl.load(in_ptr3 + (x4 + 4096 * (-64 + x2) + 12288 * x3), tmp21,
other=0.0)
tmp25 = tl.where(tmp4, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused_convolution_relu_24(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_25(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_26(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_27(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 96
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_28(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 112
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_29(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 160
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (48, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (48,), (1,))
assert_size_stride(primals_8, (64, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (80, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (80,), (1,))
assert_size_stride(primals_12, (112, 80, 3, 3), (720, 9, 3, 1))
assert_size_stride(primals_13, (112,), (1,))
assert_size_stride(primals_14, (160, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_15, (160,), (1,))
assert_size_stride(primals_16, (160, 160, 3, 3), (1440, 9, 3, 1))
assert_size_stride(primals_17, (160,), (1,))
assert_size_stride(primals_18, (112, 224, 3, 3), (2016, 9, 3, 1))
assert_size_stride(primals_19, (112,), (1,))
assert_size_stride(primals_20, (112, 112, 3, 3), (1008, 9, 3, 1))
assert_size_stride(primals_21, (112,), (1,))
assert_size_stride(primals_22, (96, 160, 3, 3), (1440, 9, 3, 1))
assert_size_stride(primals_23, (96,), (1,))
assert_size_stride(primals_24, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_25, (96,), (1,))
assert_size_stride(primals_26, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (64,), (1,))
assert_size_stride(primals_28, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_29, (64,), (1,))
assert_size_stride(primals_30, (64, 67, 3, 3), (603, 9, 3, 1))
assert_size_stride(primals_31, (64,), (1,))
assert_size_stride(primals_32, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_33, (32,), (1,))
assert_size_stride(primals_34, (3, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_35, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(524288)](buf3, primals_5,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf3, buf4,
buf5, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 48, 32, 32), (49152, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_2[grid(196608)](buf7, primals_7,
196608, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((4, 48, 16, 16), (12288, 256, 16, 1),
torch.float32)
buf9 = empty_strided_cuda((4, 48, 16, 16), (12288, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(49152)](buf7, buf8,
buf9, 49152, XBLOCK=512, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 16, 16), (16384, 256, 16, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_4[grid(65536)](buf11, primals_9,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.
float32)
buf13 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(16384)](buf11,
buf12, buf13, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf14 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 80, 8, 8), (5120, 64, 8, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_6[grid(20480)](buf15, primals_11,
20480, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf16 = empty_strided_cuda((4, 80, 4, 4), (1280, 16, 4, 1), torch.
float32)
buf17 = empty_strided_cuda((4, 80, 4, 4), (1280, 16, 4, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_7[grid(5120)](buf15, buf16,
buf17, 5120, XBLOCK=256, num_warps=4, num_stages=1)
buf18 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 112, 4, 4), (1792, 16, 4, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_8[grid(7168)](buf19, primals_13,
7168, XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf20 = empty_strided_cuda((4, 112, 2, 2), (448, 4, 2, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_9[grid(1792)](buf19, buf20,
1792, XBLOCK=128, num_warps=4, num_stages=1)
buf21 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_10[grid(4)](buf21, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf22 = empty_strided_cuda((4, 192, 4, 4), (3072, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_11[grid(12288)](buf21, buf19, buf16, buf22,
12288, XBLOCK=256, num_warps=4, num_stages=1)
buf23 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 160, 4, 4), (2560, 16, 4, 1))
buf24 = buf23
del buf23
triton_poi_fused_convolution_relu_12[grid(10240)](buf24, primals_15,
10240, XBLOCK=128, num_warps=4, num_stages=1)
del primals_15
buf25 = extern_kernels.convolution(buf24, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 160, 4, 4), (2560, 16, 4, 1))
buf26 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_13[grid(8)](buf26, 8,
XBLOCK=8, num_warps=1, num_stages=1)
buf27 = empty_strided_cuda((4, 224, 8, 8), (14336, 64, 8, 1), torch
.float32)
triton_poi_fused_cat_14[grid(57344)](buf26, buf25, primals_17,
buf12, buf27, 57344, XBLOCK=512, num_warps=4, num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 112, 8, 8), (7168, 64, 8, 1))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_15[grid(28672)](buf29, primals_19,
28672, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf30 = extern_kernels.convolution(buf29, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 112, 8, 8), (7168, 64, 8, 1))
buf31 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_16[grid(16)](buf31, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf32 = empty_strided_cuda((4, 160, 16, 16), (40960, 256, 16, 1),
torch.float32)
triton_poi_fused_cat_17[grid(163840)](buf31, buf30, primals_21,
buf8, buf32, 163840, XBLOCK=512, num_warps=8, num_stages=1)
buf33 = extern_kernels.convolution(buf32, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 96, 16, 16), (24576, 256, 16, 1))
buf34 = buf33
del buf33
triton_poi_fused_convolution_relu_18[grid(98304)](buf34, primals_23,
98304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_23
buf35 = extern_kernels.convolution(buf34, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 96, 16, 16), (24576, 256, 16, 1))
buf36 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_19[grid(32)](buf36, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf37 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.float32)
triton_poi_fused_cat_20[grid(524288)](buf36, buf35, primals_25,
buf4, buf37, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
buf38 = extern_kernels.convolution(buf37, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_21[grid(262144)](buf39,
primals_27, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_27
buf40 = extern_kernels.convolution(buf39, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf41 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_22[grid(64)](buf41, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf42 = empty_strided_cuda((4, 67, 64, 64), (274432, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_23[grid(1097728)](buf41, buf40, primals_29,
primals_3, buf42, 1097728, XBLOCK=1024, num_warps=4, num_stages=1)
buf43 = extern_kernels.convolution(buf42, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf44 = buf43
del buf43
triton_poi_fused_convolution_relu_24[grid(1048576)](buf44,
primals_31, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_31
buf45 = extern_kernels.convolution(buf44, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf46 = buf45
del buf45
triton_poi_fused_convolution_relu_0[grid(524288)](buf46, primals_33,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_33
buf47 = extern_kernels.convolution(buf46, primals_34, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf48 = buf47
del buf47
triton_poi_fused_convolution_25[grid(49152)](buf48, primals_35,
49152, XBLOCK=512, num_warps=4, num_stages=1)
del primals_35
buf49 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_26[grid(262144)](
buf40, primals_29, buf49, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf40
del primals_29
buf50 = empty_strided_cuda((4, 96, 16, 16), (24576, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_27[grid(98304)](
buf35, primals_25, buf50, 98304, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf35
del primals_25
buf51 = empty_strided_cuda((4, 112, 8, 8), (7168, 64, 8, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_28[grid(28672)](
buf30, primals_21, buf51, 28672, XBLOCK=256, num_warps=4,
num_stages=1)
del buf30
del primals_21
buf52 = empty_strided_cuda((4, 160, 4, 4), (2560, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_29[grid(10240)](
buf25, primals_17, buf52, 10240, XBLOCK=256, num_warps=4,
num_stages=1)
del buf25
del primals_17
return (buf48, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, primals_32, primals_34, buf1, buf3, buf4, buf5, buf7,
buf8, buf9, buf11, buf12, buf13, buf15, buf16, buf17, buf19, buf20,
buf21, buf22, buf24, buf26, buf27, buf29, buf31, buf32, buf34,
buf36, buf37, buf39, buf41, buf42, buf44, buf46, buf49, buf50,
buf51, buf52)
def Conv(in_channels, out_channels):
return nn.Conv2d(in_channels, out_channels, 3, padding=1)
def concat(a, b):
return torch.cat((a, b), 1)
def pool(x):
return F.max_pool2d(x, 2, 2)
def relu(x):
return F.relu(x, inplace=True)
def upsample(x):
return F.interpolate(x, scale_factor=2, mode='nearest')
class AutoencoderNew(nn.Module):
def __init__(self, in_channels=3, out_channels=3):
super(AutoencoderNew, self).__init__()
ic = in_channels
ec1 = 32
ec2 = 48
ec3 = 64
ec4 = 80
ec5 = 112
dc5 = 160
dc4 = 112
dc3 = 96
dc2 = 64
dc1a = 64
dc1b = 32
oc = out_channels
self.enc_conv0 = Conv(ic, ec1)
self.enc_conv1 = Conv(ec1, ec1)
self.enc_conv2 = Conv(ec1, ec2)
self.enc_conv3 = Conv(ec2, ec3)
self.enc_conv4 = Conv(ec3, ec4)
self.enc_conv5 = Conv(ec4, ec5)
self.dec_conv5a = Conv(ec5 + ec4, dc5)
self.dec_conv5b = Conv(dc5, dc5)
self.dec_conv4a = Conv(dc5 + ec3, dc4)
self.dec_conv4b = Conv(dc4, dc4)
self.dec_conv3a = Conv(dc4 + ec2, dc3)
self.dec_conv3b = Conv(dc3, dc3)
self.dec_conv2a = Conv(dc3 + ec1, dc2)
self.dec_conv2b = Conv(dc2, dc2)
self.dec_conv1a = Conv(dc2 + ic, dc1a)
self.dec_conv1b = Conv(dc1a, dc1b)
self.dec_conv0 = Conv(dc1b, oc)
def forward(self, input_0):
primals_1 = self.enc_conv0.weight
primals_2 = self.enc_conv0.bias
primals_4 = self.enc_conv1.weight
primals_5 = self.enc_conv1.bias
primals_6 = self.enc_conv2.weight
primals_7 = self.enc_conv2.bias
primals_8 = self.enc_conv3.weight
primals_9 = self.enc_conv3.bias
primals_10 = self.enc_conv4.weight
primals_11 = self.enc_conv4.bias
primals_12 = self.enc_conv5.weight
primals_13 = self.enc_conv5.bias
primals_14 = self.dec_conv5a.weight
primals_15 = self.dec_conv5a.bias
primals_16 = self.dec_conv5b.weight
primals_17 = self.dec_conv5b.bias
primals_18 = self.dec_conv4a.weight
primals_19 = self.dec_conv4a.bias
primals_20 = self.dec_conv4b.weight
primals_21 = self.dec_conv4b.bias
primals_22 = self.dec_conv3a.weight
primals_23 = self.dec_conv3a.bias
primals_24 = self.dec_conv3b.weight
primals_25 = self.dec_conv3b.bias
primals_26 = self.dec_conv2a.weight
primals_27 = self.dec_conv2a.bias
primals_28 = self.dec_conv2b.weight
primals_29 = self.dec_conv2b.bias
primals_30 = self.dec_conv1a.weight
primals_31 = self.dec_conv1a.bias
primals_32 = self.dec_conv1b.weight
primals_33 = self.dec_conv1b.bias
primals_34 = self.dec_conv0.weight
primals_35 = self.dec_conv0.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35])
return output[0]
|
LongerVision/oidn
|
Autoencoder
| false
| 5,629
|
[
"Apache-2.0"
] | 1
|
2f9e59f8b747b217f78c5c274f4f2bff347a03a7
|
https://github.com/LongerVision/oidn/tree/2f9e59f8b747b217f78c5c274f4f2bff347a03a7
|
MLP
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import kaiming_normal
def ZeroInitializer(param):
shape = param.size()
init = np.zeros(shape).astype(np.float32)
param.data.set_(torch.from_numpy(init))
def Linear(initializer=kaiming_normal, bias_initializer=ZeroInitializer):
class CustomLinear(nn.Linear):
def reset_parameters(self):
initializer(self.weight)
if self.bias is not None:
bias_initializer(self.bias)
return CustomLinear
class LayerNormalization(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
super(LayerNormalization, self).__init__()
self.eps = eps
self.a2 = nn.Parameter(torch.ones(1, hidden_size), requires_grad=True)
self.b2 = nn.Parameter(torch.zeros(1, hidden_size), requires_grad=True)
def forward(self, z):
mu = torch.mean(z)
sigma = torch.std(z)
ln_out = (z - mu) / (sigma + self.eps)
ln_out = ln_out * self.a2 + self.b2
return ln_out
class MLP(nn.Module):
def __init__(self, mlp_input_dim, mlp_dim, num_classes, num_mlp_layers,
mlp_ln, classifier_dropout_rate=0.0):
super(MLP, self).__init__()
self.num_mlp_layers = num_mlp_layers
self.mlp_ln = mlp_ln
self.classifier_dropout_rate = classifier_dropout_rate
features_dim = mlp_input_dim
if mlp_ln:
self.ln_inp = LayerNormalization(mlp_input_dim)
for i in range(num_mlp_layers):
setattr(self, 'l{}'.format(i), Linear()(features_dim, mlp_dim))
if mlp_ln:
setattr(self, 'ln{}'.format(i), LayerNormalization(mlp_dim))
features_dim = mlp_dim
setattr(self, 'l{}'.format(num_mlp_layers), Linear()(features_dim,
num_classes))
def forward(self, h):
if self.mlp_ln:
h = self.ln_inp(h)
h = F.dropout(h, self.classifier_dropout_rate, training=self.training)
for i in range(self.num_mlp_layers):
layer = getattr(self, 'l{}'.format(i))
h = layer(h)
h = F.relu(h)
if self.mlp_ln:
ln = getattr(self, 'ln{}'.format(i))
h = ln(h)
h = F.dropout(h, self.classifier_dropout_rate, training=self.
training)
layer = getattr(self, 'l{}'.format(self.num_mlp_layers))
y = layer(h)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'mlp_input_dim': 4, 'mlp_dim': 4, 'num_classes': 4,
'num_mlp_layers': 1, 'mlp_ln': 4}]
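# Minimal usage sketch (illustrative, not part of the original repo). Note that
# LayerNormalization above normalizes by the mean and std of the whole tensor, and
# mlp_ln only needs to be truthy to enable those normalization blocks.
def _mlp_usage_example():
    mlp = MLP(mlp_input_dim=4, mlp_dim=4, num_classes=4, num_mlp_layers=1, mlp_ln=True)
    features = torch.rand(4, 4, 4, 4)
    return mlp(features)  # logits whose last dimension equals num_classes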
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
from torch.nn.init import kaiming_normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 4
tmp0 = tl.load(in_ptr0 + r0, None)
tmp25 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp5 = tl.broadcast_to(tmp1, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 256, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp3 / tmp16
tmp18 = 255.0
tmp19 = tmp15 / tmp18
tmp20 = libdevice.sqrt(tmp19)
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = tmp0 - tmp17
tmp24 = tmp23 / tmp22
tmp26 = tmp24 * tmp25
tmp28 = tmp26 + tmp27
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp22, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp28, None)
@triton.jit
def triton_per_fused_add_div_mean_mul_relu_std_sub_1(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 4
tmp0 = tl.load(in_ptr0 + r0, None)
tmp27 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp7 = tl.broadcast_to(tmp3, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl.full([1], 256, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp5 / tmp18
tmp20 = 255.0
tmp21 = tmp17 / tmp20
tmp22 = libdevice.sqrt(tmp21)
tmp23 = tmp2 - tmp19
tmp24 = 1e-05
tmp25 = tmp22 + tmp24
tmp26 = tmp23 / tmp25
tmp28 = tmp26 * tmp27
tmp30 = tmp28 + tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp22, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp30, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
buf5 = buf3
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_std_sub_0[grid(1)](buf1, buf5,
primals_1, primals_2, primals_3, buf6, 1, 256, num_warps=2,
num_stages=1)
del primals_2
del primals_3
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf6, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf7)
del primals_5
buf8 = empty_strided_cuda((), (), torch.float32)
buf11 = empty_strided_cuda((), (), torch.float32)
buf9 = buf8
del buf8
buf13 = buf11
del buf11
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_add_div_mean_mul_relu_std_sub_1[grid(1)](buf9,
buf13, buf7, primals_6, primals_7, buf14, 1, 256, num_warps=2,
num_stages=1)
del primals_7
buf15 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf14, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf15)
del primals_9
return reinterpret_tensor(buf15, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, primals_6, buf1, buf5, reinterpret_tensor(buf6, (64,
4), (4, 1), 0), buf7, buf9, buf13, reinterpret_tensor(buf14, (64, 4
), (4, 1), 0), primals_8, primals_4
def ZeroInitializer(param):
shape = param.size()
init = np.zeros(shape).astype(np.float32)
param.data.set_(torch.from_numpy(init))
def Linear(initializer=kaiming_normal, bias_initializer=ZeroInitializer):
class CustomLinear(nn.Linear):
def reset_parameters(self):
initializer(self.weight)
if self.bias is not None:
bias_initializer(self.bias)
return CustomLinear
class LayerNormalization(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
super(LayerNormalization, self).__init__()
self.eps = eps
self.a2 = nn.Parameter(torch.ones(1, hidden_size), requires_grad=True)
self.b2 = nn.Parameter(torch.zeros(1, hidden_size), requires_grad=True)
def forward(self, z):
mu = torch.mean(z)
sigma = torch.std(z)
ln_out = (z - mu) / (sigma + self.eps)
ln_out = ln_out * self.a2 + self.b2
return ln_out
class MLPNew(nn.Module):
def __init__(self, mlp_input_dim, mlp_dim, num_classes, num_mlp_layers,
mlp_ln, classifier_dropout_rate=0.0):
super(MLPNew, self).__init__()
self.num_mlp_layers = num_mlp_layers
self.mlp_ln = mlp_ln
self.classifier_dropout_rate = classifier_dropout_rate
features_dim = mlp_input_dim
if mlp_ln:
self.ln_inp = LayerNormalization(mlp_input_dim)
for i in range(num_mlp_layers):
setattr(self, 'l{}'.format(i), Linear()(features_dim, mlp_dim))
if mlp_ln:
setattr(self, 'ln{}'.format(i), LayerNormalization(mlp_dim))
features_dim = mlp_dim
setattr(self, 'l{}'.format(num_mlp_layers), Linear()(features_dim,
num_classes))
def forward(self, input_0):
primals_2 = self.ln_inp.a2
primals_3 = self.ln_inp.b2
primals_4 = self.l0.weight
primals_5 = self.l0.bias
primals_6 = self.ln0.a2
primals_7 = self.ln0.b2
primals_8 = self.l1.weight
primals_9 = self.l1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
NLP-Discourse-SoochowU/rst_dp2019Bottom2Up
|
MLP
| false
| 5,630
|
[
"MIT"
] | 1
|
ac1624127c9c8a3301685193ac8239357e01f6ca
|
https://github.com/NLP-Discourse-SoochowU/rst_dp2019Bottom2Up/tree/ac1624127c9c8a3301685193ac8239357e01f6ca
|
PEM
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import init
import torch.nn.parallel
class PEM(torch.nn.Module):
def __init__(self, opt):
super(PEM, self).__init__()
self.feat_dim = opt['pem_feat_dim']
self.batch_size = opt['pem_batch_size']
self.hidden_dim = opt['pem_hidden_dim']
self.u_ratio_m = opt['pem_u_ratio_m']
self.u_ratio_l = opt['pem_u_ratio_l']
self.output_dim = 1
self.pem_best_loss = 1000000
self.fc1 = torch.nn.Linear(in_features=self.feat_dim, out_features=
self.hidden_dim, bias=True)
self.fc2 = torch.nn.Linear(in_features=self.hidden_dim,
out_features=self.output_dim, bias=True)
self.reset_params()
@staticmethod
def weight_init(m):
if isinstance(m, nn.Conv2d):
init.xavier_uniform_(m.weight)
init.constant(m.bias, 0)
def reset_params(self):
for i, m in enumerate(self.modules()):
self.weight_init(m)
def forward(self, x):
x = F.relu(0.1 * self.fc1(x))
x = torch.sigmoid(0.1 * self.fc2(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(pem_feat_dim=4, pem_batch_size=4,
pem_hidden_dim=4, pem_u_ratio_m=4, pem_u_ratio_l=4)}]
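# Minimal usage sketch (illustrative, not part of the original repo): PEM is a small
# two-layer scorer; both linear outputs are scaled by 0.1 before the ReLU/sigmoid.
def _pem_usage_example():
    opt = {'pem_feat_dim': 4, 'pem_batch_size': 4, 'pem_hidden_dim': 4,
           'pem_u_ratio_m': 4, 'pem_u_ratio_l': 4}
    model = PEM(opt)
    scores = model(torch.rand(4, 4, 4, 4))
    return scores  # values in (0, 1) with a trailing dimension of size 1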
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn import init
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.1
tmp4 = tmp2 * tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = 0.1
tmp5 = tmp3 * tmp4
tmp6 = tl.sigmoid(tmp5)
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_mul_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf2
triton_poi_fused_mul_sigmoid_1[grid(64)](buf3, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_4, buf4
class PEMNew(torch.nn.Module):
def __init__(self, opt):
super(PEMNew, self).__init__()
self.feat_dim = opt['pem_feat_dim']
self.batch_size = opt['pem_batch_size']
self.hidden_dim = opt['pem_hidden_dim']
self.u_ratio_m = opt['pem_u_ratio_m']
self.u_ratio_l = opt['pem_u_ratio_l']
self.output_dim = 1
self.pem_best_loss = 1000000
self.fc1 = torch.nn.Linear(in_features=self.feat_dim, out_features=
self.hidden_dim, bias=True)
self.fc2 = torch.nn.Linear(in_features=self.hidden_dim,
out_features=self.output_dim, bias=True)
self.reset_params()
@staticmethod
def weight_init(m):
if isinstance(m, nn.Conv2d):
init.xavier_uniform_(m.weight)
init.constant(m.bias, 0)
def reset_params(self):
for i, m in enumerate(self.modules()):
self.weight_init(m)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
NEUdeep/BSN
|
PEM
| false
| 5,631
|
[
"MIT"
] | 1
|
e987cc159976ebe54027b562d833a92a5aadf864
|
https://github.com/NEUdeep/BSN/tree/e987cc159976ebe54027b562d833a92a5aadf864
|
FocalLoss2d
|
import torch
from torch import nn
class FocalLoss2d(nn.Module):
def __init__(self, gamma=2, ignore_index=255):
super().__init__()
self.gamma = gamma
self.ignore_index = ignore_index
def forward(self, outputs, targets):
outputs = outputs.contiguous()
targets = targets.contiguous()
eps = 1e-08
non_ignored = targets.view(-1) != self.ignore_index
targets = targets.view(-1)[non_ignored].float()
outputs = outputs.contiguous().view(-1)[non_ignored]
outputs = torch.clamp(outputs, eps, 1.0 - eps)
targets = torch.clamp(targets, eps, 1.0 - eps)
pt = (1 - targets) * (1 - outputs) + targets * outputs
return (-(1.0 - pt) ** self.gamma * torch.log(pt)).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
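# Minimal usage sketch (illustrative, not part of the original repo): the loss expects
# per-pixel probabilities in [0, 1] and binary targets, ignores pixels labelled
# ignore_index, and reduces to mean(-(1 - pt) ** gamma * log(pt)).
def _focal_loss_usage_example():
    criterion = FocalLoss2d(gamma=2)
    probs = torch.rand(4, 1, 4, 4)
    targets = (torch.rand(4, 1, 4, 4) > 0.5).float()
    return criterion(probs, targets)  # scalar loss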
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_ne_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 255.0
tmp2 = tmp0 != tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256,), (1,), torch.bool)
get_raw_stream(0)
triton_poi_fused_ne_0[grid(256)](arg1_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return reinterpret_tensor(arg1_1, (256,), (1,), 0), buf0, arg0_1
class FocalLoss2dNew(nn.Module):
def __init__(self, gamma=2, ignore_index=255):
super().__init__()
self.gamma = gamma
self.ignore_index = ignore_index
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Nareshvrao/Understanding-Clouds-from-Satellite-Images
|
FocalLoss2d
| false
| 5,632
|
[
"MIT"
] | 1
|
14c5e1f15e803e9638d7a3fa8b9e0d929a6015b6
|
https://github.com/Nareshvrao/Understanding-Clouds-from-Satellite-Images/tree/14c5e1f15e803e9638d7a3fa8b9e0d929a6015b6
|
LayerNormalization
|
import torch
import torch.nn as nn
class LayerNormalization(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
super(LayerNormalization, self).__init__()
self.eps = eps
self.a2 = nn.Parameter(torch.ones(1, hidden_size), requires_grad=True)
self.b2 = nn.Parameter(torch.zeros(1, hidden_size), requires_grad=True)
def forward(self, z):
mu = torch.mean(z)
sigma = torch.std(z)
ln_out = (z - mu) / (sigma + self.eps)
ln_out = ln_out * self.a2 + self.b2
return ln_out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 4
tmp0 = tl.load(in_ptr0 + r0, None)
tmp25 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp5 = tl.broadcast_to(tmp1, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 256, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp3 / tmp16
tmp18 = 255.0
tmp19 = tmp15 / tmp18
tmp20 = libdevice.sqrt(tmp19)
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = tmp0 - tmp17
tmp24 = tmp23 / tmp22
tmp26 = tmp24 * tmp25
tmp28 = tmp26 + tmp27
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp22, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp28, None)
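# Single fused reduction: global mean and Bessel-corrected std over all 256
# elements, followed by the elementwise normalisation, scale by a2 and shift by b2.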
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
buf5 = buf3
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_std_sub_0[grid(1)](buf1, buf5,
primals_1, primals_2, primals_3, buf6, 1, 256, num_warps=2,
num_stages=1)
del primals_2
del primals_3
return buf6, primals_1, buf1, buf5
class LayerNormalizationNew(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
super(LayerNormalizationNew, self).__init__()
self.eps = eps
self.a2 = nn.Parameter(torch.ones(1, hidden_size), requires_grad=True)
self.b2 = nn.Parameter(torch.zeros(1, hidden_size), requires_grad=True)
def forward(self, input_0):
primals_2 = self.a2
primals_3 = self.b2
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
NLP-Discourse-SoochowU/rst_dp2019Bottom2Up
|
LayerNormalization
| false
| 5,633
|
[
"MIT"
] | 1
|
ac1624127c9c8a3301685193ac8239357e01f6ca
|
https://github.com/NLP-Discourse-SoochowU/rst_dp2019Bottom2Up/tree/ac1624127c9c8a3301685193ac8239357e01f6ca
|
TEM
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import init
import torch.nn.parallel
class TEM(torch.nn.Module):
def __init__(self, opt):
super(TEM, self).__init__()
self.feat_dim = opt['tem_feat_dim']
self.temporal_dim = opt['temporal_scale']
self.batch_size = opt['tem_batch_size']
self.c_hidden = opt['tem_hidden_dim']
self.tem_best_loss = 10000000
self.output_dim = 3
self.conv1 = torch.nn.Conv1d(in_channels=self.feat_dim,
out_channels=self.c_hidden, kernel_size=3, stride=1, padding=1,
groups=1)
self.conv2 = torch.nn.Conv1d(in_channels=self.c_hidden,
out_channels=self.c_hidden, kernel_size=3, stride=1, padding=1,
groups=1)
self.conv3 = torch.nn.Conv1d(in_channels=self.c_hidden,
out_channels=self.output_dim, kernel_size=1, stride=1, padding=0)
self.reset_params()
@staticmethod
def weight_init(m):
if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)
            init.constant_(m.bias, 0)
def reset_params(self):
for i, m in enumerate(self.modules()):
self.weight_init(m)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = torch.sigmoid(0.01 * self.conv3(x))
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(tem_feat_dim=4, temporal_scale=1.0,
tem_batch_size=4, tem_hidden_dim=4)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn import init
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.01
tmp4 = tmp2 * tmp3
tmp5 = tl.sigmoid(tmp4)
tl.store(in_out_ptr0 + x2, tmp5, xmask)
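# Kernel 0 fuses the conv bias add with ReLU (and stores the <= 0 mask for the
# backward pass); kernel 1 fuses the final bias add, the 0.01 scaling and the sigmoid.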
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (3, 4, 1), (4, 1, 1))
assert_size_stride(primals_7, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 4), (16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1,
primals_2, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4
), (0, 4, 1), 0), primals_4, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf2, (1, 4, 4), (16, 4, 1))
buf3 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16)](buf3,
primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 4, 4
), (0, 4, 1), 0), primals_6, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf4, (1, 3, 4), (12, 4, 1))
buf5 = reinterpret_tensor(buf4, (3, 4), (4, 1), 0)
del buf4
triton_poi_fused_mul_sigmoid_1[grid(12)](buf5, primals_7, 12,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_7
return buf5, primals_1, primals_4, primals_6, reinterpret_tensor(primals_3,
(1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 4), (16,
4, 1), 0), reinterpret_tensor(buf3, (1, 4, 4), (16, 4, 1), 0
), buf5, buf6, buf7
class TEMNew(torch.nn.Module):
def __init__(self, opt):
super(TEMNew, self).__init__()
self.feat_dim = opt['tem_feat_dim']
self.temporal_dim = opt['temporal_scale']
self.batch_size = opt['tem_batch_size']
self.c_hidden = opt['tem_hidden_dim']
self.tem_best_loss = 10000000
self.output_dim = 3
self.conv1 = torch.nn.Conv1d(in_channels=self.feat_dim,
out_channels=self.c_hidden, kernel_size=3, stride=1, padding=1,
groups=1)
self.conv2 = torch.nn.Conv1d(in_channels=self.c_hidden,
out_channels=self.c_hidden, kernel_size=3, stride=1, padding=1,
groups=1)
self.conv3 = torch.nn.Conv1d(in_channels=self.c_hidden,
out_channels=self.output_dim, kernel_size=1, stride=1, padding=0)
self.reset_params()
@staticmethod
def weight_init(m):
if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)
            init.constant_(m.bias, 0)
def reset_params(self):
for i, m in enumerate(self.modules()):
self.weight_init(m)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
NEUdeep/BSN
|
TEM
| false
| 5,634
|
[
"MIT"
] | 1
|
e987cc159976ebe54027b562d833a92a5aadf864
|
https://github.com/NEUdeep/BSN/tree/e987cc159976ebe54027b562d833a92a5aadf864
|
WeightedBCE
|
import torch
from torch import nn
import torch.nn.functional as F
class WeightedBCE(nn.Module):
def __init__(self, weights=None):
super(WeightedBCE, self).__init__()
self.weights = weights
def forward(self, logit, truth):
batch_size, num_class = truth.shape
logit = logit.view(batch_size, num_class)
truth = truth.view(batch_size, num_class)
assert logit.shape == truth.shape
loss = F.binary_cross_entropy_with_logits(logit, truth, reduction=
'none')
if self.weights is None:
loss = loss.mean()
else:
pos = (truth > 0.5).float()
neg = (truth < 0.5).float()
pos_sum = pos.sum().item() + 1e-12
neg_sum = neg.sum().item() + 1e-12
loss = (self.weights[1] * pos * loss / pos_sum + self.weights[0
] * neg * loss / neg_sum).sum()
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mean_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tmp16 = 16.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)
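# Numerically stable elementwise BCE-with-logits reduced to its mean over the 16
# logits; this corresponds to the weights=None branch of the original forward.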
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mean_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class WeightedBCENew(nn.Module):
def __init__(self, weights=None):
super(WeightedBCENew, self).__init__()
self.weights = weights
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Nareshvrao/Understanding-Clouds-from-Satellite-Images
|
WeightedBCE
| false
| 5,635
|
[
"MIT"
] | 1
|
14c5e1f15e803e9638d7a3fa8b9e0d929a6015b6
|
https://github.com/Nareshvrao/Understanding-Clouds-from-Satellite-Images/tree/14c5e1f15e803e9638d7a3fa8b9e0d929a6015b6
|
CNNCifar
|
from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.nn.functional as F
class CNNCifar(nn.Module):
def __init__(self, args):
super(CNNCifar, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, args.num_classes)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [[], {'args': _mock_config(num_classes=4)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__log_softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
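# Kernels 0-5 fuse the bias add + ReLU after each conv/linear and implement the
# 2x2 max pooling (values plus argmax indices); kernels 6 and 7 split log_softmax
# into a max-subtraction pass and a log-sum-exp pass. The convolutions and matmuls
# themselves are dispatched to extern kernels rather than Triton.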
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84,), (1,))
assert_size_stride(primals_10, (4, 84), (84, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2,
18816, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
buf3, 4704, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0),
reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1,
120), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (84, 4), (1, 84), 0), alpha=1, beta=1, out=buf12)
del primals_11
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_6[grid(16)](buf12, buf13, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf14 = buf12
del buf12
triton_poi_fused__log_softmax_7[grid(16)](buf13, buf14, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf13
return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11,
buf14, primals_10, primals_8, primals_6)
class CNNCifarNew(nn.Module):
def __init__(self, args):
super(CNNCifarNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, args.num_classes)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
Ilcyb/Federated-Learning-PyTorch
|
CNNCifar
| false
| 5,636
|
[
"MIT"
] | 1
|
4830a89ffa1ac0ad0e52a4551338532cfb4ca210
|
https://github.com/Ilcyb/Federated-Learning-PyTorch/tree/4830a89ffa1ac0ad0e52a4551338532cfb4ca210
|
SoftDiceLoss
|
import torch
from torch import nn
import torch.nn.functional as F
class SoftDiceLoss(nn.Module):
def __init__(self):
super(SoftDiceLoss, self).__init__()
def forward(self, logits, targets):
eps = 1e-09
num = targets.size(0)
        probs = torch.sigmoid(logits)
m1 = probs.view(num, -1)
m2 = targets.view(num, -1).float()
intersection = torch.sum(m1 * m2, 1)
union = torch.sum(m1, dim=1) + torch.sum(m2, dim=1)
score = (2 * intersection + eps) / (union + eps)
score = (1 - score).mean()
return score
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1e-09
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp3
tmp9 = tmp4 / tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp9
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.sum(tmp12, 1)[:, None]
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
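# Two-stage reduction: the first kernel produces per-sample sum(probs*target),
# sum(probs) and sum(target); the second combines them into (2*inter + eps)/(union + eps),
# takes 1 - score and averages over the batch of 4.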
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mean_mul_rsub_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
class SoftDiceLossNew(nn.Module):
def __init__(self):
super(SoftDiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Nareshvrao/Understanding-Clouds-from-Satellite-Images
|
SoftDiceLoss
| false
| 5,637
|
[
"MIT"
] | 1
|
14c5e1f15e803e9638d7a3fa8b9e0d929a6015b6
|
https://github.com/Nareshvrao/Understanding-Clouds-from-Satellite-Images/tree/14c5e1f15e803e9638d7a3fa8b9e0d929a6015b6
|
SoftDiceLoss_binary
|
import torch
from torch import nn
import torch.nn.functional as F
class SoftDiceLoss_binary(nn.Module):
def __init__(self):
super(SoftDiceLoss_binary, self).__init__()
def forward(self, input, target):
smooth = 0.01
batch_size = input.size(0)
        input = torch.sigmoid(input).view(batch_size, -1)
target = target.clone().view(batch_size, -1)
inter = torch.sum(input * target, 1) + smooth
union = torch.sum(input * input, 1) + torch.sum(target * target, 1
) + smooth
score = torch.sum(2.0 * inter / union) / float(batch_size)
score = 1.0 - torch.clamp(score, 0.0, 1.0 - 1e-07)
return score
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tmp1 * tmp1
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp2 * tmp2
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
tl.store(out_ptr2 + x0, tmp17, xmask)
@triton.jit
def triton_per_fused_add_clamp_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = 0.01
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = 0.0
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp17 = 0.9999999
tmp18 = triton_helpers.minimum(tmp16, tmp17)
tmp19 = 1.0
tmp20 = tmp19 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)
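# Same two-stage layout as SoftDiceLoss above, but with squared sums in the union,
# the 0.01 smoothing constant, division by the batch size, and a clamp to
# [0, 1 - 1e-07] before the final 1 - score.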
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_clamp_div_mul_rsub_sum_1[grid(1)](buf4, buf0,
buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
class SoftDiceLoss_binaryNew(nn.Module):
def __init__(self):
super(SoftDiceLoss_binaryNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Nareshvrao/Understanding-Clouds-from-Satellite-Images
|
SoftDiceLoss_binary
| false
| 5,638
|
[
"MIT"
] | 1
|
14c5e1f15e803e9638d7a3fa8b9e0d929a6015b6
|
https://github.com/Nareshvrao/Understanding-Clouds-from-Satellite-Images/tree/14c5e1f15e803e9638d7a3fa8b9e0d929a6015b6
|
Atten
|
from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.nn.functional as F
class Atten(nn.Module):
def __init__(self, config):
super(Atten, self).__init__()
hidden_size = config.hidden_size
classifier_dropout = (config.classifier_dropout if config.
classifier_dropout is not None else config.hidden_dropout_prob)
self.dropout = nn.Dropout(classifier_dropout)
num_classes = config.num_labels
self.fc = nn.Linear(hidden_size, num_classes)
self.tanh = nn.Tanh()
self.w = nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
x = self.dropout(x)
M = self.tanh(x)
alpha = F.softmax(torch.matmul(M, self.w), dim=1).unsqueeze(-1)
out = x * alpha
out = torch.sum(out, 1)
out = self.fc(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, classifier_dropout=
0.5, num_labels=4)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + 2)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + 3)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp1 = libdevice.tanh(tmp0)
tmp4 = tmp1 * tmp3
tmp6 = libdevice.tanh(tmp5)
tmp9 = tmp6 * tmp8
tmp10 = tmp4 + tmp9
tmp12 = libdevice.tanh(tmp11)
tmp15 = tmp12 * tmp14
tmp16 = tmp10 + tmp15
tmp18 = libdevice.tanh(tmp17)
tmp21 = tmp18 * tmp20
tmp22 = tmp16 + tmp21
tl.store(out_ptr0 + x0, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
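# Kernel 0 fuses tanh(x) with the dot product against w (the matrix-vector product),
# kernels 1/2 are the usual two-pass softmax, and kernel 3 applies the attention
# weights and sums over dim 1; the final linear layer runs as an extern addmm.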
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_mv_0[grid(64)](primals_1, primals_2, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
triton_poi_fused__softmax_2[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused_mul_sum_3[grid(64)](primals_1, buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (16, 4), (4, 1), 0)
del buf2
extern_kernels.addmm(primals_4, reinterpret_tensor(buf3, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_4
return reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0
), primals_1, primals_2, reinterpret_tensor(buf3, (16, 4), (4, 1), 0
), primals_3
class AttenNew(nn.Module):
def __init__(self, config):
super(AttenNew, self).__init__()
hidden_size = config.hidden_size
classifier_dropout = (config.classifier_dropout if config.
classifier_dropout is not None else config.hidden_dropout_prob)
self.dropout = nn.Dropout(classifier_dropout)
num_classes = config.num_labels
self.fc = nn.Linear(hidden_size, num_classes)
self.tanh = nn.Tanh()
self.w = nn.Parameter(torch.zeros(hidden_size))
def forward(self, input_0):
primals_2 = self.w
primals_3 = self.fc.weight
primals_4 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
NTDXYG/EL-CodeBert
|
Atten
| false
| 5,639
|
[
"MIT"
] | 1
|
62a2364db567f8887a339c40e2c7f7807bedfd50
|
https://github.com/NTDXYG/EL-CodeBert/tree/62a2364db567f8887a339c40e2c7f7807bedfd50
|
ActorCritic
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions import Categorical
class ActorCritic(nn.Module):
def __init__(self):
super().__init__()
self.affine1 = nn.Linear(4, 128)
self.action_head = nn.Linear(128, 2)
self.value_head = nn.Linear(128, 1)
def forward(self, x):
x = F.relu(self.affine1(x))
action_scores = self.action_head(x)
state_values = self.value_head(x)
return F.softmax(action_scores, dim=-1), state_values
def select_action(self, state, values, select_props):
state = torch.from_numpy(state).float()
props, value = self(Variable(state))
dist = Categorical(props)
action = dist.sample()
log_props = dist.log_prob(action)
values.append(value)
select_props.append(log_props)
return action.data[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.autograd import Variable
from torch.distributions import Categorical
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
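# Kernel 0 fuses the hidden-layer bias add with ReLU (saving the <= 0 mask for
# backward); kernel 1 is a small 2-way softmax over the action logits. The three
# linear layers run as extern mm/addmm calls.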
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 128), (128, 1))
assert_size_stride(primals_5, (2,), (1,))
assert_size_stride(primals_6, (1, 128), (128, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf6, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 2), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128),
0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused__softmax_1[grid(128)](buf2, buf5, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
return buf5, reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0
), buf5, primals_6, primals_4, buf6
class ActorCriticNew(nn.Module):
def __init__(self):
super().__init__()
self.affine1 = nn.Linear(4, 128)
self.action_head = nn.Linear(128, 2)
self.value_head = nn.Linear(128, 1)
def select_action(self, state, values, select_props):
state = torch.from_numpy(state).float()
props, value = self(Variable(state))
dist = Categorical(props)
action = dist.sample()
log_props = dist.log_prob(action)
values.append(value)
select_props.append(log_props)
return action.data[0]
def forward(self, input_0):
primals_1 = self.affine1.weight
primals_2 = self.affine1.bias
primals_4 = self.action_head.weight
primals_5 = self.action_head.bias
primals_6 = self.value_head.weight
primals_7 = self.value_head.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
|
NeilWangziyu/torch_light
|
ActorCritic
| false
| 5,640
|
[
"MIT"
] | 1
|
daf8fd62f57885cf182f1b3edc3152156d229ef3
|
https://github.com/NeilWangziyu/torch_light/tree/daf8fd62f57885cf182f1b3edc3152156d229ef3
|
NlpCrossEntropy
|
import torch
import torch.nn as nn
class NlpCrossEntropy(nn.Module):
def __init__(self):
super().__init__()
def forward(self, props, tgt):
tgt_props = props.gather(2, tgt.unsqueeze(2)).squeeze()
mask = (tgt > 0).float()
return -(tgt_props * mask).sum() / mask.sum()
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64)]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_div_gt_mul_neg_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = tmp6.to(tl.float32)
tmp8 = tl.full([1, 1], 0, tl.int64)
tmp9 = tmp0 > tmp8
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp7 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.sum(tmp12, 1)[:, None]
tmp15 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp17 = tl.sum(tmp15, 1)[:, None]
tmp18 = -tmp14
tmp19 = tmp18 / tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None)
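# One fused kernel: gather the target-word scores, build the padding mask
# (tgt > 0), and compute -sum(scores * mask) / sum(mask) in a single reduction.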
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_div_gt_mul_neg_sum_0[grid(1)](buf2,
arg1_1, arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class NlpCrossEntropyNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
NeilWangziyu/torch_light
|
NlpCrossEntropy
| false
| 5,641
|
[
"MIT"
] | 1
|
daf8fd62f57885cf182f1b3edc3152156d229ef3
|
https://github.com/NeilWangziyu/torch_light/tree/daf8fd62f57885cf182f1b3edc3152156d229ef3
|
SelfCriticCriterion
|
import torch
import torch.nn as nn
class SelfCriticCriterion(nn.Module):
def __init__(self):
super().__init__()
def forward(self, props, s_words, tgt, advantage):
advantage = (advantage - advantage.mean()) / advantage.std().clamp(min
=1e-08)
s_props = props.gather(2, s_words.unsqueeze(2)).squeeze()
mask = (tgt > 0).float()
advantage = advantage.unsqueeze(1).repeat(1, mask.size(1))
advantage = advantage.detach()
return -(s_props * mask * advantage).sum() / mask.sum()
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4]), torch.rand([4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_std_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp5 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 4, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None)
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
@triton.jit
def triton_per_fused__to_copy_div_gt_mul_neg_repeat_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK: tl
.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, None)
tmp8 = tl.load(in_ptr2 + r2, None)
tmp13 = tl.load(in_ptr3 + r1, None, eviction_policy='evict_last')
tmp14 = tl.load(in_out_ptr0 + 0)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp19 = tl.load(in_ptr4 + 0)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r2), None, eviction_policy=
'evict_last')
tmp7 = tmp6.to(tl.float32)
tmp9 = 0.0
tmp10 = tmp8 > tmp9
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp7 * tmp11
tmp16 = 4.0
tmp17 = tmp15 / tmp16
tmp18 = tmp13 - tmp17
tmp21 = 3.0
tmp22 = tmp20 / tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-08
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tmp18 / tmp25
tmp27 = tmp12 * tmp26
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.sum(tmp28, 1)[:, None]
tmp31 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp33 = tl.sum(tmp31, 1)[:, None]
tmp34 = -tmp30
tmp35 = tmp34 / tmp33
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None)
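# Kernel 0 computes the advantage sum and the sum of squared deviations; kernel 1
# normalises the advantage ((a - mean) / clamped std), gathers the sampled-word
# scores, masks padding via tgt > 0 and reduces to the final policy-gradient loss.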
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4,), (1,))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_std_0[grid(1)](arg0_1, buf0, buf2, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
buf4 = buf0
del buf0
buf6 = buf4
del buf4
triton_per_fused__to_copy_div_gt_mul_neg_repeat_sum_1[grid(1)](buf6,
arg2_1, arg1_1, arg3_1, arg0_1, buf2, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del buf2
return buf6,
class SelfCriticCriterionNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg1_1 = input_0
arg2_1 = input_1
arg3_1 = input_2
arg0_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
NeilWangziyu/torch_light
|
SelfCriticCriterion
| false
| 5,642
|
[
"MIT"
] | 1
|
daf8fd62f57885cf182f1b3edc3152156d229ef3
|
https://github.com/NeilWangziyu/torch_light/tree/daf8fd62f57885cf182f1b3edc3152156d229ef3
|
LeastSquaresGenerativeAdversarialLoss
|
import torch
import torch.nn as nn
import torch.utils.data
class LeastSquaresGenerativeAdversarialLoss(nn.Module):
"""
Loss for `Least Squares Generative Adversarial Network (LSGAN) <https://arxiv.org/abs/1611.04076>`_
Args:
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Inputs:
- prediction (tensor): unnormalized discriminator predictions
- real (bool): if the ground truth label is for real images or fake images. Default: true
.. warning::
Do not use sigmoid as the last layer of Discriminator.
"""
def __init__(self, reduction='mean'):
super(LeastSquaresGenerativeAdversarialLoss, self).__init__()
self.mse_loss = nn.MSELoss(reduction=reduction)
def forward(self, prediction, real=True):
if real:
label = torch.ones_like(prediction)
else:
label = torch.zeros_like(prediction)
return self.mse_loss(prediction, label)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_ones_like_0(in_out_ptr0, in_ptr0, xnumel, rnumel
):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
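# Fuses the ones_like target with the MSE reduction: mean of (prediction - 1)^2
# over all 256 elements, i.e. the real=True branch of the original forward.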
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_ones_like_0[grid(1)](buf1, arg0_1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
return buf1,
class LeastSquaresGenerativeAdversarialLossNew(nn.Module):
"""
Loss for `Least Squares Generative Adversarial Network (LSGAN) <https://arxiv.org/abs/1611.04076>`_
Args:
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Inputs:
- prediction (tensor): unnormalized discriminator predictions
- real (bool): if the ground truth label is for real images or fake images. Default: true
.. warning::
Do not use sigmoid as the last layer of Discriminator.
"""
def __init__(self, reduction='mean'):
super(LeastSquaresGenerativeAdversarialLossNew, self).__init__()
self.mse_loss = nn.MSELoss(reduction=reduction)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Neronjust2017/TransferBed
|
LeastSquaresGenerativeAdversarialLoss
| false
| 5,643
|
[
"MIT"
] | 1
|
eaa703a4bc10eaf6216fe1394cd272f6e75489e2
|
https://github.com/Neronjust2017/TransferBed/tree/eaa703a4bc10eaf6216fe1394cd272f6e75489e2
|
LayerNorm
|
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
def forward(self, input):
mu = torch.mean(input, dim=-1, keepdim=True)
sigma = torch.std(input, dim=-1, keepdim=True).clamp(min=self.eps)
output = (input - mu) / sigma
return output * self.weight.expand_as(output) + self.bias.expand_as(
output)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_div_mean_mul_std_sub_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-06
tmp26 = triton_helpers.maximum(tmp24, tmp25)
tmp27 = tmp10 / tmp26
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_clamp_div_mean_mul_std_sub_0[grid(256)](primals_1,
primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class LayerNormNew(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
NeilWangziyu/torch_light
|
LayerNorm
| false
| 5,644
|
[
"MIT"
] | 1
|
daf8fd62f57885cf182f1b3edc3152156d229ef3
|
https://github.com/NeilWangziyu/torch_light/tree/daf8fd62f57885cf182f1b3edc3152156d229ef3
|
VanillaGenerativeAdversarialLoss
|
import torch
import torch.nn as nn
import torch.utils.data
class VanillaGenerativeAdversarialLoss(nn.Module):
"""
Loss for `Vanilla Generative Adversarial Network <https://arxiv.org/abs/1406.2661>`_
Args:
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Inputs:
- prediction (tensor): unnormalized discriminator predictions
- real (bool): if the ground truth label is for real images or fake images. Default: true
.. warning::
Do not use sigmoid as the last layer of Discriminator.
"""
def __init__(self, reduction='mean'):
super(VanillaGenerativeAdversarialLoss, self).__init__()
self.bce_loss = nn.BCEWithLogitsLoss(reduction=reduction)
def forward(self, prediction, real=True):
if real:
label = torch.ones_like(prediction)
else:
label = torch.zeros_like(prediction)
return self.bce_loss(prediction, label)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
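# A minimal usage sketch, assuming raw (pre-sigmoid) discriminator logits as
# input; `real` selects the all-ones or all-zeros target passed to
# BCEWithLogitsLoss.
def _vanilla_gan_usage_sketch():
    criterion = VanillaGenerativeAdversarialLoss(reduction='mean')
    fake_logits = torch.randn(8, 1)
    g_loss = criterion(fake_logits, real=True)
    d_fake_loss = criterion(fake_logits.detach(), real=False)
    return g_loss, d_fake_loss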
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0,
in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 0.0
tmp2 = tmp1 * tmp0
tmp3 = triton_helpers.minimum(tmp1, tmp0)
tmp4 = tl_math.abs(tmp0)
tmp5 = -tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = libdevice.log1p(tmp6)
tmp8 = tmp3 - tmp7
tmp9 = tmp2 - tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 256.0
tmp14 = tmp12 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](buf1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class VanillaGenerativeAdversarialLossNew(nn.Module):
"""
Loss for `Vanilla Generative Adversarial Network <https://arxiv.org/abs/1406.2661>`_
Args:
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Inputs:
- prediction (tensor): unnormalized discriminator predictions
- real (bool): if the ground truth label is for real images or fake images. Default: true
.. warning::
Do not use sigmoid as the last layer of Discriminator.
"""
def __init__(self, reduction='mean'):
super(VanillaGenerativeAdversarialLossNew, self).__init__()
self.bce_loss = nn.BCEWithLogitsLoss(reduction=reduction)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Neronjust2017/TransferBed
|
VanillaGenerativeAdversarialLoss
| false
| 5,645
|
[
"MIT"
] | 1
|
eaa703a4bc10eaf6216fe1394cd272f6e75489e2
|
https://github.com/Neronjust2017/TransferBed/tree/eaa703a4bc10eaf6216fe1394cd272f6e75489e2
|
QMaxPooling2d
|
from torch.autograd import Function
import torch
import torch.nn as nn
import torch.nn.functional as F
def calcScaleZeroPoint(min_val, max_val, num_bits=8):
qmin = 0.0
qmax = 2.0 ** num_bits - 1.0
scale = float((max_val - min_val) / (qmax - qmin))
zero_point = qmax - max_val / scale
if zero_point < qmin:
zero_point = qmin
elif zero_point > qmax:
zero_point = qmax
zero_point = int(zero_point)
return scale, zero_point
def quantize_tensor(x, scale, zero_point, num_bits=8, signed=False):
if signed:
qmin = -2.0 ** (num_bits - 1)
qmax = 2.0 ** (num_bits - 1) - 1
else:
qmin = 0.0
qmax = 2.0 ** num_bits - 1.0
q_x = zero_point + x / scale
q_x.clamp_(qmin, qmax).round_()
return q_x
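def dequantize_tensor(q_x, scale, zero_point):
    # Assumed helper: QParam.dequantize_tensor below references
    # dequantize_tensor, which is not defined in this excerpt. The standard
    # inverse of quantize_tensor is used here so the fake-quantization path
    # is runnable.
    return scale * (q_x - zero_point)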
class FakeQuantize(Function):
@staticmethod
def forward(ctx, x, qparam):
x = qparam.quantize_tensor(x)
x = qparam.dequantize_tensor(x)
return x
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
class QParam:
def __init__(self, num_bits=8):
self.num_bits = num_bits
self.scale = None
self.zero_point = None
self.min = None
self.max = None
def update(self, tensor):
if self.max is None or self.max < tensor.max():
self.max = tensor.max()
if self.min is None or self.min > tensor.min():
self.min = tensor.min()
self.scale, self.zero_point = calcScaleZeroPoint(self.min, self.max,
self.num_bits)
def quantize_tensor(self, tensor):
return quantize_tensor(tensor, self.scale, self.zero_point,
num_bits=self.num_bits)
def dequantize_tensor(self, q_x):
return dequantize_tensor(q_x, self.scale, self.zero_point)
def __str__(self):
info = 'scale: %.10f ' % self.scale
info += 'zp: %d ' % self.zero_point
info += 'min: %.6f ' % self.min
info += 'max: %.6f' % self.max
return info
class QModule(nn.Module):
def __init__(self, qi=True, qo=True, num_bits=8):
super(QModule, self).__init__()
if qi:
self.qi = QParam(num_bits=num_bits)
if qo:
self.qo = QParam(num_bits=num_bits)
def freeze(self):
pass
def quantize_inference(self, x):
raise NotImplementedError('quantize_inference should be implemented.')
class QMaxPooling2d(QModule):
def __init__(self, kernel_size=3, stride=1, padding=0, qi=False,
num_bits=None):
super(QMaxPooling2d, self).__init__(qi=qi, num_bits=num_bits)
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
def freeze(self, qi=None):
if hasattr(self, 'qi') and qi is not None:
raise ValueError('qi has been provided in init function.')
if not hasattr(self, 'qi') and qi is None:
            raise ValueError('qi does not exist and should be provided.')
if qi is not None:
self.qi = qi
def forward(self, x):
if hasattr(self, 'qi'):
self.qi.update(x)
x = FakeQuantize.apply(x, self.qi)
x = F.max_pool2d(x, self.kernel_size, self.stride, self.padding)
return x
def quantize_inference(self, x):
return F.max_pool2d(x, self.kernel_size, self.stride, self.padding)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.autograd import Function
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2 % 2
x2 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 4 * x1 + 16 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + 4 * x1 + 16 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (4 + x0 + 4 * x1 + 16 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (5 + x0 + 4 * x1 + 16 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (6 + x0 + 4 * x1 + 16 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (8 + x0 + 4 * x1 + 16 * x2), xmask)
tmp13 = tl.load(in_ptr0 + (9 + x0 + 4 * x1 + 16 * x2), xmask)
tmp15 = tl.load(in_ptr0 + (10 + x0 + 4 * x1 + 16 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tl.store(out_ptr0 + x3, tmp16, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
def calcScaleZeroPoint(min_val, max_val, num_bits=8):
qmin = 0.0
qmax = 2.0 ** num_bits - 1.0
scale = float((max_val - min_val) / (qmax - qmin))
zero_point = qmax - max_val / scale
if zero_point < qmin:
zero_point = qmin
elif zero_point > qmax:
zero_point = qmax
zero_point = int(zero_point)
return scale, zero_point
def quantize_tensor(x, scale, zero_point, num_bits=8, signed=False):
if signed:
qmin = -2.0 ** (num_bits - 1)
qmax = 2.0 ** (num_bits - 1) - 1
else:
qmin = 0.0
qmax = 2.0 ** num_bits - 1.0
q_x = zero_point + x / scale
q_x.clamp_(qmin, qmax).round_()
return q_x
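def dequantize_tensor(q_x, scale, zero_point):
    # Assumed helper: QParam.dequantize_tensor below references
    # dequantize_tensor, which is not defined in this excerpt. The standard
    # inverse of quantize_tensor is used here so the fake-quantization path
    # is runnable.
    return scale * (q_x - zero_point)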
class FakeQuantize(Function):
@staticmethod
def forward(ctx, x, qparam):
x = qparam.quantize_tensor(x)
x = qparam.dequantize_tensor(x)
return x
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
class QParam:
def __init__(self, num_bits=8):
self.num_bits = num_bits
self.scale = None
self.zero_point = None
self.min = None
self.max = None
def update(self, tensor):
if self.max is None or self.max < tensor.max():
self.max = tensor.max()
if self.min is None or self.min > tensor.min():
self.min = tensor.min()
self.scale, self.zero_point = calcScaleZeroPoint(self.min, self.max,
self.num_bits)
def quantize_tensor(self, tensor):
return quantize_tensor(tensor, self.scale, self.zero_point,
num_bits=self.num_bits)
def dequantize_tensor(self, q_x):
return dequantize_tensor(q_x, self.scale, self.zero_point)
def __str__(self):
info = 'scale: %.10f ' % self.scale
info += 'zp: %d ' % self.zero_point
info += 'min: %.6f ' % self.min
info += 'max: %.6f' % self.max
return info
class QModule(nn.Module):
def __init__(self, qi=True, qo=True, num_bits=8):
super(QModule, self).__init__()
if qi:
self.qi = QParam(num_bits=num_bits)
if qo:
self.qo = QParam(num_bits=num_bits)
def freeze(self):
pass
def quantize_inference(self, x):
raise NotImplementedError('quantize_inference should be implemented.')
class QMaxPooling2dNew(QModule):
def __init__(self, kernel_size=3, stride=1, padding=0, qi=False,
num_bits=None):
super(QMaxPooling2dNew, self).__init__(qi=qi, num_bits=num_bits)
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
def freeze(self, qi=None):
if hasattr(self, 'qi') and qi is not None:
raise ValueError('qi has been provided in init function.')
if not hasattr(self, 'qi') and qi is None:
            raise ValueError('qi does not exist and should be provided.')
if qi is not None:
self.qi = qi
def quantize_inference(self, x):
return F.max_pool2d(x, self.kernel_size, self.stride, self.padding)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
NeekHua/quantization_pytorch_demo
|
QMaxPooling2d
| false
| 5,646
|
[
"Apache-2.0"
] | 1
|
930b03de977e48c0652d3801c710510ffc40aa38
|
https://github.com/NeekHua/quantization_pytorch_demo/tree/930b03de977e48c0652d3801c710510ffc40aa38
|
AdaptiveFeatureNorm
|
import torch
import torch.nn as nn
import torch.utils.data
class AdaptiveFeatureNorm(nn.Module):
"""
The `Stepwise Adaptive Feature Norm loss (ICCV 2019) <https://arxiv.org/pdf/1811.07456v2.pdf>`_
Instead of using restrictive scalar R to match the corresponding feature norm, Stepwise Adaptive Feature Norm
is used in order to learn task-specific features with large norms in a progressive manner.
Given feature representations :math:`f` on source or target domain, the definition of Stepwise Adaptive Feature Norm loss is
.. math::
norm\\_loss = \\mathbb{E}_{i}(\\Vert f_i \\Vert_2.detach() + delta - \\Vert f_i \\Vert_2)^2\\\\
Args:
delta (float): positive residual scalar to control the feature norm enlargement.
Inputs:
- f (tensor): feature representations on source or target domain.
Shape:
- f: :math:`(N, F)` where F means the dimension of input features.
- Outputs: scalar.
Examples::
>>> adaptive_feature_norm = AdaptiveFeatureNorm(delta=1)
>>> f_s = torch.randn(32, 1000)
>>> f_t = torch.randn(32, 1000)
>>> norm_loss = adaptive_feature_norm(f_s) + adaptive_feature_norm(f_t)
"""
def __init__(self, delta):
super(AdaptiveFeatureNorm, self).__init__()
self.delta = delta
def forward(self, f: 'torch.Tensor') ->torch.Tensor:
radius = f.norm(p=2, dim=1).detach()
assert radius.requires_grad is False
radius = radius + self.delta
loss = ((f.norm(p=2, dim=1) - radius) ** 2).mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'delta': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0(in_out_ptr0,
in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 4.0
tmp13 = tmp11 + tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp18 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0[grid(1)](buf1,
arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class AdaptiveFeatureNormNew(nn.Module):
"""
The `Stepwise Adaptive Feature Norm loss (ICCV 2019) <https://arxiv.org/pdf/1811.07456v2.pdf>`_
Instead of using restrictive scalar R to match the corresponding feature norm, Stepwise Adaptive Feature Norm
is used in order to learn task-specific features with large norms in a progressive manner.
Given feature representations :math:`f` on source or target domain, the definition of Stepwise Adaptive Feature Norm loss is
.. math::
norm\\_loss = \\mathbb{E}_{i}(\\Vert f_i \\Vert_2.detach() + delta - \\Vert f_i \\Vert_2)^2\\\\
Args:
delta (float): positive residual scalar to control the feature norm enlargement.
Inputs:
- f (tensor): feature representations on source or target domain.
Shape:
- f: :math:`(N, F)` where F means the dimension of input features.
- Outputs: scalar.
Examples::
>>> adaptive_feature_norm = AdaptiveFeatureNorm(delta=1)
>>> f_s = torch.randn(32, 1000)
>>> f_t = torch.randn(32, 1000)
>>> norm_loss = adaptive_feature_norm(f_s) + adaptive_feature_norm(f_t)
"""
def __init__(self, delta):
super(AdaptiveFeatureNormNew, self).__init__()
self.delta = delta
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Neronjust2017/TransferBed
|
AdaptiveFeatureNorm
| false
| 5,647
|
[
"MIT"
] | 1
|
eaa703a4bc10eaf6216fe1394cd272f6e75489e2
|
https://github.com/Neronjust2017/TransferBed/tree/eaa703a4bc10eaf6216fe1394cd272f6e75489e2
|
ANet
|
import torch
import torch.nn as nn
import torch.utils.data
class ANet(nn.Module):
def __init__(self, in_feature):
super(ANet, self).__init__()
self.layer = nn.Linear(in_feature, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.layer(x)
x = self.sigmoid(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_feature': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(64)](buf1, primals_2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class ANetNew(nn.Module):
def __init__(self, in_feature):
super(ANetNew, self).__init__()
self.layer = nn.Linear(in_feature, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.layer.weight
primals_2 = self.layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Neronjust2017/TransferBed
|
ANet
| false
| 5,648
|
[
"MIT"
] | 1
|
eaa703a4bc10eaf6216fe1394cd272f6e75489e2
|
https://github.com/Neronjust2017/TransferBed/tree/eaa703a4bc10eaf6216fe1394cd272f6e75489e2
|
Auto_Encoder_Model
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Auto_Encoder_Model(nn.Module):
def __init__(self):
super(Auto_Encoder_Model, self).__init__()
self.conv1 = nn.Conv2d(1, 64, padding=1, kernel_size=3)
self.max_pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(64, 32, padding=1, kernel_size=3)
self.max_pool2 = nn.MaxPool2d(2)
self.conv3 = nn.Conv2d(32, 16, padding=1, kernel_size=3)
self.tran_conv1 = nn.ConvTranspose2d(16, 32, kernel_size=3, stride=
2, padding=1, output_padding=1)
self.conv4 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
self.tran_conv2 = nn.ConvTranspose2d(32, 64, kernel_size=3, stride=
2, padding=1, output_padding=1)
self.conv5 = nn.Conv2d(64, 1, kernel_size=3, padding=1)
def forward_pass(self, x):
output = F.relu(self.conv1(x))
output = self.max_pool1(output)
output = F.relu(self.conv2(output))
output = self.max_pool2(output)
output = F.relu(self.conv3(output))
return output
def reconstruct_pass(self, x):
output = F.relu(self.tran_conv1(x))
output = F.relu(self.conv4(output))
output = F.relu(self.tran_conv2(output))
output = torch.sigmoid(self.conv5(output))
return output
def forward(self, x):
output = self.forward_pass(x)
output = self.reconstruct_pass(output)
return output
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (1, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf1, buf2,
buf3, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(131072)](buf5, primals_5,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf5, buf6,
buf7, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_7,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_2[grid(131072)](buf11, primals_9,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf12 = extern_kernels.convolution(buf11, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_2[grid(131072)](buf13, primals_11,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_0[grid(1048576)](buf15,
primals_13, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_sigmoid_5[grid(16384)](buf17,
primals_15, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
return (buf17, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf1, buf2, buf3, buf5, buf6,
buf7, buf9, buf11, buf13, buf15, buf17)
class Auto_Encoder_ModelNew(nn.Module):
def __init__(self):
super(Auto_Encoder_ModelNew, self).__init__()
self.conv1 = nn.Conv2d(1, 64, padding=1, kernel_size=3)
self.max_pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(64, 32, padding=1, kernel_size=3)
self.max_pool2 = nn.MaxPool2d(2)
self.conv3 = nn.Conv2d(32, 16, padding=1, kernel_size=3)
self.tran_conv1 = nn.ConvTranspose2d(16, 32, kernel_size=3, stride=
2, padding=1, output_padding=1)
self.conv4 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
self.tran_conv2 = nn.ConvTranspose2d(32, 64, kernel_size=3, stride=
2, padding=1, output_padding=1)
self.conv5 = nn.Conv2d(64, 1, kernel_size=3, padding=1)
def forward_pass(self, x):
output = F.relu(self.conv1(x))
output = self.max_pool1(output)
output = F.relu(self.conv2(output))
output = self.max_pool2(output)
output = F.relu(self.conv3(output))
return output
def reconstruct_pass(self, x):
output = F.relu(self.tran_conv1(x))
output = F.relu(self.conv4(output))
output = F.relu(self.tran_conv2(output))
output = torch.sigmoid(self.conv5(output))
return output
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.tran_conv1.weight
primals_9 = self.tran_conv1.bias
primals_10 = self.conv4.weight
primals_11 = self.conv4.bias
primals_12 = self.tran_conv2.weight
primals_13 = self.tran_conv2.bias
primals_14 = self.conv5.weight
primals_15 = self.conv5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
|
NNDEV1/QandMedicAid
|
Auto_Encoder_Model
| false
| 5,649
|
[
"MIT"
] | 1
|
f229f7dcf192fd79715eba07a2e5121a13c7a571
|
https://github.com/NNDEV1/QandMedicAid/tree/f229f7dcf192fd79715eba07a2e5121a13c7a571
|
AtteMatchLay
|
import torch
import torch.nn as nn
from torch.nn.functional import cosine_similarity
def multi_perspective_expand_for_2D(in_tensor, decompose_params):
"""
    Return: [batch_size, decompose_dim, dim]
"""
in_tensor = in_tensor.unsqueeze(1)
decompose_params = decompose_params.unsqueeze(0)
return torch.mul(in_tensor, decompose_params)
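# Shape sketch with hypothetical tensors: [N, 1, dim] * [1, mp_dim, dim]
# broadcasts to [N, mp_dim, dim], i.e. each input row is re-weighted by every
# perspective vector.
def _multi_perspective_shape_sketch():
    in_tensor = torch.randn(6, 4)
    weight = torch.randn(3, 4)
    out = multi_perspective_expand_for_2D(in_tensor, weight)
    assert out.shape == (6, 3, 4)
    return out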
class AtteMatchLay(nn.Module):
def __init__(self, mp_dim, cont_dim):
super(AtteMatchLay, self).__init__()
self.cont_dim = cont_dim
self.mp_dim = mp_dim
self.register_parameter('weight', nn.Parameter(torch.Tensor(mp_dim,
cont_dim)))
self.weight.data.uniform_(-1.0, 1.0)
def forward(self, repres, max_att):
"""
Args:
repres - [bsz, a_len|q_len, cont_dim]
max_att - [bsz, q_len|a_len, cont_dim]
Return:
size - [bsz, sentence_len, mp_dim]
"""
bsz = repres.size(0)
sent_len = repres.size(1)
repres = repres.view(-1, self.cont_dim)
max_att = max_att.view(-1, self.cont_dim)
repres = multi_perspective_expand_for_2D(repres, self.weight)
max_att = multi_perspective_expand_for_2D(max_att, self.weight)
temp = cosine_similarity(repres, max_att, repres.dim() - 1)
return temp.view(bsz, sent_len, self.mp_dim)
def get_inputs():
return [torch.rand([16, 4, 4]), torch.rand([64, 4])]
def get_init_inputs():
return [[], {'mp_dim': 4, 'cont_dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp33 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 * tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 * tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 * tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = 1e-08
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp23
tmp26 = tmp25 * tmp5
tmp27 = tmp26 * tmp26
tmp28 = tmp24 + tmp27
tmp30 = tmp29 * tmp10
tmp31 = tmp30 * tmp30
tmp32 = tmp28 + tmp31
tmp34 = tmp33 * tmp15
tmp35 = tmp34 * tmp34
tmp36 = tmp32 + tmp35
tmp37 = libdevice.sqrt(tmp36)
tmp38 = triton_helpers.maximum(tmp37, tmp20)
tmp39 = tmp23 / tmp38
tmp40 = tmp2 / tmp21
tmp41 = tmp39 * tmp40
tmp42 = tmp26 / tmp38
tmp43 = tmp6 / tmp21
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp30 / tmp38
tmp47 = tmp11 / tmp21
tmp48 = tmp46 * tmp47
tmp49 = tmp45 + tmp48
tmp50 = tmp34 / tmp38
tmp51 = tmp16 / tmp21
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tl.store(in_out_ptr0 + x2, tmp53, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4, 1), (4, 1, 256), torch.float32)
buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_sum_0[grid(256)](
buf2, primals_2, primals_3, primals_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0
), primals_1, primals_2, primals_3
def multi_perspective_expand_for_2D(in_tensor, decompose_params):
"""
    Return: [batch_size, decompose_dim, dim]
"""
in_tensor = in_tensor.unsqueeze(1)
decompose_params = decompose_params.unsqueeze(0)
return torch.mul(in_tensor, decompose_params)
class AtteMatchLayNew(nn.Module):
def __init__(self, mp_dim, cont_dim):
super(AtteMatchLayNew, self).__init__()
self.cont_dim = cont_dim
self.mp_dim = mp_dim
self.register_parameter('weight', nn.Parameter(torch.Tensor(mp_dim,
cont_dim)))
self.weight.data.uniform_(-1.0, 1.0)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
NeilWangziyu/torch_light
|
AtteMatchLay
| false
| 5,650
|
[
"MIT"
] | 1
|
daf8fd62f57885cf182f1b3edc3152156d229ef3
|
https://github.com/NeilWangziyu/torch_light/tree/daf8fd62f57885cf182f1b3edc3152156d229ef3
|
ChanNorm
|
import torch
from torch import nn
class ChanNorm(nn.Module):
def __init__(self, dim, eps=1e-05):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))
def forward(self, x):
std = torch.var(x, dim=1, unbiased=False, keepdim=True).sqrt()
mean = torch.mean(x, dim=1, keepdim=True)
return (x - mean) / (std + self.eps) * self.g + self.b
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp21 / tmp8
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = tmp10 / tmp25
tmp28 = tmp26 * tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + x3, tmp30, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0[grid(256)](primals_1,
primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class ChanNormNew(nn.Module):
def __init__(self, dim, eps=1e-05):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))
def forward(self, input_0):
primals_2 = self.g
primals_3 = self.b
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Netruk44/stylegan2-deepspeed
|
ChanNorm
| false
| 5,651
|
[
"MIT"
] | 1
|
d6efe64a2f8cdfa9477d2229652c5e1a2348d52d
|
https://github.com/Netruk44/stylegan2-deepspeed/tree/d6efe64a2f8cdfa9477d2229652c5e1a2348d52d
|
Out
|
import torch
import torch.nn as nn
class Out(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256,
64, 16, 4, 1), 0)
class OutNew(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=1)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Neuro-Vision/NeuroVision
|
Out
| false
| 5,652
|
[
"MIT"
] | 1
|
3da7bcc671b23693e979218e3acabb7098b77187
|
https://github.com/Neuro-Vision/NeuroVision/tree/3da7bcc671b23693e979218e3acabb7098b77187
|
Critic
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Critic(nn.Module):
def __init__(self, input_size):
super(Critic, self).__init__()
self.fc1 = nn.Linear(input_size, 200)
self.output = nn.Linear(200, 1)
def forward(self, x):
x = F.relu(self.fc1(x))
value = self.output(x)
return value
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (200, 4), (4, 1))
assert_size_stride(primals_2, (200,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 200), (200, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 200), (3200, 800, 200, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(12800)](buf1,
primals_2, buf4, 12800, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 200),
(200, 1), 0), reinterpret_tensor(primals_4, (200, 1), (1, 200),
0), alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 200), (200, 1), 0), primals_4, buf4
class CriticNew(nn.Module):
def __init__(self, input_size):
super(CriticNew, self).__init__()
self.fc1 = nn.Linear(input_size, 200)
self.output = nn.Linear(200, 1)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.output.weight
primals_5 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
NeuralFlux/rl-analysis
|
Critic
| false
| 5,653
|
[
"MIT"
] | 1
|
bb45e1f8bb9da4683cce4bd0a5e687770a4005e2
|
https://github.com/NeuralFlux/rl-analysis/tree/bb45e1f8bb9da4683cce4bd0a5e687770a4005e2
|
MockAccuracy
|
import torch
class _Metric(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'):
raise NotImplementedError()
class Accuracy(_Metric):
def __init__(self):
super().__init__()
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'):
"""
:param input: [B, L]
:param target: [B, L]
:return:
"""
bool_acc = input.long() == target.long()
return bool_acc.sum() / bool_acc.numel()
class MockAccuracy(Accuracy):
def __init__(self):
super().__init__()
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'):
return super().forward(input, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
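# Usage sketch with hypothetical label tensors: values are cast to int64 and
# compared element-wise, so 3 matches out of 4 gives tensor(0.7500).
def _accuracy_usage_sketch():
    metric = MockAccuracy()
    preds = torch.tensor([[0, 1, 2, 2]])
    labels = torch.tensor([[0, 1, 2, 3]])
    return metric(preds, labels)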
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_div_eq_sum_0(in_ptr0, in_ptr1, out_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tmp0.to(tl.int64)
tmp3 = tmp2.to(tl.int64)
tmp4 = tmp1 == tmp3
tmp5 = tmp4.to(tl.int64)
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tmp8.to(tl.float32)
tmp10 = 0.00390625
tmp11 = tmp9 * tmp10
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_div_eq_sum_0[grid(1)](arg0_1, arg1_1,
buf1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class _Metric(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'):
raise NotImplementedError()
class Accuracy(_Metric):
def __init__(self):
super().__init__()
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'):
"""
:param input: [B, L]
:param target: [B, L]
:return:
"""
bool_acc = input.long() == target.long()
return bool_acc.sum() / bool_acc.numel()
class MockAccuracyNew(Accuracy):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
NestLakerJasonLIN/MusicTransformer-pytorch
|
MockAccuracy
| false
| 5,654
|
[
"MIT"
] | 1
|
5f183374833ff6b7e17f3a24e3594dedd93a5fe5
|
https://github.com/NestLakerJasonLIN/MusicTransformer-pytorch/tree/5f183374833ff6b7e17f3a24e3594dedd93a5fe5
|
ACNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ACNetwork(nn.Module):
def __init__(self, input_size, action_size):
super(ACNetwork, self).__init__()
self.fc1 = nn.Linear(input_size, 256)
self.fc2 = nn.Linear(256, 256)
self.logits_p = nn.Linear(256, action_size)
self.v_values = nn.Linear(256, 1)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return self.logits_p(x), self.v_values(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'action_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 256), (256, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 256), (256, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf8, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
primals_5, buf7, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf4)
del primals_7
buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_8, (256, 1), (1, 256),
0), alpha=1, beta=1, out=buf6)
del primals_9
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
), primals_8, primals_6, buf7, primals_4, buf8
class ACNetworkNew(nn.Module):
def __init__(self, input_size, action_size):
super(ACNetworkNew, self).__init__()
self.fc1 = nn.Linear(input_size, 256)
self.fc2 = nn.Linear(256, 256)
self.logits_p = nn.Linear(256, action_size)
self.v_values = nn.Linear(256, 1)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.logits_p.weight
primals_7 = self.logits_p.bias
primals_8 = self.v_values.weight
primals_9 = self.v_values.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
|
NeuralFlux/rl-analysis
|
ACNetwork
| false
| 5,655
|
[
"MIT"
] | 1
|
bb45e1f8bb9da4683cce4bd0a5e687770a4005e2
|
https://github.com/NeuralFlux/rl-analysis/tree/bb45e1f8bb9da4683cce4bd0a5e687770a4005e2
|