entry_point
stringlengths 1
65
| original_triton_python_code
stringlengths 208
619k
| optimised_triton_code
stringlengths 1.15k
275k
| repo_name
stringlengths 7
115
| module_name
stringlengths 1
65
| synthetic
bool 1
class | uuid
int64 0
18.5k
| licenses
listlengths 1
6
| stars
int64 0
19.8k
| sha
stringlengths 40
40
| repo_link
stringlengths 72
180
|
|---|---|---|---|---|---|---|---|---|---|---|
DWT
|
import torch
import torch.nn as nn
import torch.fft
class DWT(nn.Module):
"""
2D Discrete Wavelet Transform as implemented in [1]_.
References
----------
.. [1] Liu, Pengju, et al. “Multi-Level Wavelet-CNN for Image Restoration.” ArXiv:1805.07071 [Cs], May 2018.
arXiv.org, http://arxiv.org/abs/1805.07071.
"""
def __init__(self):
super().__init__()
self.requires_grad = False
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x01 = x[:, :, 0::2, :] / 2
x02 = x[:, :, 1::2, :] / 2
x1 = x01[:, :, :, 0::2]
x2 = x02[:, :, :, 0::2]
x3 = x01[:, :, :, 1::2]
x4 = x02[:, :, :, 1::2]
x_LL = x1 + x2 + x3 + x4
x_HL = -x1 - x2 + x3 + x4
x_LH = -x1 + x2 - x3 + x4
x_HH = x1 - x2 - x3 + x4
return torch.cat((x_LL, x_HL, x_LH, x_HH), 1)
def get_inputs():
    """Example forward inputs for the benchmark harness."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness: none needed."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.fft
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fusion of DWT.forward + torch.cat.

    Reads the (4, 4, 4, 4) input and writes the (4, 16, 2, 2) output whose
    16 channels are the concatenated [LL, HL, LH, HH] subbands (4 input
    channels each).  Every output element combines the four pixels of one
    2x2 input block, each scaled by 0.5, with signs chosen by the subband
    the output channel belongs to.
    """
    xnumel = 256  # total output elements: 4 * 16 * 2 * 2
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # Decompose the flat output index into coordinates.
    x2 = xindex // 4 % 16  # output channel 0..15 (channel // 4 = subband)
    x0 = xindex % 2  # output column
    x1 = xindex // 2 % 2  # output row
    x3 = xindex // 64  # batch index
    x4 = xindex
    tmp0 = x2
    tl.full([1], 0, tl.int64)  # dead value kept by the code generator
    tmp3 = tl.full([1], 4, tl.int64)
    # Channels 0-3: LL subband, 0.5*(a + b + c + d) over the 2x2 block.
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2 + 64 * x3), tmp4 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = 0.5
    tmp7 = tmp5 * tmp6
    tmp8 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1 + 16 * x2 + 64 * x3),
        tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp9 = tmp8 * tmp6
    tmp10 = tmp7 + tmp9
    tmp11 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1 + 16 * x2 + 64 * x3),
        tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp12 = tmp11 * tmp6
    tmp13 = tmp10 + tmp12
    tmp14 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1 + 16 * x2 + 64 * x3),
        tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp15 = tmp14 * tmp6
    tmp16 = tmp13 + tmp15
    tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
    tmp18 = tl.where(tmp4, tmp16, tmp17)
    # Channels 4-7: HL subband (-a - b + c + d); x2 is rebased by -4.
    tmp19 = tmp0 >= tmp3
    tmp20 = tl.full([1], 8, tl.int64)
    tmp21 = tmp0 < tmp20
    tmp22 = tmp19 & tmp21
    tmp23 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * (-4 + x2) + 64 * x3),
        tmp22 & xmask, eviction_policy='evict_last', other=0.0)
    tmp24 = tmp23 * tmp6
    tmp25 = -tmp24
    tmp26 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1 + 16 * (-4 + x2) + 64 *
        x3), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
    tmp27 = tmp26 * tmp6
    tmp28 = tmp25 - tmp27
    tmp29 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1 + 16 * (-4 + x2) + 64 *
        x3), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
    tmp30 = tmp29 * tmp6
    tmp31 = tmp28 + tmp30
    tmp32 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1 + 16 * (-4 + x2) + 64 *
        x3), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
    tmp33 = tmp32 * tmp6
    tmp34 = tmp31 + tmp33
    tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
    tmp36 = tl.where(tmp22, tmp34, tmp35)
    # Channels 8-11: LH subband (-a + b - c + d); x2 rebased by -8.
    tmp37 = tmp0 >= tmp20
    tmp38 = tl.full([1], 12, tl.int64)
    tmp39 = tmp0 < tmp38
    tmp40 = tmp37 & tmp39
    tmp41 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * (-8 + x2) + 64 * x3),
        tmp40 & xmask, eviction_policy='evict_last', other=0.0)
    tmp42 = tmp41 * tmp6
    tmp43 = -tmp42
    tmp44 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1 + 16 * (-8 + x2) + 64 *
        x3), tmp40 & xmask, eviction_policy='evict_last', other=0.0)
    tmp45 = tmp44 * tmp6
    tmp46 = tmp43 + tmp45
    tmp47 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1 + 16 * (-8 + x2) + 64 *
        x3), tmp40 & xmask, eviction_policy='evict_last', other=0.0)
    tmp48 = tmp47 * tmp6
    tmp49 = tmp46 - tmp48
    tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1 + 16 * (-8 + x2) + 64 *
        x3), tmp40 & xmask, eviction_policy='evict_last', other=0.0)
    tmp51 = tmp50 * tmp6
    tmp52 = tmp49 + tmp51
    tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
    tmp54 = tl.where(tmp40, tmp52, tmp53)
    # Channels 12-15: HH subband (a - b - c + d); x2 rebased by -12.
    tmp55 = tmp0 >= tmp38
    tl.full([1], 16, tl.int64)  # dead value kept by the code generator
    tmp58 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * (-12 + x2) + 64 * x3),
        tmp55 & xmask, eviction_policy='evict_last', other=0.0)
    tmp59 = tmp58 * tmp6
    tmp60 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1 + 16 * (-12 + x2) + 64 *
        x3), tmp55 & xmask, eviction_policy='evict_last', other=0.0)
    tmp61 = tmp60 * tmp6
    tmp62 = tmp59 - tmp61
    tmp63 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1 + 16 * (-12 + x2) + 64 *
        x3), tmp55 & xmask, eviction_policy='evict_last', other=0.0)
    tmp64 = tmp63 * tmp6
    tmp65 = tmp62 - tmp64
    tmp66 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1 + 16 * (-12 + x2) + 64 *
        x3), tmp55 & xmask, eviction_policy='evict_last', other=0.0)
    tmp67 = tmp66 * tmp6
    tmp68 = tmp65 + tmp67
    tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype)
    tmp70 = tl.where(tmp55, tmp68, tmp69)
    # Chain of selects: pick the branch matching this channel group.
    tmp71 = tl.where(tmp40, tmp54, tmp70)
    tmp72 = tl.where(tmp22, tmp36, tmp71)
    tmp73 = tl.where(tmp4, tmp18, tmp72)
    tl.store(out_ptr0 + x4, tmp73, xmask)
def call(args):
    """Inductor entry point for DWTNew.

    Takes ``args = [input]`` with input of shape (4, 4, 4, 4), allocates the
    (4, 16, 2, 2) output on CUDA device 0, launches the fused DWT+cat
    kernel, and returns the result as a 1-tuple.  Consumes ``args``.
    """
    arg0_1, = args
    args.clear()  # Inductor convention: release caller references early.
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 16, 2, 2), (64, 4, 2, 1), torch.float32)
        get_raw_stream(0)
        # One program covers all 256 output elements (grid(256), XBLOCK=128).
        triton_poi_fused_cat_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class DWTNew(nn.Module):
    """Drop-in replacement for ``DWT`` whose forward pass dispatches to the
    Inductor-generated ``call`` wrapper (fused Triton kernel).

    References
    ----------
    .. [1] Liu, Pengju, et al. "Multi-Level Wavelet-CNN for Image
        Restoration." ArXiv:1805.07071 [Cs], May 2018.
        http://arxiv.org/abs/1805.07071.
    """

    def __init__(self):
        super().__init__()
        self.requires_grad = False

    def forward(self, input_0):
        # The compiled graph returns a 1-tuple; unwrap it.
        return call([input_0])[0]
|
directgroup/direct
|
DWT
| false
| 15,187
|
[
"Apache-2.0"
] | 55
|
78cdd530b3c93e31c11d8963880e6329f0989243
|
https://github.com/directgroup/direct/tree/78cdd530b3c93e31c11d8963880e6329f0989243
|
CReLU_IN
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class CReLU_IN(nn.Module):
    """Concatenated ReLU variant: stacks (x, -x) on the channel axis, applies
    affine instance normalization over the doubled channels, then a leaky
    ReLU with slope 0.01."""

    def __init__(self, channels):
        super(CReLU_IN, self).__init__()
        # Normalizes 2 * channels because forward doubles the channel count.
        self.bn = nn.InstanceNorm2d(channels * 2, eps=1e-05, momentum=0.1,
            affine=True)

    def forward(self, x):
        doubled = torch.cat((x, -x), 1)
        normed = self.bn(doubled)
        return F.leaky_relu(normed, 0.01, inplace=True)
def get_inputs():
    """Example forward input for the benchmark harness."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness."""
    return [[], {'channels': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_leaky_relu_leaky_relu_backward_0(
    in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3,
    out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Inductor fusion of CReLU_IN.forward.

    For each of the 32 (instance, channel) rows (4 instances x 8 channels):
    builds cat((x, -x), 1), normalizes over the 16 spatial positions,
    applies the affine weight/bias (in_ptr1/in_ptr2), then leaky_relu(0.01).
    Also stores the pre-norm cat tensor (out_ptr0), the per-row mean
    (out_ptr1) and rstd (out_ptr4), and the boolean (out > 0) mask
    (out_ptr3) used by the leaky_relu backward pass.
    """
    xnumel = 32  # rows: 4 instances * 8 concatenated channels
    RBLOCK: tl.constexpr = 16  # spatial elements per row (4 * 4)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead value from the codegen
    x0 = xindex % 8  # concatenated channel index
    r2 = rindex
    x1 = xindex // 8  # instance index
    x3 = xindex
    # Affine parameters are per concatenated channel.
    tmp37 = tl.load(in_ptr1 + x3 % 8, xmask, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr2 + x3 % 8, xmask, eviction_policy='evict_last')
    tmp0 = x0
    tl.full([1, 1], 0, tl.int64)  # dead value from the codegen
    tmp3 = tl.full([1, 1], 4, tl.int64)
    # Channels 0-3 come from x; channels 4-7 from -x (the cat).
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (r2 + 16 * x0 + 64 * x1), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1, 1], 8, tl.int64)  # dead value from the codegen
    tmp9 = tl.load(in_ptr0 + (r2 + 16 * (-4 + x0) + 64 * x1), tmp6 & xmask,
        other=0.0)
    tmp10 = -tmp9
    tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
    tmp12 = tl.where(tmp6, tmp10, tmp11)
    tmp13 = tl.where(tmp4, tmp5, tmp12)
    tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp14, 0)  # dead value from the codegen
    # Mean over the 16 spatial positions of this row.
    tmp17 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp19 = tl.where(xmask, tmp17, 0)
    tmp20 = tl.sum(tmp19, 1)[:, None]
    tmp21 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp22 = tmp21.to(tl.float32)
    tmp23 = tmp20 / tmp22
    # Biased variance, then rstd = 1/sqrt(var + eps).
    tmp24 = tmp14 - tmp23
    tmp25 = tmp24 * tmp24
    tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
    tmp28 = tl.where(xmask, tmp26, 0)
    tmp29 = tl.sum(tmp28, 1)[:, None]
    tmp30 = tmp13 - tmp23
    tmp31 = 16.0
    tmp32 = tmp29 / tmp31
    tmp33 = 1e-05
    tmp34 = tmp32 + tmp33
    tmp35 = libdevice.rsqrt(tmp34)
    tmp36 = tmp30 * tmp35
    # Affine transform, then leaky_relu(0.01).
    tmp38 = tmp36 * tmp37
    tmp40 = tmp38 + tmp39
    tmp41 = 0.0
    tmp42 = tmp40 > tmp41
    tmp43 = 0.01
    tmp44 = tmp40 * tmp43
    tmp45 = tl.where(tmp42, tmp40, tmp44)
    tmp46 = tmp45 > tmp41  # mask saved for leaky_relu backward
    tl.store(out_ptr0 + (r2 + 16 * x3), tmp13, xmask)
    tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp45, xmask)
    tl.store(out_ptr3 + (r2 + 16 * x3), tmp46, xmask)
    tl.store(out_ptr4 + x3, tmp35, xmask)
    tl.store(out_ptr1 + x3, tmp23, xmask)
def call(args):
    """Inductor entry point for CReLU_INNew.

    ``args = [input, instance_norm_weight, instance_norm_bias]``.  Returns
    (activation, cat_output, rstd (flattened), relu mask, mean) — the extra
    tensors are saved for the backward pass.  Consumes ``args``.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (8,), (1,))
    assert_size_stride(primals_3, (8,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        buf1 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
        buf5 = empty_strided_cuda((1, 32, 4, 4), (512, 16, 4, 1), torch.float32)
        # View the per-(instance, channel) layout back as (4, 8, 4, 4).
        buf6 = reinterpret_tensor(buf5, (4, 8, 4, 4), (128, 16, 4, 1), 0)
        del buf5
        buf7 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool)
        buf4 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
        get_raw_stream(0)
        # One program per (instance, channel) row; rnumel=16 spatial elems.
        triton_per_fused__native_batch_norm_legit_cat_leaky_relu_leaky_relu_backward_0[
            grid(32)](buf6, primals_1, primals_2, primals_3, buf0, buf1,
            buf7, buf4, 32, 16, XBLOCK=32, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        del primals_3
    return buf6, buf0, reinterpret_tensor(buf4, (32,), (1,), 0
        ), buf7, reinterpret_tensor(buf1, (1, 32, 1, 1), (32, 1, 1, 1), 0)
class CReLU_INNew(nn.Module):
    """Compiled drop-in for ``CReLU_IN``: forwards through the
    Inductor-generated ``call`` wrapper instead of eager ops."""

    def __init__(self, channels):
        super(CReLU_INNew, self).__init__()
        self.bn = nn.InstanceNorm2d(channels * 2, eps=1e-05, momentum=0.1,
            affine=True)

    def forward(self, input_0):
        # call expects [input, weight, bias] and returns extra backward
        # tensors after the activation; only the activation is the output.
        return call([input_0, self.bn.weight, self.bn.bias])[0]
|
dipikakhullar/ocr
|
CReLU_IN
| false
| 15,188
|
[
"MIT"
] | 284
|
a55e70d82f42803be5ed63f8f59e4fa597fcf8d6
|
https://github.com/dipikakhullar/ocr/tree/a55e70d82f42803be5ed63f8f59e4fa597fcf8d6
|
BinaryCrossEntropyLabelSmooth
|
import torch
class BinaryCrossEntropyLabelSmooth(torch.nn.BCEWithLogitsLoss):
    """BCE-with-logits loss with label smoothing.

    Targets are remapped as ``(1 - epsilon) * target + epsilon`` before the
    standard loss is applied (a 0 target becomes ``epsilon``).  All other
    constructor arguments are passed straight to ``BCEWithLogitsLoss``.
    """

    def __init__(self, num_classes, epsilon=0.1, weight=None, size_average=
        None, reduce=None, reduction='mean', pos_weight=None):
        super(BinaryCrossEntropyLabelSmooth, self).__init__(weight,
            size_average, reduce, reduction, pos_weight)
        self.num_classes = num_classes  # kept for API parity; not used here
        self.epsilon = epsilon

    def forward(self, input, target):
        smoothed = (1 - self.epsilon) * target + self.epsilon
        return super(BinaryCrossEntropyLabelSmooth, self).forward(input,
            smoothed)
def get_inputs():
    """Example (logits, targets) pair for the benchmark harness."""
    return [torch.rand(4, 4, 4, 4) for _ in range(2)]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness."""
    return [[], {'num_classes': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mul_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    """Inductor fusion of BinaryCrossEntropyLabelSmooth.forward.

    Smooths targets (0.9 * t + 0.1), evaluates the numerically stable
    BCE-with-logits formula per element, and mean-reduces all 256 elements
    into a scalar written to in_out_ptr0.  in_ptr0 = targets,
    in_ptr1 = logits.
    """
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256  # whole (4,4,4,4) tensor in one program
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)  # dead value from the codegen
    tl.full([RBLOCK], True, tl.int1)  # dead value from the codegen
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)  # dead value from the codegen
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp7 = tl.load(in_ptr1 + r0, None)
    # Label smoothing: t' = 0.9 * t + 0.1 (epsilon = 0.1).
    tmp1 = 0.9
    tmp2 = tmp0 * tmp1
    tmp3 = 0.1
    tmp4 = tmp2 + tmp3
    tmp5 = 1.0
    tmp6 = tmp5 - tmp4
    tmp8 = tmp6 * tmp7
    # Stable -log(sigmoid(x)) pieces: min(0, x) - log1p(exp(-|x|)).
    tmp9 = 0.0
    tmp10 = triton_helpers.minimum(tmp9, tmp7)
    tmp11 = tl_math.abs(tmp7)
    tmp12 = -tmp11
    tmp13 = tl_math.exp(tmp12)
    tmp14 = libdevice.log1p(tmp13)
    tmp15 = tmp10 - tmp14
    tmp16 = tmp8 - tmp15
    # Mean reduction over all 256 elements.
    tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
    tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
    tmp20 = 256.0
    tmp21 = tmp19 / tmp20
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
    """Inductor entry point for BinaryCrossEntropyLabelSmoothNew.

    ``args = [targets, logits]`` (both (4, 4, 4, 4)).  Returns the scalar
    mean loss as a 1-tuple.  Consumes ``args``.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)  # scalar output
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_binary_cross_entropy_with_logits_mul_0[grid(1)](
            buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class BinaryCrossEntropyLabelSmoothNew(torch.nn.BCEWithLogitsLoss):
    """Compiled drop-in for ``BinaryCrossEntropyLabelSmooth``: forwards
    through the Inductor-generated ``call`` wrapper."""

    def __init__(self, num_classes, epsilon=0.1, weight=None, size_average=
        None, reduce=None, reduction='mean', pos_weight=None):
        super(BinaryCrossEntropyLabelSmoothNew, self).__init__(weight,
            size_average, reduce, reduction, pos_weight)
        self.num_classes = num_classes
        self.epsilon = epsilon

    def forward(self, input_0, input_1):
        # call expects [targets, logits] in this argument order.
        return call([input_0, input_1])[0]
|
dianjixz/AutoDL
|
BinaryCrossEntropyLabelSmooth
| false
| 15,189
|
[
"Apache-2.0"
] | 1,044
|
48db4eb04d55ce69e93d4a3bdc24592bdb34a868
|
https://github.com/dianjixz/AutoDL/tree/48db4eb04d55ce69e93d4a3bdc24592bdb34a868
|
ProteinResNetPooler
|
from _paritybench_helpers import _mock_config
import torch
from torch import nn
class ProteinResNetPooler(nn.Module):
    """Attention-weighted pooling head.

    Scores every position with a linear layer, softmaxes the scores (over
    the last dim), forms the weighted mean of the hidden states, and maps
    it through a dense layer with tanh.
    """

    def __init__(self, config):
        super().__init__()
        self.attention_weights = nn.Linear(config.hidden_size, 1)
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states, mask=None):
        scores = self.attention_weights(hidden_states)
        if mask is not None:
            # Push masked positions toward -inf before the softmax.
            scores = scores + -10000.0 * (1 - mask)
        weights = torch.softmax(scores, -1)
        # (B, H, S) @ (B, S, 1) -> (B, H, 1) -> (B, H)
        pooled = torch.matmul(hidden_states.transpose(1, 2), weights).squeeze(2)
        return self.activation(self.dense(pooled))
def get_inputs():
    """Example (batch, seq, hidden) input for the benchmark harness."""
    return [torch.rand(4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs): a mock config with hidden_size=4."""
    return [[], {'config': _mock_config(hidden_size=4)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """Softmax over the trailing dim of the (4, 4, 1) attention scores.

    That dim has size 1, so the result is exactly 1.0 for every finite
    input — exp(x - x) / exp(x - x).  The degenerate arithmetic below is
    what Inductor emits for a softmax over a singleton dimension.
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 - tmp0  # x - max(x) where max over a size-1 dim is x itself
    tmp2 = tl_math.exp(tmp1)
    tmp3 = tmp2 / tmp2  # normalize by the (single-element) sum
    tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place bias-add + tanh epilogue for the pooler's dense layer.

    in_out_ptr0 holds the (16,) matmul result viewed as (4, 4); in_ptr0 is
    the 4-element bias, indexed by the last-dim position.
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # bias index (output feature)
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
    """Inductor entry point for ProteinResNetPoolerNew.

    ``args = [attn_weight, attn_bias, hidden_states, dense_weight,
    dense_bias]``.  Pipeline: addmm attention scores -> (trivial) softmax
    -> bmm weighted mean -> mm dense -> fused bias+tanh.  Returns the
    pooled output plus tensors saved for backward.  Consumes ``args``.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4), (4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        # Attention scores: bias + hidden_states @ attn_weight^T.
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
            ), alpha=1, beta=1, out=buf1)
        del primals_1
        del primals_2
        buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(16)](buf1, buf2, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        # Weighted mean: hidden_states^T (via strides) @ attention weights.
        extern_kernels.bmm(reinterpret_tensor(primals_3, (4, 4, 4), (16, 1,
            4), 0), buf2, out=buf3)
        buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0)  # reuse buffer
        del buf2
        extern_kernels.mm(reinterpret_tensor(buf3, (4, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4)
        buf5 = buf4
        del buf4
        # Fused bias-add + tanh, in place.
        triton_poi_fused_tanh_1[grid(16)](buf5, primals_5, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_5
    return buf5, primals_3, buf1, reinterpret_tensor(buf3, (4, 4), (4, 1), 0
        ), buf5, primals_4
class ProteinResNetPoolerNew(nn.Module):
    """Compiled drop-in for ``ProteinResNetPooler`` (no mask support):
    forwards through the Inductor-generated ``call`` wrapper."""

    def __init__(self, config):
        super().__init__()
        self.attention_weights = nn.Linear(config.hidden_size, 1)
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, input_0):
        # call's argument order: attn weight/bias, input, dense weight/bias.
        return call([self.attention_weights.weight,
            self.attention_weights.bias, input_0, self.dense.weight,
            self.dense.bias])[0]
|
IC-hub/ProteinLM
|
ProteinResNetPooler
| false
| 15,190
|
[
"Apache-2.0"
] | 59
|
58fbf1f674569cf814becf32f71dd0d8f0c592fa
|
https://github.com/IC-hub/ProteinLM/tree/58fbf1f674569cf814becf32f71dd0d8f0c592fa
|
DiceLoss
|
import torch
import torch.nn as nn
class DiceLoss(nn.Module):
    """Masked soft Dice loss over sigmoid probabilities.

    Per sample: loss = w * (1 - 2*sum(p*t) / (sum(p*p) + sum(t*t) + 0.002)),
    where p = sigmoid(input) * mask and t = target * mask, both flattened.
    With ``reduce`` the per-sample losses are averaged to a scalar.
    """

    def __init__(self, loss_weight=1.0):
        super(DiceLoss, self).__init__()
        self.loss_weight = loss_weight

    def forward(self, input, target, mask, reduce=True):
        batch = input.size(0)
        flat_mask = mask.contiguous().view(batch, -1).float()
        pred = torch.sigmoid(input).contiguous().view(batch, -1) * flat_mask
        truth = target.contiguous().view(batch, -1).float() * flat_mask
        overlap = torch.sum(pred * truth, dim=1)
        # 0.001 on each term keeps the ratio finite for empty masks.
        denom = (torch.sum(pred * pred, dim=1) + 0.001) + (torch.sum(truth *
            truth, dim=1) + 0.001)
        loss = self.loss_weight * (1 - 2 * overlap / denom)
        return torch.mean(loss) if reduce else loss
def get_inputs():
    """Example (input, target, mask) triple for the benchmark harness."""
    return [torch.rand(4, 4, 4, 4) for _ in range(3)]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness: defaults."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Per-sample reductions for DiceLoss.

    With p = sigmoid(input) * mask and t = target * mask, computes over the
    64 flattened elements of each of the 4 samples:
    out_ptr0 = sum(p*t), out_ptr1 = sum(p*p), out_ptr2 = sum(t*t).
    in_ptr0 = input logits, in_ptr1 = mask, in_ptr2 = target.
    """
    xnumel = 4  # batch size
    RBLOCK: tl.constexpr = 64  # flattened elements per sample
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead value from the codegen
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
    tmp4 = tl.load(in_ptr2 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2  # p = sigmoid(input) * mask
    tmp5 = tmp4 * tmp2  # t = target * mask
    # a = sum(p * t)
    tmp6 = tmp3 * tmp5
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.where(xmask, tmp7, 0)
    tmp10 = tl.sum(tmp9, 1)[:, None]
    # b = sum(p * p)
    tmp11 = tmp3 * tmp3
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.where(xmask, tmp12, 0)
    tmp15 = tl.sum(tmp14, 1)[:, None]
    # c = sum(t * t)
    tmp16 = tmp5 * tmp5
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
    tmp19 = tl.where(xmask, tmp17, 0)
    tmp20 = tl.sum(tmp19, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp10, xmask)
    tl.store(out_ptr1 + x0, tmp15, xmask)
    tl.store(out_ptr2 + x0, tmp20, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Finishes DiceLoss from the per-sample sums.

    loss = mean over the 4 samples of
    1 - 2a / ((b + 0.001) + (c + 0.001)), scaled by loss_weight = 1.0,
    written as a scalar to in_out_ptr0.  in_ptr0 = a, in_ptr1 = b,
    in_ptr2 = c.
    """
    RBLOCK: tl.constexpr = 4  # batch size
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]  # dead value from the codegen
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead value from the codegen
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead value from the codegen
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp6 = tl.load(in_ptr2 + r0, None)
    tmp1 = 2.0
    tmp2 = tmp0 * tmp1  # 2a
    tmp4 = 0.001
    tmp5 = tmp3 + tmp4  # b + eps
    tmp7 = tmp6 + tmp4  # c + eps
    tmp8 = tmp5 + tmp7
    tmp9 = tmp2 / tmp8  # dice coefficient d
    tmp10 = 1.0
    tmp11 = tmp10 - tmp9
    tmp12 = tmp11 * tmp10  # * loss_weight (1.0)
    # Mean over the batch.
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.sum(tmp13, 1)[:, None]
    tmp16 = 4.0
    tmp17 = tmp15 / tmp16
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)
def call(args):
    """Inductor entry point for DiceLossNew.

    ``args = [input, target, mask]`` (all (4, 4, 4, 4)).  Stage 1 computes
    per-sample sums a, b, c; stage 2 combines them into the scalar mean
    loss.  Consumes ``args``.
    """
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)  # a per sample
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)  # b per sample
        buf2 = empty_strided_cuda((4,), (1,), torch.float32)  # c per sample
        get_raw_stream(0)
        # Note the argument order: input, mask, target.
        triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg2_1, arg1_1, buf0,
            buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
        buf3 = empty_strided_cuda((), (), torch.float32)  # scalar loss
        buf4 = buf3
        del buf3
        triton_per_fused_add_div_mean_mul_rsub_1[grid(1)](buf4, buf0, buf1,
            buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
        del buf1
        del buf2
    return buf4,
class DiceLossNew(nn.Module):
    """Compiled drop-in for ``DiceLoss`` (mean-reduced path only):
    forwards through the Inductor-generated ``call`` wrapper."""

    def __init__(self, loss_weight=1.0):
        super(DiceLossNew, self).__init__()
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1, input_2):
        # call expects [input, target, mask].
        return call([input_0, input_1, input_2])[0]
|
doem97/PSENet
|
DiceLoss
| false
| 15,192
|
[
"Apache-2.0"
] | 1,213
|
4d95395658662f2223805c36dcd573d9e190ce26
|
https://github.com/doem97/PSENet/tree/4d95395658662f2223805c36dcd573d9e190ce26
|
Net
|
import torch
import torch.nn as nn
class Net(nn.Module):
    """Small MLP: 4 -> 64 -> 64 -> 2 with tanh on the hidden layers and a
    linear output layer."""

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(4, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 2)

    def forward(self, x):
        hidden = torch.tanh(self.fc1(x))
        hidden = torch.tanh(self.fc2(hidden))
        return self.fc3(hidden)
def get_inputs():
    """Example forward input for the benchmark harness."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness: defaults."""
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place bias-add + tanh epilogue for a 64-unit linear layer.

    in_out_ptr0 holds the matmul result; in_ptr0 is the 64-element bias,
    indexed by the output-feature position.
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # dead value from the codegen
    x2 = xindex
    x0 = xindex % 64  # bias index (output feature)
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, None)
def call(args):
    """Inductor entry point for NetNew.

    ``args = [fc1.w, fc1.b, input, fc2.w, fc2.b, fc3.w, fc3.b]``.  Runs the
    three linear layers as cuBLAS matmuls with fused bias+tanh epilogues on
    the first two, and returns the (4, 4, 4, 2) output plus tensors saved
    for backward.  Consumes ``args``.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (64, 4), (4, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (64, 64), (64, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (2, 64), (64, 1))
    assert_size_stride(primals_7, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        # fc1: input flattened to (64, 4) @ fc1.weight^T.
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
        del buf0
        get_raw_stream(0)
        # Fused bias + tanh, in place.
        triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        # fc2 matmul, then the same fused bias + tanh.
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
            reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
        del buf2
        triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        # fc3: bias folded into the addmm (no activation).
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
            (64, 1), 0), reinterpret_tensor(primals_6, (64, 2), (1, 64), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
    return reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf1, buf3, primals_6, primals_4
class NetNew(nn.Module):
    """Compiled drop-in for ``Net``: forwards through the
    Inductor-generated ``call`` wrapper."""

    def __init__(self):
        super(NetNew, self).__init__()
        self.fc1 = nn.Linear(4, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 2)

    def forward(self, input_0):
        # call's argument order: fc1 params, input, fc2 params, fc3 params.
        return call([self.fc1.weight, self.fc1.bias, input_0,
            self.fc2.weight, self.fc2.bias, self.fc3.weight,
            self.fc3.bias])[0]
|
dongminlee94/Samsung-DRL-Code
|
Net
| false
| 15,193
|
[
"MIT"
] | 116
|
c96f8739a09cfd708c265954ee8ecf0ea3b67395
|
https://github.com/dongminlee94/Samsung-DRL-Code/tree/c96f8739a09cfd708c265954ee8ecf0ea3b67395
|
MNISTClassifier
|
import torch
import torchvision
import torchvision.ops
from torch import nn
class DeformableConv2d(nn.Module):
    """Modulated deformable convolution (DCNv2-style) built on
    ``torchvision.ops.deform_conv2d``.

    Offsets and modulation masks are predicted by auxiliary convolutions
    that are zero-initialized, so the layer starts out behaving like a
    plain convolution.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
        padding=1, bias=False):
        super(DeformableConv2d, self).__init__()
        assert type(kernel_size) == tuple or type(kernel_size) == int
        ks = kernel_size if type(kernel_size) == tuple else (kernel_size,
            kernel_size)
        self.stride = stride if type(stride) == tuple else (stride, stride)
        self.padding = padding
        # 2 offsets (dy, dx) per kernel tap.
        self.offset_conv = nn.Conv2d(in_channels, 2 * ks[0] * ks[1],
            kernel_size=ks, stride=stride, padding=self.padding, bias=True)
        nn.init.constant_(self.offset_conv.weight, 0.0)
        nn.init.constant_(self.offset_conv.bias, 0.0)
        # 1 modulation scalar per kernel tap.
        self.modulator_conv = nn.Conv2d(in_channels, 1 * ks[0] * ks[1],
            kernel_size=ks, stride=stride, padding=self.padding, bias=True)
        nn.init.constant_(self.modulator_conv.weight, 0.0)
        nn.init.constant_(self.modulator_conv.bias, 0.0)
        # Holds the actual convolution weight/bias used by deform_conv2d.
        self.regular_conv = nn.Conv2d(in_channels=in_channels,
            out_channels=out_channels, kernel_size=ks, stride=stride,
            padding=self.padding, bias=bias)

    def forward(self, x):
        offset = self.offset_conv(x)
        # 2 * sigmoid keeps the mask in (0, 2), centered at 1 for zero logits.
        modulator = 2.0 * torch.sigmoid(self.modulator_conv(x))
        return torchvision.ops.deform_conv2d(input=x, offset=offset,
            weight=self.regular_conv.weight, bias=self.regular_conv.bias,
            padding=self.padding, mask=modulator, stride=self.stride)
class MNISTClassifier(nn.Module):
def __init__(self, deformable=False):
super(MNISTClassifier, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1,
bias=True)
self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1,
bias=True)
self.conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1,
bias=True)
conv = nn.Conv2d if deformable is False else DeformableConv2d
self.conv4 = conv(32, 32, kernel_size=3, stride=1, padding=1, bias=True
)
self.conv5 = conv(32, 32, kernel_size=3, stride=1, padding=1, bias=True
)
self.pool = nn.MaxPool2d(2)
self.gap = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(32, 10)
def forward(self, x):
x = torch.relu(self.conv1(x))
x = self.pool(x)
x = torch.relu(self.conv2(x))
x = self.pool(x)
x = torch.relu(self.conv3(x))
x = torch.relu(self.conv4(x))
x = torch.relu(self.conv5(x))
x = self.gap(x)
x = x.flatten(start_dim=1)
x = self.fc(x)
return x
def get_inputs():
    """Example single-channel 64x64 batch for the benchmark harness."""
    return [torch.rand(4, 1, 64, 64)]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness: defaults."""
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torchvision
import torchvision.ops
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """In-place bias-add + ReLU epilogue after conv1.

    Feature maps are 64x64 (4096 elements) with 32 channels; in_ptr0 is the
    per-channel bias.
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # dead value from the codegen
    x3 = xindex
    x1 = xindex // 4096 % 32  # channel index for the bias
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    """2x2/stride-2 max pool over 64-wide rows (64x64 -> 32x32).

    Writes the pooled values (out_ptr0) and the argmax position 0..3 within
    each 2x2 window (out_ptr1, int8) for the backward pass.
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # dead value from the codegen
    x0 = xindex % 32  # pooled column
    x1 = xindex // 32  # pooled row (across channels/batch)
    x2 = xindex
    # The four corners of the 2x2 window; input rows are 64 wide.
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
        ='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Track which of the four positions produced the max.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """In-place bias-add + ReLU epilogue after conv2.

    Same as the conv1 epilogue but for 32x32 (1024-element) feature maps.
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # dead value from the codegen
    x3 = xindex
    x1 = xindex // 1024 % 32  # channel index for the bias
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    """2x2/stride-2 max pool over 32-wide rows (32x32 -> 16x16).

    Same structure as the first pooling kernel; emits pooled values and the
    int8 argmax index within each window.
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # dead value from the codegen
    x0 = xindex % 16  # pooled column
    x1 = xindex // 16  # pooled row (across channels/batch)
    x2 = xindex
    # The four corners of the 2x2 window; input rows are 32 wide.
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
        ='evict_last')
    tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
        ='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Track which of the four positions produced the max.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    # In-place conv epilogue: per-channel bias add + ReLU.
    # Channel index = flat index // 256 % 32 (16x16 spatial maps, 32 channels).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # dead mask placeholder emitted by Inductor
    x3 = xindex
    x1 = xindex // 256 % 32  # channel index for the bias lookup
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_per_fused_convolution_mean_relu_threshold_backward_5(in_out_ptr0,
        in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
    # Fused final epilogue: per-channel bias add + ReLU on the last conv
    # output, reduction over the 256 spatial positions into a mean (global
    # average pool) stored in in_out_ptr0, and the (relu <= 0) bool mask
    # stored in out_ptr0 for the backward threshold op. One program per
    # (batch, channel) pair.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)  # dead mask placeholder emitted by Inductor
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)  # dead mask placeholder emitted by Inductor
    r2 = rindex
    x3 = xindex
    x0 = xindex % 32  # channel index for the bias lookup
    tmp0 = tl.load(in_ptr0 + (r2 + 256 * x3), None)
    tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
    tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
    tmp8 = 0.0
    tmp9 = tmp4 <= tmp8  # backward mask: positions the ReLU clamped
    tmp10 = 256.0
    tmp11 = tmp7 / tmp10  # spatial mean == global average pool
    tl.store(out_ptr0 + (r2 + 256 * x3), tmp9, None)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x3, tmp11, None)
def call(args):
    """Inductor-generated forward for the MNIST classifier: five 3x3 convs
    (cuDNN via extern_kernels) with fused bias+ReLU Triton epilogues, two
    2x2 max-pools, a fused global-average-pool, and a final addmm for the
    fc layer. Returns the logits followed by every tensor the backward
    pass needs."""
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13) = args
    args.clear()
    # Guard the expected weight/bias/input shapes and strides.
    assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_5, (32,), (1,))
    assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_7, (32,), (1,))
    assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_9, (32,), (1,))
    assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_11, (32,), (1,))
    assert_size_stride(primals_12, (10, 32), (32, 1))
    assert_size_stride(primals_13, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # conv1 on the 64x64 input, then fused bias+ReLU in place.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2,
            524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        # Max-pool 64 -> 32; buf3 keeps the argmax indices for backward.
        buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
            torch.float32)
        buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf1, buf2,
            buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        # conv2 + fused bias/ReLU.
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(131072)](buf5, primals_5,
            131072, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_5
        # Max-pool 32 -> 16.
        buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
            torch.float32)
        buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf5, buf6,
            buf7, 32768, XBLOCK=128, num_warps=4, num_stages=1)
        # conv3 + fused bias/ReLU.
        buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 32, 16, 16), (8192, 256, 16, 1))
        buf9 = buf8
        del buf8
        triton_poi_fused_convolution_relu_4[grid(32768)](buf9, primals_7,
            32768, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
        # conv4 + fused bias/ReLU (same epilogue kernel reused).
        buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 32, 16, 16), (8192, 256, 16, 1))
        buf11 = buf10
        del buf10
        triton_poi_fused_convolution_relu_4[grid(32768)](buf11, primals_9,
            32768, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
        # conv5, then fused bias + ReLU + global-average-pool; buf16 holds the
        # (relu <= 0) mask for the backward pass.
        buf12 = extern_kernels.convolution(buf11, primals_10, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 32, 16, 16), (8192, 256, 16, 1))
        buf13 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
            float32)
        buf16 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
            torch.bool)
        buf14 = buf13
        del buf13
        triton_per_fused_convolution_mean_relu_threshold_backward_5[grid(128)](
            buf14, buf12, primals_11, buf16, 128, 256, num_warps=2,
            num_stages=1)
        del buf12
        del primals_11
        # Final fc layer: logits = bias + pooled @ weight.T (single addmm).
        buf15 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_13, reinterpret_tensor(buf14, (4, 32),
            (32, 1), 0), reinterpret_tensor(primals_12, (32, 10), (1, 32),
            0), alpha=1, beta=1, out=buf15)
        del primals_13
    return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_10, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf11,
        reinterpret_tensor(buf14, (4, 32), (32, 1), 0), primals_12, buf16)
class DeformableConv2d(nn.Module):
    """Deformable convolution v2: a regular conv whose sampling grid is
    displaced by learned per-position offsets and gated by a learned
    modulation mask. Both auxiliary convs are zero-initialised, so at the
    start the layer behaves like a plain convolution (offsets 0, mask 1).

    NOTE(review): forward() calls torchvision.ops.deform_conv2d, but no
    torchvision import is visible in this chunk — confirm it exists at the
    top of the file.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
            padding=1, bias=False):
        super(DeformableConv2d, self).__init__()
        assert type(kernel_size) == tuple or type(kernel_size) == int
        # Normalise kernel_size and stride to 2-tuples.
        kernel_size = kernel_size if type(kernel_size) == tuple else (
            kernel_size, kernel_size)
        self.stride = stride if type(stride) == tuple else (stride, stride)
        self.padding = padding
        # Predicts (dy, dx) per kernel tap: 2 * kh * kw output channels.
        self.offset_conv = nn.Conv2d(in_channels, 2 * kernel_size[0] *
            kernel_size[1], kernel_size=kernel_size, stride=stride, padding
            =self.padding, bias=True)
        nn.init.constant_(self.offset_conv.weight, 0.0)
        nn.init.constant_(self.offset_conv.bias, 0.0)
        # Predicts one modulation scalar per kernel tap.
        self.modulator_conv = nn.Conv2d(in_channels, 1 * kernel_size[0] *
            kernel_size[1], kernel_size=kernel_size, stride=stride, padding
            =self.padding, bias=True)
        nn.init.constant_(self.modulator_conv.weight, 0.0)
        nn.init.constant_(self.modulator_conv.bias, 0.0)
        # The actual convolution whose taps get deformed.
        self.regular_conv = nn.Conv2d(in_channels=in_channels, out_channels
            =out_channels, kernel_size=kernel_size, stride=stride, padding=
            self.padding, bias=bias)

    def forward(self, x):
        offset = self.offset_conv(x)
        # sigmoid in (0, 1) scaled to (0, 2); equals 1.0 at init (no gating).
        modulator = 2.0 * torch.sigmoid(self.modulator_conv(x))
        x = torchvision.ops.deform_conv2d(input=x, offset=offset, weight=
            self.regular_conv.weight, bias=self.regular_conv.bias, padding=
            self.padding, mask=modulator, stride=self.stride)
        return x
class MNISTClassifierNew(nn.Module):
    """Inductor-compiled MNIST classifier: conv1 -> pool -> conv2 -> pool ->
    conv3 -> conv4 -> conv5 -> global average pool -> fc. Holds the same
    parameters as the eager module; forward feeds them to the generated
    call() graph instead of running layer by layer.

    NOTE(review): call() implements plain-Conv2d semantics for conv4/conv5;
    with deformable=True their offset/modulator parameters are never passed
    to call() — confirm the compiled path is used only for deformable=False.
    """

    def __init__(self, deformable=False):
        super(MNISTClassifierNew, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1,
            bias=True)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1,
            bias=True)
        self.conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1,
            bias=True)
        # conv4/conv5 are optionally deformable.
        conv = nn.Conv2d if deformable is False else DeformableConv2d
        self.conv4 = conv(32, 32, kernel_size=3, stride=1, padding=1, bias=True
            )
        self.conv5 = conv(32, 32, kernel_size=3, stride=1, padding=1, bias=True
            )
        self.pool = nn.MaxPool2d(2)
        self.gap = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(32, 10)

    def forward(self, input_0):
        # Collect weights/biases in the positional order call() expects.
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_8 = self.conv4.weight
        primals_9 = self.conv4.bias
        primals_10 = self.conv5.weight
        primals_11 = self.conv5.bias
        primals_12 = self.fc.weight
        primals_13 = self.fc.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        # output[0] is the logits; the rest are saved-for-backward tensors.
        return output[0]
|
developer0hye/PyTorch-Deformable-Convolution-v2
|
MNISTClassifier
| false
| 15,194
|
[
"MIT"
] | 70
|
3ed601fa70ee111278b95b134caf29e085642bc2
|
https://github.com/developer0hye/PyTorch-Deformable-Convolution-v2/tree/3ed601fa70ee111278b95b134caf29e085642bc2
|
Net
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import Parameter
class Conv1dExt(nn.Conv1d):
    """nn.Conv1d that tracks weight drift from its initial state via a
    per-output-channel normalized cross-correlation (NCC), and supports
    net2net-style channel splitting that keeps tied neighbour layers in
    sync."""

    def __init__(self, *args, **kwargs):
        super(Conv1dExt, self).__init__(*args, **kwargs)
        self.init_ncc()
        # Layers whose *input* channels must split when this layer's output splits.
        self.input_tied_modules = []
        # Layers whose *output* channels must split in lockstep with this one.
        self.output_tied_modules = []

    def init_ncc(self):
        """Snapshot the current weights as the NCC reference point."""
        w = self.weight.view(self.weight.size(0), -1)
        mean = torch.mean(w, dim=1).unsqueeze(1)
        self.t0_factor = w - mean
        self.t0_norm = torch.norm(w, p=2, dim=1)
        self.start_ncc = Variable(torch.zeros(self.out_channels))
        # Re-baseline so NCC reads zero immediately after (re)initialisation.
        self.start_ncc = self.normalized_cross_correlation()

    def normalized_cross_correlation(self):
        """Per-output-channel NCC of the current weights vs. the snapshot.

        Returns:
            1-D tensor of length ``out_channels`` (all zeros right after
            ``init_ncc``).
        """
        w = self.weight.view(self.weight.size(0), -1)
        t_norm = torch.norm(w, p=2, dim=1)
        # Degenerate single-scalar-per-channel case: a 1x1 weight has zero
        # variance, so compare raw magnitudes instead of centred correlations.
        # BUGFIX: was `self.in_channels == 1 & sum(self.kernel_size) == 1`.
        # `&` binds tighter than `==`, so that parsed as the chained
        # comparison `in_channels == (1 & sum(ks)) == 1`, wrongly selecting
        # this branch for any odd kernel-size sum (e.g. kernel_size=3);
        # logical `and` is the intended test.
        if self.in_channels == 1 and sum(self.kernel_size) == 1:
            ncc = w.squeeze() / torch.norm(self.t0_norm, p=2)
            ncc = ncc - self.start_ncc
            return ncc
        mean = torch.mean(w, dim=1).unsqueeze(1)
        t_factor = w - mean
        h_product = self.t0_factor * t_factor
        cov = torch.sum(h_product, dim=1)
        denom = self.t0_norm * t_norm
        ncc = cov / denom
        ncc = ncc - self.start_ncc
        return ncc

    def split_output_channel(self, channel_i):
        """Split one output channel (a feature) into two, but retain summed value

        Args:
            channel_i: (int) number of channel to be split. the ith channel
        """
        self.out_channels += 1
        orig_weight = self.weight.data
        # Random split factors: the two children sum to 2x the parent weight.
        split_pos = 2 * torch.rand(self.in_channels, self.kernel_size[0])
        new_weight = torch.zeros(self.out_channels, self.in_channels,
            self.kernel_size[0])
        if channel_i > 0:
            new_weight[:channel_i, :, :] = orig_weight[:channel_i, :, :]
        new_weight[channel_i, :, :] = orig_weight[channel_i, :, :] * split_pos
        new_weight[channel_i + 1, :, :] = orig_weight[channel_i, :, :] * (2 -
            split_pos)
        # NOTE(review): only the single channel after the split point is
        # copied; channels beyond channel_i + 2 stay zero — confirm callers
        # never split with more than one trailing channel.
        if channel_i + 2 < self.out_channels:
            new_weight[channel_i + 2, :, :] = orig_weight[channel_i + 1, :, :]
        if self.bias is not None:
            orig_bias = self.bias.data
            new_bias = torch.zeros(self.out_channels)
            new_bias[:channel_i + 1] = orig_bias[:channel_i + 1]
            new_bias[channel_i + 1:] = orig_bias[channel_i:]
            self.bias = Parameter(new_bias)
        self.weight = Parameter(new_weight)
        self.init_ncc()

    def split_input_channel(self, channel_i):
        """Duplicate input channel ``channel_i``, giving each copy half the
        original weight so the layer's output is preserved."""
        if channel_i > self.in_channels:
            None  # placeholder left by a stripped warning print
            return
        self.in_channels += 1
        orig_weight = self.weight.data
        dup_slice = orig_weight[:, channel_i, :] * 0.5
        new_weight = torch.zeros(self.out_channels, self.in_channels,
            self.kernel_size[0])
        if channel_i > 0:
            new_weight[:, :channel_i, :] = orig_weight[:, :channel_i, :]
        new_weight[:, channel_i, :] = dup_slice
        new_weight[:, channel_i + 1, :] = dup_slice
        if channel_i + 1 < self.in_channels:
            new_weight[:, channel_i + 2, :] = orig_weight[:, channel_i + 1, :]
        self.weight = Parameter(new_weight)
        self.init_ncc()

    def split_feature(self, feature_i):
        """Splits feature in output and input channels

        Args:
            feature_i: (int)
        """
        self.split_output_channel(channel_i=feature_i)
        for dep in self.input_tied_modules:
            dep.split_input_channel(channel_i=feature_i)
        for dep in self.output_tied_modules:
            dep.split_output_channel(channel_i=feature_i)

    def split_features(self, threshold):
        """Decides which features to split if they are below a specific threshold

        Args:
            threshold: (float?) less than 1.
        """
        ncc = self.normalized_cross_correlation()
        for i, ncc_val in enumerate(ncc):
            if ncc_val < threshold:
                None  # placeholder left by a stripped diagnostic print
                self.split_feature(i)
class Net(nn.Module):
    """Toy four-layer 1D conv net built from Conv1dExt layers, with the
    channel-split tie relationships wired between them."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = Conv1dExt(in_channels=1, out_channels=4, kernel_size=1,
            bias=False)
        self.conv2 = Conv1dExt(in_channels=1, out_channels=4, kernel_size=1,
            bias=False)
        self.conv3 = Conv1dExt(in_channels=4, out_channels=4, kernel_size=1,
            bias=False)
        self.conv4 = Conv1dExt(in_channels=4, out_channels=2, kernel_size=1,
            bias=True)
        # conv1 and conv2 outputs are summed in forward(), so their output
        # splits must happen in lockstep, and both feed conv3's inputs.
        self.conv1.input_tied_modules = [self.conv3]
        self.conv1.output_tied_modules = [self.conv2]
        self.conv2.input_tied_modules = [self.conv3]
        self.conv2.output_tied_modules = [self.conv1]
        self.conv3.input_tied_modules = [self.conv4]

    def forward(self, x):
        hidden = nn.functional.relu(self.conv1(x) + self.conv2(x))
        hidden = nn.functional.relu(self.conv3(hidden))
        return nn.functional.relu(self.conv4(hidden))
def get_inputs():
    """Sample forward input for paritybench: one (4, 1, 64) random tensor."""
    sample = torch.rand([4, 1, 64])
    return [sample]


def get_init_inputs():
    """Constructor args/kwargs for paritybench: Net takes none."""
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
        constexpr):
    # Elementwise relu(conv1_out + conv2_out), accumulated in place into
    # in_out_ptr0 (1024 = 4 batches * 4 channels * 64 positions).
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place elementwise ReLU.
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0,
        in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Final conv epilogue: per-channel bias add + ReLU in place, plus the
    # (relu <= 0) bool mask written to out_ptr0 for the backward threshold op.
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 64 % 2  # channel index for the bias lookup (2 channels)
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # backward mask
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
    """Inductor-generated forward for Net: two parallel 1x1 convs on the same
    input, fused add+ReLU, conv3 + ReLU, then conv4 with a fused bias/ReLU
    epilogue (plus its backward threshold mask). Returns the output followed
    by the tensors saved for the backward pass."""
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    # Guard expected weight/input shapes and strides.
    assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1))
    assert_size_stride(primals_2, (4, 1, 64), (64, 64, 1))
    assert_size_stride(primals_3, (4, 1, 1), (1, 1, 1))
    assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_5, (2, 4, 1), (4, 1, 1))
    assert_size_stride(primals_6, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # conv1(x) and conv2(x) on the same input.
        buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 64), (256, 64, 1))
        buf1 = extern_kernels.convolution(primals_2, primals_3, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 64), (256, 64, 1))
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        # relu(conv1 + conv2), accumulated in place into buf2.
        triton_poi_fused_add_relu_0[grid(1024)](buf2, buf1, 1024, XBLOCK=
            128, num_warps=4, num_stages=1)
        del buf1
        # conv3 + ReLU.
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 64), (256, 64, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_relu_1[grid(1024)](buf4, 1024, XBLOCK=128,
            num_warps=4, num_stages=1)
        # conv4 + fused bias/ReLU; buf7 keeps the backward threshold mask.
        buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf5, (4, 2, 64), (128, 64, 1))
        buf6 = buf5
        del buf5
        buf7 = empty_strided_cuda((4, 2, 64), (128, 64, 1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_2[grid(512)](buf6,
            primals_6, buf7, 512, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_6
    return (buf6, primals_1, primals_2, primals_3, primals_4, primals_5,
        buf2, buf4, buf7)
class Conv1dExt(nn.Conv1d):
    """nn.Conv1d that tracks weight drift from its initial state via a
    per-output-channel normalized cross-correlation (NCC), and supports
    net2net-style channel splitting that keeps tied neighbour layers in
    sync."""

    def __init__(self, *args, **kwargs):
        super(Conv1dExt, self).__init__(*args, **kwargs)
        self.init_ncc()
        # Layers whose *input* channels must split when this layer's output splits.
        self.input_tied_modules = []
        # Layers whose *output* channels must split in lockstep with this one.
        self.output_tied_modules = []

    def init_ncc(self):
        """Snapshot the current weights as the NCC reference point."""
        w = self.weight.view(self.weight.size(0), -1)
        mean = torch.mean(w, dim=1).unsqueeze(1)
        self.t0_factor = w - mean
        self.t0_norm = torch.norm(w, p=2, dim=1)
        self.start_ncc = Variable(torch.zeros(self.out_channels))
        # Re-baseline so NCC reads zero immediately after (re)initialisation.
        self.start_ncc = self.normalized_cross_correlation()

    def normalized_cross_correlation(self):
        """Per-output-channel NCC of the current weights vs. the snapshot.

        Returns:
            1-D tensor of length ``out_channels`` (all zeros right after
            ``init_ncc``).
        """
        w = self.weight.view(self.weight.size(0), -1)
        t_norm = torch.norm(w, p=2, dim=1)
        # Degenerate single-scalar-per-channel case: a 1x1 weight has zero
        # variance, so compare raw magnitudes instead of centred correlations.
        # BUGFIX: was `self.in_channels == 1 & sum(self.kernel_size) == 1`.
        # `&` binds tighter than `==`, so that parsed as the chained
        # comparison `in_channels == (1 & sum(ks)) == 1`, wrongly selecting
        # this branch for any odd kernel-size sum (e.g. kernel_size=3);
        # logical `and` is the intended test.
        if self.in_channels == 1 and sum(self.kernel_size) == 1:
            ncc = w.squeeze() / torch.norm(self.t0_norm, p=2)
            ncc = ncc - self.start_ncc
            return ncc
        mean = torch.mean(w, dim=1).unsqueeze(1)
        t_factor = w - mean
        h_product = self.t0_factor * t_factor
        cov = torch.sum(h_product, dim=1)
        denom = self.t0_norm * t_norm
        ncc = cov / denom
        ncc = ncc - self.start_ncc
        return ncc

    def split_output_channel(self, channel_i):
        """Split one output channel (a feature) into two, but retain summed value

        Args:
            channel_i: (int) number of channel to be split. the ith channel
        """
        self.out_channels += 1
        orig_weight = self.weight.data
        # Random split factors: the two children sum to 2x the parent weight.
        split_pos = 2 * torch.rand(self.in_channels, self.kernel_size[0])
        new_weight = torch.zeros(self.out_channels, self.in_channels,
            self.kernel_size[0])
        if channel_i > 0:
            new_weight[:channel_i, :, :] = orig_weight[:channel_i, :, :]
        new_weight[channel_i, :, :] = orig_weight[channel_i, :, :] * split_pos
        new_weight[channel_i + 1, :, :] = orig_weight[channel_i, :, :] * (2 -
            split_pos)
        # NOTE(review): only the single channel after the split point is
        # copied; channels beyond channel_i + 2 stay zero — confirm callers
        # never split with more than one trailing channel.
        if channel_i + 2 < self.out_channels:
            new_weight[channel_i + 2, :, :] = orig_weight[channel_i + 1, :, :]
        if self.bias is not None:
            orig_bias = self.bias.data
            new_bias = torch.zeros(self.out_channels)
            new_bias[:channel_i + 1] = orig_bias[:channel_i + 1]
            new_bias[channel_i + 1:] = orig_bias[channel_i:]
            self.bias = Parameter(new_bias)
        self.weight = Parameter(new_weight)
        self.init_ncc()

    def split_input_channel(self, channel_i):
        """Duplicate input channel ``channel_i``, giving each copy half the
        original weight so the layer's output is preserved."""
        if channel_i > self.in_channels:
            None  # placeholder left by a stripped warning print
            return
        self.in_channels += 1
        orig_weight = self.weight.data
        dup_slice = orig_weight[:, channel_i, :] * 0.5
        new_weight = torch.zeros(self.out_channels, self.in_channels,
            self.kernel_size[0])
        if channel_i > 0:
            new_weight[:, :channel_i, :] = orig_weight[:, :channel_i, :]
        new_weight[:, channel_i, :] = dup_slice
        new_weight[:, channel_i + 1, :] = dup_slice
        if channel_i + 1 < self.in_channels:
            new_weight[:, channel_i + 2, :] = orig_weight[:, channel_i + 1, :]
        self.weight = Parameter(new_weight)
        self.init_ncc()

    def split_feature(self, feature_i):
        """Splits feature in output and input channels

        Args:
            feature_i: (int)
        """
        self.split_output_channel(channel_i=feature_i)
        for dep in self.input_tied_modules:
            dep.split_input_channel(channel_i=feature_i)
        for dep in self.output_tied_modules:
            dep.split_output_channel(channel_i=feature_i)

    def split_features(self, threshold):
        """Decides which features to split if they are below a specific threshold

        Args:
            threshold: (float?) less than 1.
        """
        ncc = self.normalized_cross_correlation()
        for i, ncc_val in enumerate(ncc):
            if ncc_val < threshold:
                None  # placeholder left by a stripped diagnostic print
                self.split_feature(i)
class NetNew(nn.Module):
    """Inductor-compiled Net: identical Conv1dExt layers and split-tie wiring;
    forward dispatches to the generated call() graph and returns only the
    final activations (discarding the saved-for-backward tensors)."""

    def __init__(self):
        super(NetNew, self).__init__()
        self.conv1 = Conv1dExt(in_channels=1, out_channels=4, kernel_size=1,
            bias=False)
        self.conv2 = Conv1dExt(in_channels=1, out_channels=4, kernel_size=1,
            bias=False)
        self.conv3 = Conv1dExt(in_channels=4, out_channels=4, kernel_size=1,
            bias=False)
        self.conv4 = Conv1dExt(in_channels=4, out_channels=2, kernel_size=1,
            bias=True)
        # Tie wiring mirrors Net: conv1/conv2 outputs are summed, so they
        # split in lockstep and both feed conv3's inputs.
        self.conv1.input_tied_modules = [self.conv3]
        self.conv1.output_tied_modules = [self.conv2]
        self.conv2.input_tied_modules = [self.conv3]
        self.conv2.output_tied_modules = [self.conv1]
        self.conv3.input_tied_modules = [self.conv4]

    def forward(self, input_0):
        # Collect weights in the positional order call() expects.
        primals_1 = self.conv1.weight
        primals_3 = self.conv2.weight
        primals_4 = self.conv3.weight
        primals_5 = self.conv4.weight
        primals_6 = self.conv4.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
|
dhpollack/fast-wavenet.pytorch
|
Net
| false
| 15,195
|
[
"MIT"
] | 98
|
853f6ecb1e8d23a5c01fc2455640c6637d30f2f9
|
https://github.com/dhpollack/fast-wavenet.pytorch/tree/853f6ecb1e8d23a5c01fc2455640c6637d30f2f9
|
ReduceBranch
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ReduceBranch(nn.Module):
    """Stride-2 reduction cell: two 1x1-conv branches — one on the strided
    input, one on the input shifted diagonally by one pixel — concatenated
    along channels, doubling channel count while halving spatial size."""

    def __init__(self, planes, stride=2):
        super(ReduceBranch, self).__init__()
        self.conv1 = nn.Conv2d(planes, planes, kernel_size=1, stride=1,
            padding=0, bias=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=1, stride=1,
            padding=0, bias=False)
        # A 1x1 kernel with stride 2 simply subsamples every other pixel.
        self.avg_pool = nn.AvgPool2d(kernel_size=1, stride=stride, padding=0)

    def forward(self, x):
        direct = self.conv1(self.avg_pool(x))
        # Shift one pixel down-right; zero-pad the freed last row and column.
        shifted = F.pad(x[:, :, 1:, 1:], (0, 1, 0, 1))
        diagonal = self.conv2(self.avg_pool(shifted))
        return torch.cat([direct, diagonal], dim=1)
def get_inputs():
    """Sample forward input for paritybench: one (4, 4, 4, 4) random tensor."""
    sample = torch.rand([4, 4, 4, 4])
    return [sample]


def get_init_inputs():
    """Constructor args/kwargs for paritybench: ReduceBranch(planes=4)."""
    return [[], {'planes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
        constexpr):
    # AvgPool2d(kernel_size=1, stride=2): pick every other pixel of each
    # 4x4 map (stride 8 per row pair, 2 per column pair) into a 2x2 output.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2  # output column
    x1 = xindex // 2  # fused (batch, channel, output row) index
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
        'evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1  # 1x1 window: the "average" is the value itself
    tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_constant_pad_nd_1(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    # Fused diagonal shift (x[:, :, 1:, 1:] zero-padded on the right/bottom)
    # followed by the stride-2 subsample; out-of-range taps read 0.0.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 2 % 2  # output row
    x0 = xindex % 2  # output column
    x3 = xindex // 2
    x4 = xindex
    tmp0 = 2 * x1
    tmp1 = tl.full([1], 3, tl.int64)
    tmp2 = tmp0 < tmp1  # row still inside the unpadded 3-row region?
    tmp3 = 2 * x0
    tmp4 = tmp3 < tmp1  # column still inside the unpadded 3-col region?
    tmp5 = tmp2 & tmp4
    # Offset 5 = one row (4) + one column (1) into the 4x4 map: the shift.
    tmp6 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x3), tmp5 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp7 = 1.0
    tmp8 = tmp6 * tmp7  # 1x1 avg-pool window
    tl.store(out_ptr0 + x4, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
        constexpr):
    # Channel concatenation: output channels 0-3 come from in_ptr0, 4-7
    # from in_ptr1 (dim=1 cat of the two conv branches).
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 8  # output channel
    x0 = xindex % 4  # spatial position within the 2x2 map
    x2 = xindex // 32  # batch index
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)  # dead bound emitted by Inductor
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # first half of the channels?
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)  # dead bound emitted by Inductor
    tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask,
        other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
    """Inductor-generated forward for ReduceBranch: strided subsample +
    conv1, fused shift/pad/subsample + conv2, then channel concatenation.
    Returns the cat result and the tensors saved for backward."""
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Branch 1: stride-2 subsample then 1x1 conv.
        buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_avg_pool2d_0[grid(64)](primals_1, buf0, 64, XBLOCK
            =64, num_warps=1, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
        # Branch 2: fused diagonal shift + subsample, then 1x1 conv.
        buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        triton_poi_fused_avg_pool2d_constant_pad_nd_1[grid(64)](primals_1,
            buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_1
        buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 2, 2), (16, 4, 2, 1))
        # Concatenate the two branches along the channel dimension.
        buf4 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.float32)
        triton_poi_fused_cat_2[grid(128)](buf1, buf3, buf4, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        del buf1
        del buf3
    return buf4, primals_2, primals_3, buf0, buf2
class ReduceBranchNew(nn.Module):
    """Inductor-compiled ReduceBranch: identical parameters; forward gathers
    the two conv weights and dispatches to the generated call() graph."""

    def __init__(self, planes, stride=2):
        super(ReduceBranchNew, self).__init__()
        self.conv1 = nn.Conv2d(planes, planes, kernel_size=1, stride=1,
            padding=0, bias=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=1, stride=1,
            padding=0, bias=False)
        # Kept for interface parity; the stride-2 subsample is fused into the
        # Triton kernels inside call().
        self.avg_pool = nn.AvgPool2d(kernel_size=1, stride=stride, padding=0)

    def forward(self, input_0):
        primals_2 = self.conv1.weight
        primals_3 = self.conv2.weight
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
dnddnjs/pytorch-vision
|
ReduceBranch
| false
| 15,196
|
[
"MIT"
] | 48
|
d432b467774f838bef37372d6cff3576c6559803
|
https://github.com/dnddnjs/pytorch-vision/tree/d432b467774f838bef37372d6cff3576c6559803
|
InstanceNorm
|
import torch
import torch.utils.data
import torch.nn as nn
from torch.nn.parameter import Parameter
class InstanceNorm(nn.Module):
    """Per-(sample, channel) normalization over the spatial dimensions, with
    an optional learned affine scale/shift. The parameters are always
    created; `affine` only controls whether they are applied."""

    def __init__(self, num_features, affine=True, eps=1e-05):
        """`num_features` number of feature channels
        """
        super(InstanceNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        self.scale = Parameter(torch.Tensor(num_features))
        self.shift = Parameter(torch.Tensor(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        # DCGAN-style init: scale ~ N(0, 0.02), shift = 0.
        if self.affine:
            self.scale.data.normal_(mean=0.0, std=0.02)
            self.shift.data.zero_()

    def forward(self, input):
        n, c, h, w = input.size()
        flat = input.view(n, c, h * w)
        centered = flat - flat.mean(2, keepdim=True)
        # Biased variance + eps, inverted via rsqrt so the normalization is
        # a single multiply.
        inv_std = torch.rsqrt((centered * centered).mean(2, keepdim=True) +
            self.eps)
        normed = (centered * inv_std).view(n, c, h, w)
        if not self.affine:
            return normed
        return normed * self.scale[:, None, None] + self.shift[:, None, None]
def get_inputs():
    """Sample forward input for paritybench: one (4, 4, 4, 4) random tensor."""
    sample = torch.rand([4, 4, 4, 4])
    return [sample]


def get_init_inputs():
    """Constructor args/kwargs for paritybench: InstanceNorm(num_features=4)."""
    return [[], {'num_features': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mean_mul_pow_rsqrt_sub_0(in_out_ptr0, in_out_ptr1,
        in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    # One program row per (sample, channel) pair: computes the spatial mean
    # and rsqrt(var + eps), normalizes, and applies scale/shift — the whole
    # InstanceNorm forward in a single reduction kernel. The mean (in_out_ptr0)
    # and inv-std (in_out_ptr1) are also stored for the backward pass.
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead mask placeholder emitted by Inductor
    r1 = rindex
    x0 = xindex
    x2 = xindex % 4  # channel index for the scale/shift lookup
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp18 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5  # spatial mean over the 16 positions
    tmp7 = tmp0 - tmp6
    tmp8 = tmp7 * tmp7
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp11 = tl.where(xmask, tmp9, 0)
    tmp12 = tl.sum(tmp11, 1)[:, None]
    tmp13 = tmp12 / tmp5  # biased variance
    tmp14 = 1e-05
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.rsqrt(tmp15)  # 1 / sqrt(var + eps)
    tmp17 = tmp7 * tmp16  # normalized value
    tmp19 = tmp17 * tmp18  # * scale
    tmp21 = tmp19 + tmp20  # + shift
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + x0, tmp16, xmask)
    tl.store(out_ptr0 + (r1 + 16 * x0), tmp21, xmask)
def call(args):
    """Inductor-generated forward for InstanceNorm: a single fused reduction
    kernel. Returns the normalized output plus the per-(sample, channel)
    mean and inv-std tensors kept for the backward pass."""
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf1/buf3: (4, 4, 1) mean and inv-std scratch, stride-adjusted views.
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf3 = reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0)
        del buf2
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_mean_mul_pow_rsqrt_sub_0[grid(16)](buf1, buf3,
            primals_1, primals_2, primals_3, buf4, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del primals_2
        del primals_3
    return buf4, primals_1, buf1, buf3
class InstanceNormNew(nn.Module):
    """Inductor-compiled InstanceNorm: same parameters and init; forward runs
    the fused Triton kernel via call().

    NOTE(review): call() always applies scale/shift — the eager module's
    affine=False branch is not represented in the compiled graph; confirm
    this class is only instantiated with affine=True.
    """

    def __init__(self, num_features, affine=True, eps=1e-05):
        """`num_features` number of feature channels
        """
        super(InstanceNormNew, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        self.scale = Parameter(torch.Tensor(num_features))
        self.shift = Parameter(torch.Tensor(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        # DCGAN-style init: scale ~ N(0, 0.02), shift = 0.
        if self.affine:
            self.scale.data.normal_(mean=0.0, std=0.02)
            self.shift.data.zero_()

    def forward(self, input_0):
        primals_2 = self.scale
        primals_3 = self.shift
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
doantientai/augmented_cyclegan
|
InstanceNorm
| false
| 15,197
|
[
"MIT"
] | 133
|
821274577e71c412198356ad6302c982554d558c
|
https://github.com/doantientai/augmented_cyclegan/tree/821274577e71c412198356ad6302c982554d558c
|
Actor
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Actor(nn.Module):
    """Gaussian policy head: two ReLU hidden layers, then separate linear
    heads for the mean and the (clamped) log-std of the action
    distribution."""

    def __init__(self, state_size, action_size, args, log_std_min=-20,
            log_std_max=2):
        super(Actor, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        hidden = args.hidden_size
        self.fc1 = nn.Linear(state_size, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, action_size)  # mean head
        self.fc4 = nn.Linear(hidden, action_size)  # log-std head

    def forward(self, x):
        h = torch.relu(self.fc1(x))
        h = torch.relu(self.fc2(h))
        mu = self.fc3(h)
        # Clamp log-std into a sane range before exponentiating.
        log_std = self.fc4(h).clamp(min=self.log_std_min, max=self.log_std_max)
        return mu, log_std.exp()
def get_inputs():
    """Sample forward input for paritybench: one (4, 4, 4, 4) random tensor."""
    sample = torch.rand([4, 4, 4, 4])
    return [sample]


def get_init_inputs():
    """Constructor args/kwargs for paritybench: Actor(4, 4, mock args)."""
    init_kwargs = {'state_size': 4, 'action_size': 4, 'args': _mock_config(
        hidden_size=4)}
    return [[], init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused bias-add + ReLU over a flat 256-element tensor (4x4x4x4).
    # in_out_ptr0: matmul output, overwritten in place with relu(x + bias).
    # in_ptr0:     per-feature bias (4 values, broadcast via xindex % 4).
    # out_ptr0:    boolean mask (activation <= 0) kept for the backward pass.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    # relu(x + b) written back in place; zero-mask stored separately.
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_clamp_exp_ge_le_logical_and_1(in_ptr0, in_ptr1,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Fused bias-add + clamp(-20, 2) + exp for the log-std head.
    # out_ptr0 receives std = exp(clamp(x + bias, -20, 2)); out_ptr1 a
    # boolean "inside the clamp bounds" mask used by clamp's backward.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = -20.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 2.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp2 >= tmp3
    tmp9 = tmp2 <= tmp5
    tmp10 = tmp8 & tmp9
    tl.store(out_ptr0 + x2, tmp7, xmask)
    tl.store(out_ptr1 + x2, tmp10, xmask)
def call(args):
    """Compiled forward pass for the Actor MLP.

    args: [fc1.weight, fc1.bias, input, fc2.weight, fc2.bias,
           fc3.weight, fc3.bias, fc4.weight, fc4.bias].
    Returns (mu, std, ...intermediates retained for the backward pass).
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # fc1: x @ W1^T; bias + ReLU are fused into the Triton kernel below.
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        # fc2 with the same fused bias+ReLU treatment.
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
            primals_5, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        # fc3: mu head, bias fused via addmm.
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        # fc4: log-std head; clamp + exp fused into std by the kernel below.
        buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_clamp_exp_ge_le_logical_and_1[grid(256)](buf5,
            primals_9, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf5
        del primals_9
    return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
        buf3, (64, 4), (4, 1), 0
        ), buf6, buf7, primals_8, primals_6, buf8, primals_4, buf9
class ActorNew(nn.Module):
    """Triton-accelerated variant of `Actor`; delegates to the fused `call`."""

    def __init__(self, state_size, action_size, args, log_std_min=-20,
        log_std_max=2):
        super(ActorNew, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        hidden = args.hidden_size
        self.fc1 = nn.Linear(state_size, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, action_size)
        self.fc4 = nn.Linear(hidden, action_size)

    def forward(self, input_0):
        # Pack weights/biases in the positional order expected by `call`:
        # fc1.w, fc1.b, input, fc2.w, fc2.b, fc3.w, fc3.b, fc4.w, fc4.b.
        params = [self.fc1.weight, self.fc1.bias, input_0,
            self.fc2.weight, self.fc2.bias, self.fc3.weight, self.fc3.bias,
            self.fc4.weight, self.fc4.bias]
        output = call(params)
        # (mu, std)
        return output[0], output[1]
|
dongminlee94/Samsung-DRL-Code
|
Actor
| false
| 15,198
|
[
"MIT"
] | 116
|
c96f8739a09cfd708c265954ee8ecf0ea3b67395
|
https://github.com/dongminlee94/Samsung-DRL-Code/tree/c96f8739a09cfd708c265954ee8ecf0ea3b67395
|
MultiHeadedAttention
|
import torch
from torch import nn
from torch.nn import functional as F
def same_tensor(tensor, *args):
    """ Do the input tensors all point to the same underlying data """
    return all(
        torch.is_tensor(other)
        and tensor.device == other.device
        and tensor.dtype == other.dtype
        and tensor.data_ptr() == other.data_ptr()
        for other in args
    )
class MultiHeadedAttention(nn.Module):
    """ Implement a multi-headed attention module """

    def __init__(self, embed_dim, num_heads=1):
        """ Initialize the attention module """
        super(MultiHeadedAttention, self).__init__()
        assert embed_dim % num_heads == 0, f'num_heads={num_heads} should evenly divide embed_dim={embed_dim}'
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.projection_dim = embed_dim // num_heads
        # 1/sqrt(d_head) scaling applied to the attention logits.
        self.scale = self.projection_dim ** -0.5
        # Single weight holding the three projections stacked row-wise as
        # [values; keys; queries] (see the index argument of `project`).
        self.input_weights = nn.Parameter(torch.Tensor(3 * embed_dim,
            embed_dim))
        self.output_projection = nn.Linear(embed_dim, embed_dim, bias=False)
        self.reset_parameters()

    def reset_parameters(self):
        """ Reset parameters using xavier initialization """
        gain = nn.init.calculate_gain('linear')
        nn.init.xavier_uniform_(self.input_weights, gain)
        nn.init.xavier_uniform_(self.output_projection.weight, gain)

    def project(self, inputs, index=0, chunks=1):
        """ Produce a linear projection using the weights

        `index` selects which stacked projection to start from (0=values,
        1=keys, 2=queries); `chunks` projects that many consecutive slices
        in one matmul and splits the result.  Each projection is reshaped
        to (batch * num_heads, seq, projection_dim).
        """
        batch_size = inputs.shape[0]
        start = index * self.embed_dim
        end = start + chunks * self.embed_dim
        projections = F.linear(inputs, self.input_weights[start:end]).chunk(
            chunks, dim=-1)
        output_projections = []
        for projection in projections:
            # (B, S, H, D) -> (B, H, S, D) -> (B*H, S, D)
            output_projections.append(projection.view(batch_size, -1, self.
                num_heads, self.projection_dim).transpose(2, 1).contiguous(
                ).view(batch_size * self.num_heads, -1, self.projection_dim))
        return output_projections

    def attention(self, values, keys, queries, key_mask=None, mask=None):
        """ Scaled dot product attention with optional masks """
        logits = self.scale * torch.bmm(queries, keys.transpose(2, 1))
        if mask is not None:
            logits += mask
        if key_mask is not None:
            # Unfold the head dimension to broadcast the per-batch key mask,
            # then fill masked key positions with -inf (in place).
            logits_shape = logits.shape
            batch_size = logits_shape[0] // self.num_heads
            logits = logits.view(batch_size, self.num_heads, logits_shape[1
                ], logits_shape[2])
            logits.masked_fill_(key_mask[:, None, None], float('-inf'))
            logits = logits.view(logits_shape)
        attended = torch.bmm(F.softmax(logits, dim=-1), values)
        batch_size = queries.shape[0] // self.num_heads
        # (B*H, S, D) -> (B, S, H*D)
        return attended.view(batch_size, self.num_heads, -1, self.
            projection_dim).transpose(2, 1).contiguous().view(batch_size, -
            1, self.num_heads * self.projection_dim)

    def forward(self, values, keys, queries, key_mask=None, attention_mask=
        None, num_queries=0):
        """ Forward pass of the attention

        When inputs alias the same storage (self-attention) the three
        projections are fused into a single matmul via `chunks`.
        `num_queries` keeps only the trailing queries when nonzero.
        """
        if same_tensor(values, keys, queries):
            values, keys, queries = self.project(values, chunks=3)
        elif same_tensor(values, keys):
            values, keys = self.project(values, chunks=2)
            queries, = self.project(queries, 2)
        else:
            values, = self.project(values, 0)
            keys, = self.project(keys, 1)
            queries, = self.project(queries, 2)
        if num_queries:
            queries = queries[:, -num_queries:]
        attended = self.attention(values, keys, queries, key_mask,
            attention_mask)
        return self.output_projection(attended)
def get_inputs():
    """Three identically shaped tensors: values, keys and queries."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape), torch.rand(shape), torch.rand(shape)]


def get_init_inputs():
    """Constructor (args, kwargs) for the attention module."""
    return [[], {'embed_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    # Row-wise numerically stable softmax over 16-element rows, with the
    # attention scale of 0.5 (= projection_dim ** -0.5 for dim 4) folded in:
    # out = exp(0.5 * (x - max(x))) / sum(...) == softmax(0.5 * x), since
    # the constant 0.5 * max shift cancels in the normalization.
    xnumel = 64
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    # Row max for numerical stability.
    tmp5 = tl.where(xmask, tmp3, float('-inf'))
    tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
    tmp7 = tmp2 - tmp6
    tmp8 = 0.5
    tmp9 = tmp7 * tmp8
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.where(xmask, tmp11, 0)
    tmp14 = tl.sum(tmp13, 1)[:, None]
    tmp15 = tmp10 / tmp14
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask)
def call(args):
    """Compiled multi-head attention forward (num_heads=1, no masks).

    args: [values, input_weights (12x4, stacked V/K/Q), keys, queries,
           output_projection.weight].
    Returns the attended output plus intermediates for the backward pass.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (12, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Value / key / query projections use row slices of input_weights at
        # element offsets 0, 16 and 32 respectively.
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2)
        del primals_2
        # Q @ K^T; the 0.5 scale is fused into the softmax kernel.
        buf3 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1),
            0), reinterpret_tensor(buf1, (4, 4, 16), (64, 1, 4), 0), out=buf3)
        buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused__softmax_0[grid(64)](buf3, buf6, 64, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del buf3
        # Attention weights @ V, then the output projection.
        buf7 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        extern_kernels.bmm(buf6, reinterpret_tensor(buf0, (4, 16, 4), (64,
            4, 1), 0), out=buf7)
        buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf8)
    return reinterpret_tensor(buf8, (4, 16, 4), (64, 4, 1), 0
        ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
        ), buf6, reinterpret_tensor(buf7, (64, 4), (4, 1), 0
        ), primals_5, reinterpret_tensor(buf0, (4, 4, 16), (64, 1, 4), 0
        ), reinterpret_tensor(buf2, (4, 4, 16), (64, 1, 4), 0
        ), reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0)
def same_tensor(tensor, *args):
    """ Do the input tensors all point to the same underlying data """
    for other in args:
        is_match = (torch.is_tensor(other) and
                    tensor.device == other.device and
                    tensor.dtype == other.dtype and
                    tensor.data_ptr() == other.data_ptr())
        if not is_match:
            return False
    return True
class MultiHeadedAttentionNew(nn.Module):
    """ Implement a multi-headed attention module

    Triton-compiled variant: `forward` runs the fused `call` pipeline.
    `project` and `attention` are retained from the reference module but
    are not invoked by this forward path; key/attention masks are not
    exposed by this forward signature.
    """

    def __init__(self, embed_dim, num_heads=1):
        """ Initialize the attention module """
        super(MultiHeadedAttentionNew, self).__init__()
        assert embed_dim % num_heads == 0, f'num_heads={num_heads} should evenly divide embed_dim={embed_dim}'
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.projection_dim = embed_dim // num_heads
        # 1/sqrt(d_head) logit scale.
        self.scale = self.projection_dim ** -0.5
        # Stacked [values; keys; queries] projection weights.
        self.input_weights = nn.Parameter(torch.Tensor(3 * embed_dim,
            embed_dim))
        self.output_projection = nn.Linear(embed_dim, embed_dim, bias=False)
        self.reset_parameters()

    def reset_parameters(self):
        """ Reset parameters using xavier initialization """
        gain = nn.init.calculate_gain('linear')
        nn.init.xavier_uniform_(self.input_weights, gain)
        nn.init.xavier_uniform_(self.output_projection.weight, gain)

    def project(self, inputs, index=0, chunks=1):
        """ Produce a linear projection using the weights """
        batch_size = inputs.shape[0]
        start = index * self.embed_dim
        end = start + chunks * self.embed_dim
        projections = F.linear(inputs, self.input_weights[start:end]).chunk(
            chunks, dim=-1)
        output_projections = []
        for projection in projections:
            # (B, S, H, D) -> (B, H, S, D) -> (B*H, S, D)
            output_projections.append(projection.view(batch_size, -1, self.
                num_heads, self.projection_dim).transpose(2, 1).contiguous(
                ).view(batch_size * self.num_heads, -1, self.projection_dim))
        return output_projections

    def attention(self, values, keys, queries, key_mask=None, mask=None):
        """ Scaled dot product attention with optional masks """
        logits = self.scale * torch.bmm(queries, keys.transpose(2, 1))
        if mask is not None:
            logits += mask
        if key_mask is not None:
            logits_shape = logits.shape
            batch_size = logits_shape[0] // self.num_heads
            logits = logits.view(batch_size, self.num_heads, logits_shape[1
                ], logits_shape[2])
            logits.masked_fill_(key_mask[:, None, None], float('-inf'))
            logits = logits.view(logits_shape)
        attended = torch.bmm(F.softmax(logits, dim=-1), values)
        batch_size = queries.shape[0] // self.num_heads
        return attended.view(batch_size, self.num_heads, -1, self.
            projection_dim).transpose(2, 1).contiguous().view(batch_size, -
            1, self.num_heads * self.projection_dim)

    def forward(self, input_0, input_1, input_2):
        # Positional order expected by `call`:
        # values, input_weights, keys, queries, output weight.
        primals_2 = self.input_weights
        primals_5 = self.output_projection.weight
        primals_1 = input_0
        primals_3 = input_1
        primals_4 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
|
dojoteef/synst
|
MultiHeadedAttention
| false
| 15,199
|
[
"BSD-3-Clause"
] | 81
|
a1842682cf757e8a501cd9cee16f20e1a14158f1
|
https://github.com/dojoteef/synst/tree/a1842682cf757e8a501cd9cee16f20e1a14158f1
|
GeneralizedMeanPooling
|
from torch.nn import Module
import torch
import torch.nn.functional as F
from torch.nn.modules import Module
class GeneralizedMeanPooling(Module):
    """2D generalized-mean (GeM) pooling over an input of several planes.

    Computes :math:`f(X) = pow(mean(pow(clamp(X, eps), p)), 1/p)` over the
    spatial dimensions.  p = 1 gives average pooling; p -> infinity
    approaches max pooling.  Output spatial size is H x W for any input
    size; the number of output features equals the number of input planes.

    Args:
        norm: the exponent p (must be > 0).
        output_size: target output size H x W — a tuple (H, W) or a single
            int H for a square H x H output; ``None`` keeps the input size.
        eps: clamp floor applied before the power for numerical stability.
    """

    def __init__(self, norm, output_size=1, eps=1e-06):
        super(GeneralizedMeanPooling, self).__init__()
        assert norm > 0
        self.p = float(norm)
        self.output_size = output_size
        self.eps = eps

    def forward(self, x):
        # Clamp away non-positive values so the fractional root is stable.
        powered = x.clamp(min=self.eps).pow(self.p)
        pooled = F.adaptive_avg_pool2d(powered, self.output_size)
        return pooled.pow(1.0 / self.p)

    def __repr__(self):
        return '{}({}, output_size={})'.format(self.__class__.__name__,
            self.p, self.output_size)
def get_inputs():
    """One random activation tensor of shape (4, 4, 4, 4)."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs): GeM norm of 4."""
    return [[], {'norm': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
from torch.nn.modules import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_clamp_mean_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    # Fused GeM pooling for p = 4: clamp(x, 1e-6) ** 4 (via two squarings),
    # mean over the 16 spatial positions of each (batch, channel) plane,
    # then the 1/4 power.  One output scalar per plane.
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = 1e-06
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    # x^4 computed as (x^2)^2.
    tmp3 = tmp2 * tmp2
    tmp4 = tmp3 * tmp3
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp5, 0)
    tmp8 = tl.sum(tmp7, 1)[:, None]
    tmp9 = 16.0
    tmp10 = tmp8 / tmp9
    tmp11 = 0.25
    tmp12 = libdevice.pow(tmp10, tmp11)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp12, xmask)
def call(args):
    """Compiled GeM pooling forward.

    args: [input of shape (4, 4, 4, 4)].
    Returns a single (4, 4, 1, 1) pooled tensor.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        # Reinterpret to contiguous strides; the kernel writes in place.
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_per_fused_clamp_mean_pow_0[grid(16)](buf1, arg0_1, 16, 16,
            XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf1,
class GeneralizedMeanPoolingNew(Module):
    """Generalized-mean (GeM) pooling backed by the fused Triton kernel.

    Computes f(X) = pow(mean(pow(clamp(X, eps), p)), 1/p) over the spatial
    dimensions via `call`.  p = 1 gives average pooling; p -> infinity
    approaches max pooling.

    Args:
        norm: the exponent p (must be > 0).
        output_size: target output size of the pooled map.
        eps: clamp floor applied before the power.
    """

    def __init__(self, norm, output_size=1, eps=1e-06):
        super(GeneralizedMeanPoolingNew, self).__init__()
        assert norm > 0
        self.p = float(norm)
        self.output_size = output_size
        self.eps = eps

    def __repr__(self):
        return '{}({}, output_size={})'.format(self.__class__.__name__,
            self.p, self.output_size)

    def forward(self, input_0):
        return call([input_0])[0]
|
dongan-beta/deep-image-retrieval
|
GeneralizedMeanPooling
| false
| 15,200
|
[
"BSD-3-Clause"
] | 253
|
3e0885f88da328aefb7abb2fa350f8860a4bd52d
|
https://github.com/dongan-beta/deep-image-retrieval/tree/3e0885f88da328aefb7abb2fa350f8860a4bd52d
|
TripletLogExpLoss
|
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
class TripletLogExpLoss(nn.Module):
    """Triplet loss with a soft log-exp margin.

    Given anchor ``a``, positive ``p`` and negative ``n`` embedding batches
    of shape (N, D), computes

        L(a, p, n) = mean_i log(1 + exp(d(a_i, p_i) - d(a_i, n_i)))

    where ``d`` is the p-norm pairwise distance.  The distance is described
    in `Improving Pairwise Ranking for Multi-Label Image Classification`
    by Y. Li et al.

    Args:
        p: order of the distance norm (default Euclidean).
        eps: numerical stabilizer forwarded to ``F.pairwise_distance``.
        swap: if True, use the smaller of d(a, n) and d(p, n) as the
            negative distance (the "distance swap" of the TFeat paper,
            http://www.iis.ee.ic.ac.uk/%7Evbalnt/shallow_descr/TFeat_paper.pdf).
    """

    def __init__(self, p=2, eps=1e-06, swap=False):
        super(TripletLogExpLoss, self).__init__()
        self.p = p
        self.eps = eps
        self.swap = swap

    def forward(self, anchor, positive, negative):
        assert anchor.size() == positive.size(
            ), 'Input sizes between positive and negative must be equal.'
        assert anchor.size() == negative.size(
            ), 'Input sizes between anchor and negative must be equal.'
        assert positive.size() == negative.size(
            ), 'Input sizes between positive and negative must be equal.'
        assert anchor.dim() == 2, 'Input must be a 2D matrix.'
        dist_pos = F.pairwise_distance(anchor, positive, self.p, self.eps)
        dist_neg = F.pairwise_distance(anchor, negative, self.p, self.eps)
        if self.swap:
            # Distance swap: also consider the positive-negative distance.
            dist_swap = F.pairwise_distance(positive, negative, self.p,
                self.eps)
            dist_neg = torch.min(dist_neg, dist_swap)
        # Soft margin log(1 + exp(d_p - d_n)), averaged over the batch.
        return torch.mean(torch.log(1 + torch.exp(dist_pos - dist_neg)))

    def eval_func(self, dp, dn):
        """NumPy reference evaluation of the per-pair loss."""
        return np.log(1 + np.exp(dp - dn))
def get_inputs():
    """Anchor, positive and negative embedding batches of shape (4, 4)."""
    shape = [4, 4]
    return [torch.rand(shape), torch.rand(shape), torch.rand(shape)]


def get_init_inputs():
    """Constructor (args, kwargs): all defaults."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_exp_log_mean_norm_sub_0(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Fully fused triplet log-exp loss for a (4, 4) batch:
    #   d_p = || (anchor - positive) + 1e-6 ||_2   (D=4, unrolled below)
    #   d_n = || (anchor - negative) + 1e-6 ||_2
    #   loss = mean over the 4 rows of log(1 + exp(d_p - d_n))
    # in_ptr0 = anchor, in_ptr1 = positive, in_ptr2 = negative;
    # the scalar result is written through in_out_ptr0.
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
    # Anchor-positive distance, dimension-by-dimension (eps added to the
    # difference as in F.pairwise_distance).
    tmp2 = tmp0 - tmp1
    tmp3 = 1e-06
    tmp4 = tmp2 + tmp3
    tmp5 = tmp4 * tmp4
    tmp8 = tmp6 - tmp7
    tmp9 = tmp8 + tmp3
    tmp10 = tmp9 * tmp9
    tmp11 = tmp5 + tmp10
    tmp14 = tmp12 - tmp13
    tmp15 = tmp14 + tmp3
    tmp16 = tmp15 * tmp15
    tmp17 = tmp11 + tmp16
    tmp20 = tmp18 - tmp19
    tmp21 = tmp20 + tmp3
    tmp22 = tmp21 * tmp21
    tmp23 = tmp17 + tmp22
    tmp24 = libdevice.sqrt(tmp23)
    # Anchor-negative distance.
    tmp26 = tmp0 - tmp25
    tmp27 = tmp26 + tmp3
    tmp28 = tmp27 * tmp27
    tmp30 = tmp6 - tmp29
    tmp31 = tmp30 + tmp3
    tmp32 = tmp31 * tmp31
    tmp33 = tmp28 + tmp32
    tmp35 = tmp12 - tmp34
    tmp36 = tmp35 + tmp3
    tmp37 = tmp36 * tmp36
    tmp38 = tmp33 + tmp37
    tmp40 = tmp18 - tmp39
    tmp41 = tmp40 + tmp3
    tmp42 = tmp41 * tmp41
    tmp43 = tmp38 + tmp42
    tmp44 = libdevice.sqrt(tmp43)
    # log(1 + exp(d_p - d_n)) reduced to the batch mean.
    tmp45 = tmp24 - tmp44
    tmp46 = tl_math.exp(tmp45)
    tmp47 = 1.0
    tmp48 = tmp46 + tmp47
    tmp49 = tl_math.log(tmp48)
    tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK])
    tmp52 = tl.sum(tmp50, 1)[:, None]
    tmp53 = 4.0
    tmp54 = tmp52 / tmp53
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp54, None)
def call(args):
    """Compiled triplet log-exp loss.

    args: [anchor, positive, negative], each (4, 4).
    Returns a 0-dim scalar loss tensor.
    """
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    assert_size_stride(arg2_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_per_fused_add_exp_log_mean_norm_sub_0[grid(1)](buf2, arg0_1,
            arg1_1, arg2_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf2,
class TripletLogExpLossNew(nn.Module):
    """Triplet log-exp loss computed by a fused Triton kernel.

    For anchor/positive/negative batches of shape (N, D) the loss is

        L = mean_i log(1 + exp(d(a_i, p_i) - d(a_i, n_i)))

    with p-norm pairwise distances, as in `Improving Pairwise Ranking for
    Multi-Label Image Classification` by Y. Li et al.  The compiled kernel
    uses squared differences with a 1e-06 stabilizer under a square root,
    i.e. the default (p=2, no swap) configuration.
    """

    def __init__(self, p=2, eps=1e-06, swap=False):
        super(TripletLogExpLossNew, self).__init__()
        self.p = p
        self.eps = eps
        self.swap = swap

    def eval_func(self, dp, dn):
        """NumPy reference evaluation of the per-pair loss."""
        return np.log(1 + np.exp(dp - dn))

    def forward(self, input_0, input_1, input_2):
        # (anchor, positive, negative) in the order `call` expects.
        tensors = [input_0, input_1, input_2]
        return call(tensors)[0]
|
dongan-beta/deep-image-retrieval
|
TripletLogExpLoss
| false
| 15,201
|
[
"BSD-3-Clause"
] | 253
|
3e0885f88da328aefb7abb2fa350f8860a4bd52d
|
https://github.com/dongan-beta/deep-image-retrieval/tree/3e0885f88da328aefb7abb2fa350f8860a4bd52d
|
APLoss_dist
|
import torch
import numpy as np
import torch.nn as nn
def sim_to_dist(scores):
    """Map similarity scores to distances via 1 - sqrt(2.001 - 2 * s)."""
    shifted = 2.001 - 2 * scores
    return 1 - torch.sqrt(shifted)
class APLoss(nn.Module):
    """ Differentiable AP loss, through quantization. From the paper:
        Learning with Average Precision: Training Image Retrieval with a Listwise Loss
        Jerome Revaud, Jon Almazan, Rafael Sampaio de Rezende, Cesar de Souza
        https://arxiv.org/abs/1906.07589
        Input: (N, M)   values in [min, max]
        label: (N, M)   values in {0, 1}
        Returns: 1 - mAP (mean AP for each n in {1..N})
        Note: typically, this is what you wanna minimize
    """

    def __init__(self, nq=25, min=0, max=1):
        nn.Module.__init__(self)
        assert isinstance(nq, int) and 2 <= nq <= 100
        self.nq = nq
        self.min = min
        self.max = max
        gap = max - min
        assert gap > 0
        # Quantizer: a frozen 1x1 Conv1d whose 2*nq output channels encode
        # nq triangular membership functions over [min, max]; the forward
        # pass takes min(down-ramp, up-ramp).clamp(0) to recover them.
        self.quantizer = q = nn.Conv1d(1, 2 * nq, kernel_size=1, bias=True)
        q.weight = nn.Parameter(q.weight.detach(), requires_grad=False)
        q.bias = nn.Parameter(q.bias.detach(), requires_grad=False)
        a = (nq - 1) / gap
        # First nq channels: descending ramps; last nq: ascending ramps.
        q.weight[:nq] = -a
        q.bias[:nq] = torch.from_numpy(a * min + np.arange(nq, 0, -1))
        q.weight[nq:] = a
        q.bias[nq:] = torch.from_numpy(np.arange(2 - nq, 2, 1) - a * min)
        # Boundary bins are one-sided: constant 1 on their outer ramp.
        q.weight[0] = q.weight[-1] = 0
        q.bias[0] = q.bias[-1] = 1

    def forward(self, x, label, qw=None, ret='1-mAP'):
        """Quantized AP: x are scores in [min, max], label in {0, 1}.

        qw: optional per-row weights applied to the AP values.
        ret: '1-mAP' returns the scalar loss; 'AP' the per-row AP vector.
        """
        assert x.shape == label.shape
        N, M = x.shape
        # Soft-assign each score to the nq bins: (N, nq, M).
        q = self.quantizer(x.unsqueeze(1))
        q = torch.min(q[:, :self.nq], q[:, self.nq:]).clamp(min=0)
        # nbs: per-bin score mass; rec: per-bin positive mass.
        nbs = q.sum(dim=-1)
        rec = (q * label.view(N, 1, M).float()).sum(dim=-1)
        # Cumulative precision per bin, then AP = sum(prec * normalized rec).
        prec = rec.cumsum(dim=-1) / (1e-16 + nbs.cumsum(dim=-1))
        rec /= rec.sum(dim=-1).unsqueeze(1)
        ap = (prec * rec).sum(dim=-1)
        if ret == '1-mAP':
            if qw is not None:
                ap *= qw
            return 1 - ap.mean()
        elif ret == 'AP':
            assert qw is None
            return ap
        else:
            raise ValueError('Bad return type for APLoss(): %s' % str(ret))

    def measures(self, x, gt, loss=None):
        if loss is None:
            loss = self.forward(x, gt)
        return {'loss_ap': float(loss)}
class APLoss_dist(APLoss):
    """APLoss applied to similarity scores: converts them to distances first."""

    def forward(self, x, label, **kw):
        distances = sim_to_dist(x)
        return APLoss.forward(self, distances, label, **kw)
def get_inputs():
    """Similarity scores and binary labels, both of shape (4, 4)."""
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs): all defaults."""
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_rsub_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Elementwise sim_to_dist: out = 1 - sqrt(2.001 - 2 * x) over 16 values.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 2.0
    tmp2 = tmp0 * tmp1
    tmp3 = 2.001
    tmp4 = tmp3 - tmp2
    tmp5 = libdevice.sqrt(tmp4)
    tmp6 = 1.0
    tmp7 = tmp6 - tmp5
    tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
    # Binary add combiner used by tl.associative_scan for cumulative sums.
    tmp0 = arg0_0 + arg1_0
    return tmp0
@triton.jit
def triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1(in_out_ptr1,
    in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Fused AP computation, one batch row (x0) per program, nq=25 bins (r1):
    #   - rebuilds the quantizer output q = clamp(min(down-ramp, up-ramp), 0)
    #     by adding the conv bias halves (in_ptr1, conv ran bias-free),
    #     unrolled over the M=4 scores of the row,
    #   - nbs (tmp40) = per-bin score mass, rec (tmp37) = label-weighted mass
    #     using labels from in_ptr2,
    #   - prec = cumsum(rec) / (1e-16 + cumsum(nbs)) via associative scans,
    #   - AP = sum(prec * rec / sum(rec)), stored per row in in_out_ptr1.
    xnumel = 4
    rnumel = 25
    RBLOCK: tl.constexpr = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    # Conv output is laid out (batch, 2*nq, M): first 100 elems per row are
    # the descending ramps, the next 100 (offset 100) the ascending ramps.
    tmp0 = tl.load(in_ptr0 + (4 * r1 + 200 * x0), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp1 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0
        )
    tmp3 = tl.load(in_ptr0 + (100 + 4 * r1 + 200 * x0), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp4 = tl.load(in_ptr1 + (25 + r1), rmask, eviction_policy='evict_last',
        other=0.0)
    tmp9 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (1 + 4 * r1 + 200 * x0), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp13 = tl.load(in_ptr0 + (101 + 4 * r1 + 200 * x0), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp17 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp20 = tl.load(in_ptr0 + (2 + 4 * r1 + 200 * x0), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp22 = tl.load(in_ptr0 + (102 + 4 * r1 + 200 * x0), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp26 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp29 = tl.load(in_ptr0 + (3 + 4 * r1 + 200 * x0), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp31 = tl.load(in_ptr0 + (103 + 4 * r1 + 200 * x0), rmask & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp35 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    # Score 0: q = clamp(min(ramps), 0); accumulate label-weighted (rec).
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = triton_helpers.minimum(tmp2, tmp5)
    tmp7 = 0.0
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp10 = tmp8 * tmp9
    # Scores 1-3, same pattern.
    tmp12 = tmp11 + tmp1
    tmp14 = tmp13 + tmp4
    tmp15 = triton_helpers.minimum(tmp12, tmp14)
    tmp16 = triton_helpers.maximum(tmp15, tmp7)
    tmp18 = tmp16 * tmp17
    tmp19 = tmp10 + tmp18
    tmp21 = tmp20 + tmp1
    tmp23 = tmp22 + tmp4
    tmp24 = triton_helpers.minimum(tmp21, tmp23)
    tmp25 = triton_helpers.maximum(tmp24, tmp7)
    tmp27 = tmp25 * tmp26
    tmp28 = tmp19 + tmp27
    tmp30 = tmp29 + tmp1
    tmp32 = tmp31 + tmp4
    tmp33 = triton_helpers.minimum(tmp30, tmp32)
    tmp34 = triton_helpers.maximum(tmp33, tmp7)
    tmp36 = tmp34 * tmp35
    tmp37 = tmp28 + tmp36
    # nbs: unweighted per-bin mass.
    tmp38 = tmp8 + tmp16
    tmp39 = tmp38 + tmp25
    tmp40 = tmp39 + tmp34
    tmp41 = tmp40.to(tl.float32)
    tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK])
    # Cumulative sums across bins via inclusive scan.
    tmp43, = tl.associative_scan((tmp42,), 1, _triton_helper_fn_add0)
    tmp44 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
    tmp46 = tl.where(rmask & xmask, tmp44, 0)
    tmp47 = tl.sum(tmp46, 1)[:, None]
    tmp48 = tmp37.to(tl.float32)
    tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
    tmp50, = tl.associative_scan((tmp49,), 1, _triton_helper_fn_add0)
    # prec = cumsum(rec) / (1e-16 + cumsum(nbs)); AP = sum(prec * rec_norm).
    tmp51 = 1e-16
    tmp52 = tmp43 + tmp51
    tmp53 = tmp50 / tmp52
    tmp54 = tmp37 / tmp47
    tmp55 = tmp53 * tmp54
    tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK])
    tmp58 = tl.where(rmask & xmask, tmp56, 0)
    tmp59 = tl.sum(tmp58, 1)[:, None]
    tl.store(in_out_ptr1 + x0, tmp59, xmask)
@triton.jit
def triton_per_fused_mean_rsub_2(in_out_ptr0, in_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    """Fused `1 - mean(x)` over a 4-element vector.

    Loads the 4 per-row AP values from `in_ptr0`, averages them, and stores
    the scalar `1 - mean` into `in_out_ptr0[0]`.
    """
    # Reduction width is fixed to the 4 input elements.
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    # The next two expressions are discarded; artifacts of code generation.
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    # Sum across the reduction axis, then divide by the element count (4).
    tmp3 = tl.sum(tmp1, 1)[:, None]
    tmp4 = 4.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1.0
    # rsub: 1 - mean
    tmp7 = tmp6 - tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None)
def call(args):
    """Inductor-generated forward for APLoss_dist.

    args: [scores (4,4), labels (4,4), quantizer weight (50,1,1),
    quantizer bias (50,)]. Returns a 1-tuple holding the scalar `1 - mAP`
    loss tensor. NOTE(review): requires CUDA device 0; the kernels invoked
    here are defined elsewhere in this file.
    """
    arg0_1, arg1_1, arg2_1, arg3_1 = args
    args.clear()
    # Validate exact shapes/strides the fused kernels were specialized for.
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    assert_size_stride(arg2_1, (50, 1, 1), (1, 1, 1))
    assert_size_stride(arg3_1, (50,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        # Preprocess scores (fused mul/rsub/sqrt — kernel defined above).
        triton_poi_fused_mul_rsub_sqrt_0[grid(16)](arg0_1, buf0, 16, XBLOCK
            =16, num_warps=1, num_stages=1)
        del arg0_1
        # 1x1 Conv1d quantization step via cuDNN/extern convolution.
        buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 1, 4
            ), (4, 0, 1), 0), arg2_1, stride=(1,), padding=(0,), dilation=(
            1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 50, 4), (200, 4, 1))
        del arg2_1
        del buf0
        buf6 = empty_strided_cuda((4,), (1,), torch.float32)
        buf7 = buf6
        del buf6
        # Per-row AP via fused clamp/min/cumsum reduction.
        triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1[grid(4)](buf7,
            buf1, arg3_1, arg1_1, 4, 25, XBLOCK=1, num_warps=2, num_stages=1)
        del arg1_1
        del arg3_1
        del buf1
        buf8 = empty_strided_cuda((), (), torch.float32)
        buf9 = buf8
        del buf8
        # Final scalar: 1 - mean(AP).
        triton_per_fused_mean_rsub_2[grid(1)](buf9, buf7, 1, 4, XBLOCK=1,
            num_warps=2, num_stages=1)
        del buf7
    return buf9,
def sim_to_dist(scores):
    """Map similarity scores to distances via `1 - sqrt(2.001 - 2 * scores)`.

    The 2.001 constant keeps the sqrt argument strictly positive for
    scores up to 1.0.
    """
    shifted = 2.001 - 2.0 * scores
    return 1.0 - shifted.sqrt()
class APLoss(nn.Module):
    """ Differentiable AP loss, through quantization. From the paper:
        Learning with Average Precision: Training Image Retrieval with a Listwise Loss
        Jerome Revaud, Jon Almazan, Rafael Sampaio de Rezende, Cesar de Souza
        https://arxiv.org/abs/1906.07589
        Input: (N, M)   values in [min, max]
        label: (N, M)   values in {0, 1}
        Returns: 1 - mAP (mean AP for each n in {1..N})
        Note: typically, this is what you wanna minimize
    """

    def __init__(self, nq=25, min=0, max=1):
        # NOTE: `min`/`max` shadow builtins but are kept for interface
        # compatibility with existing callers.
        nn.Module.__init__(self)
        assert isinstance(nq, int) and 2 <= nq <= 100
        self.nq = nq
        self.min = min
        self.max = max
        gap = max - min
        assert gap > 0
        # A frozen 1x1 Conv1d acts as the quantizer: its 2*nq output
        # channels form nq triangular bins over [min, max].
        quantizer = nn.Conv1d(1, 2 * nq, kernel_size=1, bias=True)
        quantizer.weight = nn.Parameter(quantizer.weight.detach(),
            requires_grad=False)
        quantizer.bias = nn.Parameter(quantizer.bias.detach(),
            requires_grad=False)
        slope = (nq - 1) / gap
        # First nq channels: descending ramps; last nq: ascending ramps.
        quantizer.weight[:nq] = -slope
        quantizer.bias[:nq] = torch.from_numpy(slope * min + np.arange(nq,
            0, -1))
        quantizer.weight[nq:] = slope
        quantizer.bias[nq:] = torch.from_numpy(np.arange(2 - nq, 2, 1) -
            slope * min)
        # Border bins saturate at 1 (half-triangles).
        quantizer.weight[0] = quantizer.weight[-1] = 0
        quantizer.bias[0] = quantizer.bias[-1] = 1
        self.quantizer = quantizer

    def forward(self, x, label, qw=None, ret='1-mAP'):
        """Compute AP per row; return `1 - mAP` (default), or raw AP."""
        assert x.shape == label.shape
        N, M = x.shape
        # Soft-assign each score to the nq quantization bins.
        q = self.quantizer(x.unsqueeze(1))
        q = torch.min(q[:, :self.nq], q[:, self.nq:]).clamp(min=0)
        nbs = q.sum(dim=-1)  # total (soft) sample count per bin
        rec = (q * label.view(N, 1, M).float()).sum(dim=-1)  # positives/bin
        # Precision@bin from cumulative positives over cumulative counts.
        prec = rec.cumsum(dim=-1) / (1e-16 + nbs.cumsum(dim=-1))
        rec = rec / rec.sum(dim=-1).unsqueeze(1)  # normalize to recall deltas
        ap = (prec * rec).sum(dim=-1)
        if ret == '1-mAP':
            if qw is not None:
                ap = ap * qw
            return 1 - ap.mean()
        if ret == 'AP':
            assert qw is None
            return ap
        raise ValueError('Bad return type for APLoss(): %s' % str(ret))

    def measures(self, x, gt, loss=None):
        """Return a metrics dict; computes the loss if not provided."""
        loss = self.forward(x, gt) if loss is None else loss
        return {'loss_ap': float(loss)}
class APLoss_distNew(APLoss):
    """Triton-accelerated APLoss variant: routes the forward pass through
    the compiled `call` graph instead of eager PyTorch ops."""

    def forward(self, input_0, input_1):
        weight = self.quantizer.weight
        bias = self.quantizer.bias
        # Argument order must match the positional layout `call` expects:
        # scores, labels, quantizer weight, quantizer bias.
        result = call([input_0, input_1, weight, bias])
        return result[0]
|
dongan-beta/deep-image-retrieval
|
APLoss_dist
| false
| 15,202
|
[
"BSD-3-Clause"
] | 253
|
3e0885f88da328aefb7abb2fa350f8860a4bd52d
|
https://github.com/dongan-beta/deep-image-retrieval/tree/3e0885f88da328aefb7abb2fa350f8860a4bd52d
|
Conv
|
import torch
from torch import nn
from torch.nn.functional import interpolate
from typing import cast
class Interpolate(nn.Module):
    """Thin nn.Module wrapper around torch.nn.functional.interpolate."""

    def __init__(self, scale_factor: 'float'=1.0, mode: 'str'='nearest'
        ) ->None:
        super().__init__()
        self.scale_factor = scale_factor
        self.mode = mode

    def forward(self, input: 'torch.Tensor') ->torch.Tensor:
        """Resize `input` spatially by `scale_factor` using `mode`."""
        resized = interpolate(input, scale_factor=self.scale_factor, mode=
            self.mode)
        return cast(torch.Tensor, resized)

    def extra_repr(self) ->str:
        # Only mention the mode when it differs from the default.
        parts = [f'scale_factor={self.scale_factor}']
        if self.mode != 'nearest':
            parts.append(f'mode={self.mode}')
        return ', '.join(parts)
class Conv(nn.Module):
    """Conv block: optional nearest upsample -> reflection pad -> Conv2d
    -> optional InstanceNorm2d -> optional ReLU.

    When `upsample` is set, `stride` is used as the interpolation factor
    and the convolution itself runs with stride 1.
    """

    def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size:
        'int', *, stride: int=1, upsample: bool=False, norm: bool=True,
        activation: bool=True):
        super().__init__()
        self.upsample = Interpolate(scale_factor=stride) if upsample else None
        # Reflection padding keeps spatial size for odd kernels.
        self.pad = nn.ReflectionPad2d(kernel_size // 2)
        conv_stride = 1 if upsample else stride
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride=conv_stride)
        self.norm = nn.InstanceNorm2d(out_channels, affine=True
            ) if norm else None
        self.activation = nn.ReLU() if activation else None

    def forward(self, input: 'torch.Tensor') ->torch.Tensor:
        x = input
        if self.upsample:
            x = self.upsample(x)
        x = self.conv(self.pad(x))
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return cast(torch.Tensor, x)
def get_inputs():
    """Sample forward-pass inputs for smoke-testing Conv."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for smoke-testing Conv."""
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
from torch.nn.functional import interpolate
from typing import cast
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    """Reflection-pad a (4,4,4,4) input by 2 on each side into (4,4,8,8).

    The nested abs() expressions compute the mirrored source index for
    each padded output coordinate.
    """
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # Decompose the flat index: x0 = out col, x1 = out row, x2 = (n, c).
    x0 = xindex % 8
    x1 = xindex // 8 % 8
    x2 = xindex // 64
    x3 = xindex
    # Reflected gather: abs() folds out-of-range coordinates back inside.
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
        x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
        xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1(
    in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3,
    out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Fused conv-bias add + per-(N,C) instance norm + affine + ReLU.

    One program per (sample, channel) pair (xnumel=16); reduces over the
    25 spatial positions. Also emits mean, rstd, the ReLU mask (for
    backward), and the repeated affine weight.
    """
    xnumel = 16
    rnumel = 25
    RBLOCK: tl.constexpr = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    x0 = xindex
    r3 = rindex
    x1 = xindex % 4  # channel index
    # Affine weight (repeated per sample), conv output, conv bias, affine bias.
    tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_out_ptr0 + (r3 + 25 * x0), rmask & xmask, other=0.0)
    tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    # Add the convolution bias.
    tmp3 = tmp1 + tmp2
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    # Discarded expression; artifact of code generation.
    tl.where(rmask & xmask, tmp4, 0)
    tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
    tmp9 = tl.where(rmask & xmask, tmp7, 0)
    # Spatial mean over the 25 valid positions.
    tmp10 = tl.sum(tmp9, 1)[:, None]
    tmp11 = tl.full([XBLOCK, 1], 25, tl.int32)
    tmp12 = tmp11.to(tl.float32)
    tmp13 = tmp10 / tmp12
    # Biased variance: mean of squared deviations.
    tmp14 = tmp4 - tmp13
    tmp15 = tmp14 * tmp14
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp18 = tl.where(rmask & xmask, tmp16, 0)
    tmp19 = tl.sum(tmp18, 1)[:, None]
    tmp20 = tmp3 - tmp13
    tmp21 = 25.0
    tmp22 = tmp19 / tmp21
    tmp23 = 1e-05
    tmp24 = tmp22 + tmp23
    # Reciprocal std with eps, then affine transform and ReLU.
    tmp25 = libdevice.rsqrt(tmp24)
    tmp26 = tmp20 * tmp25
    tmp27 = tmp26 * tmp0
    tmp29 = tmp27 + tmp28
    tmp30 = tl.full([1, 1], 0, tl.int32)
    tmp31 = triton_helpers.maximum(tmp30, tmp29)
    tmp32 = 0.0
    # Boolean mask of zeroed activations, saved for the backward pass.
    tmp33 = tmp31 <= tmp32
    tl.store(out_ptr0 + x0, tmp0, xmask)
    tl.store(in_out_ptr0 + (r3 + 25 * x0), tmp3, rmask & xmask)
    tl.store(out_ptr3 + (r3 + 25 * x0), tmp31, rmask & xmask)
    tl.store(out_ptr4 + (r3 + 25 * x0), tmp33, rmask & xmask)
    tl.store(out_ptr5 + x0, tmp25, xmask)
    tl.store(out_ptr1 + x0, tmp13, xmask)
def call(args):
    """Inductor-generated forward for ConvNew (pad -> conv -> norm -> relu).

    args: [conv weight, input, conv bias, norm weight, norm bias].
    Returns the activated output first, plus saved tensors for backward.
    NOTE(review): requires CUDA device 0; shapes are hard-specialized.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Reflection-pad the input from 4x4 to 8x8.
        buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0,
            1024, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        # External convolution kernel; bias is folded into the next kernel.
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
        buf3 = empty_strided_cuda((16,), (1,), torch.float32)
        buf2 = buf1
        del buf1
        buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
            )
        buf8 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
        buf9 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
        buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
            )
        # Fused bias add + instance norm + affine + ReLU (+ backward masks).
        triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1[
            grid(16)](buf2, primals_4, primals_3, primals_5, buf3, buf4,
            buf8, buf9, buf7, 16, 25, XBLOCK=1, num_warps=2, num_stages=1)
        del primals_3
        del primals_4
        del primals_5
    return buf8, primals_2, buf0, buf2, buf3, reinterpret_tensor(buf7, (16,
        ), (1,), 0), buf9, reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1,
        1, 1), 0)
class Interpolate(nn.Module):
    """nn.Module adapter for torch.nn.functional.interpolate."""

    def __init__(self, scale_factor: 'float'=1.0, mode: 'str'='nearest'
        ) ->None:
        super().__init__()
        self.scale_factor = scale_factor
        self.mode = mode

    def forward(self, input: 'torch.Tensor') ->torch.Tensor:
        """Spatially rescale `input` by `scale_factor` using `mode`."""
        out = interpolate(input, scale_factor=self.scale_factor, mode=self
            .mode)
        return cast(torch.Tensor, out)

    def extra_repr(self) ->str:
        # The mode is shown only when it differs from the default.
        pieces = [f'scale_factor={self.scale_factor}']
        if self.mode != 'nearest':
            pieces.append(f'mode={self.mode}')
        return ', '.join(pieces)
class ConvNew(nn.Module):
    """Triton-backed variant of Conv: forward dispatches to the compiled
    `call` graph instead of eager PyTorch ops."""

    def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size:
        'int', *, stride: int=1, upsample: bool=False, norm: bool=True,
        activation: bool=True):
        super().__init__()
        # With upsampling, stride becomes the interpolation factor and the
        # convolution runs with stride 1.
        self.upsample = Interpolate(scale_factor=stride) if upsample else None
        self.pad = nn.ReflectionPad2d(kernel_size // 2)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride=1 if upsample else stride)
        self.norm = nn.InstanceNorm2d(out_channels, affine=True
            ) if norm else None
        self.activation = nn.ReLU() if activation else None

    def forward(self, input_0):
        # Positional layout expected by `call`:
        # [conv weight, input, conv bias, norm weight, norm bias].
        weight = self.conv.weight
        bias = self.conv.bias
        gamma = self.norm.weight
        beta = self.norm.bias
        output = call([weight, input_0, bias, gamma, beta])
        return output[0]
|
dooglewoogle/pystiche
|
Conv
| false
| 15,203
|
[
"BSD-3-Clause"
] | 129
|
14b61123ede2abdb00daaa5b4981de6d7edaf034
|
https://github.com/dooglewoogle/pystiche/tree/14b61123ede2abdb00daaa5b4981de6d7edaf034
|
_nms
|
import torch
import torch.utils.data
import torch
import torch.nn as nn
class _nms(nn.Module):
def __init__(self):
super(_nms, self).__init__()
kernel = 3
pad = (kernel - 1) // 2
self.maxpool = nn.MaxPool2d(kernel_size=kernel, stride=1, padding=pad)
def forward(self, heat):
hmax = self.maxpool(heat)
keep = (hmax == heat).float()
return heat * keep
def get_inputs():
    """Sample forward-pass inputs for smoke-testing _nms."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for smoke-testing _nms."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_eq_max_pool2d_with_indices_mul_0(in_out_ptr0,
    in_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Fused 3x3 max-pool (stride 1, pad 1) + equality mask + multiply.

    For each position of the (4,4,4,4) heatmap it takes the max over the
    nine neighbors (offsets +/-4 are rows, +/-1 are columns of the 4-wide
    map), then writes `heat * (max == heat)` — i.e. NMS in one pass.
    Out-of-bounds neighbors contribute -inf.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 4  # row within the 4x4 map
    x0 = xindex % 4       # column within the 4x4 map
    x3 = xindex
    tmp52 = tl.load(in_ptr0 + x3, xmask)
    # Row bounds check for the row above (x1 - 1).
    tmp0 = -1 + x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    # Column bounds check for the column to the left (x0 - 1).
    tmp6 = -1 + x0
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    # Top-left neighbor.
    tmp11 = tl.load(in_ptr0 + (-5 + x3), tmp10 & xmask, other=float('-inf'))
    tmp12 = x0
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    # Top neighbor.
    tmp17 = tl.load(in_ptr0 + (-4 + x3), tmp16 & xmask, other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + x0
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp5 & tmp22
    # Top-right neighbor.
    tmp24 = tl.load(in_ptr0 + (-3 + x3), tmp23 & xmask, other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = x1
    tmp27 = tmp26 >= tmp1
    tmp28 = tmp26 < tmp3
    tmp29 = tmp27 & tmp28
    tmp30 = tmp29 & tmp9
    # Left neighbor.
    tmp31 = tl.load(in_ptr0 + (-1 + x3), tmp30 & xmask, other=float('-inf'))
    tmp32 = triton_helpers.maximum(tmp31, tmp25)
    tmp33 = tmp29 & tmp15
    # Center element.
    tmp34 = tl.load(in_ptr0 + x3, tmp33 & xmask, other=float('-inf'))
    tmp35 = triton_helpers.maximum(tmp34, tmp32)
    tmp36 = tmp29 & tmp22
    # Right neighbor.
    tmp37 = tl.load(in_ptr0 + (1 + x3), tmp36 & xmask, other=float('-inf'))
    tmp38 = triton_helpers.maximum(tmp37, tmp35)
    tmp39 = 1 + x1
    tmp40 = tmp39 >= tmp1
    tmp41 = tmp39 < tmp3
    tmp42 = tmp40 & tmp41
    tmp43 = tmp42 & tmp9
    # Bottom-left neighbor.
    tmp44 = tl.load(in_ptr0 + (3 + x3), tmp43 & xmask, other=float('-inf'))
    tmp45 = triton_helpers.maximum(tmp44, tmp38)
    tmp46 = tmp42 & tmp15
    # Bottom neighbor.
    tmp47 = tl.load(in_ptr0 + (4 + x3), tmp46 & xmask, other=float('-inf'))
    tmp48 = triton_helpers.maximum(tmp47, tmp45)
    tmp49 = tmp42 & tmp22
    # Bottom-right neighbor.
    tmp50 = tl.load(in_ptr0 + (5 + x3), tmp49 & xmask, other=float('-inf'))
    tmp51 = triton_helpers.maximum(tmp50, tmp48)
    # Keep the value only where it equals the neighborhood maximum.
    tmp53 = tmp51 == tmp52
    tmp54 = tmp53.to(tl.float32)
    tmp55 = tmp52 * tmp54
    tl.store(in_out_ptr0 + x3, tmp55, xmask)
def call(args):
    """Inductor-generated forward for _nmsNew.

    args: [heatmap (4,4,4,4)]. Returns a 1-tuple with the NMS-filtered
    heatmap. NOTE(review): requires CUDA device 0.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # Single fused kernel: 3x3 max-pool + equality mask + multiply.
        triton_poi_fused__to_copy_eq_max_pool2d_with_indices_mul_0[grid(256)](
            buf1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf1,
class _nmsNew(nn.Module):
    """Triton-backed variant of _nms: forward dispatches to the compiled
    `call` graph instead of eager PyTorch ops."""

    def __init__(self):
        super(_nmsNew, self).__init__()
        kernel = 3
        pad = (kernel - 1) // 2
        # Kept for interface parity with _nms; the Triton path fuses it.
        self.maxpool = nn.MaxPool2d(kernel_size=kernel, stride=1, padding=pad)

    def forward(self, input_0):
        result = call([input_0])
        return result[0]
|
donnyyou/centerX
|
_nms
| false
| 15,204
|
[
"Apache-2.0"
] | 350
|
6e381cb669a6014d02e31a43915271237690531c
|
https://github.com/donnyyou/centerX/tree/6e381cb669a6014d02e31a43915271237690531c
|
UpConv
|
import torch
import torch.nn as nn
class UpConv(nn.Module):
    """2x upsampling block: ConvTranspose2d (kernel 2, stride 2) then ELU.

    NOTE(review): the `kernel_size` argument is accepted but never used —
    the transposed convolution is hard-coded to kernel 2 / stride 2.
    """

    def __init__(self, input_nc, output_nc, kernel_size):
        super(UpConv, self).__init__()
        self.deconv = nn.ConvTranspose2d(in_channels=input_nc,
            out_channels=output_nc, kernel_size=2, bias=True, stride=2,
            padding=0)
        self.activation_fn = nn.ELU()

    def forward(self, input):
        upsampled = self.deconv(input)
        return self.activation_fn(upsampled)
def get_inputs():
    """Sample forward-pass inputs for smoke-testing UpConv."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for smoke-testing UpConv."""
    return [[], {'input_nc': 4, 'output_nc': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    """Fused bias-add + ELU over the deconvolution output.

    Adds the per-channel bias in place (`in_out_ptr0`) and writes the
    ELU-activated result to `out_ptr0`.
    """
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 64 % 4  # channel index for bias lookup
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    # ELU with alpha=1: x if x > 0 else expm1(x).
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    # Keep the pre-activation (for backward) and the activated output.
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
    tl.store(out_ptr0 + x3, tmp9, xmask)
def call(args):
    """Inductor-generated forward for UpConvNew (deconv -> bias -> ELU).

    args: [deconv weight (4,4,2,2), deconv bias (4,), input (4,4,4,4)].
    Returns the ELU output plus saved tensors for backward.
    NOTE(review): requires CUDA device 0.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Transposed convolution (stride 2) doubles the spatial size to 8x8.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
            2), padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
        get_raw_stream(0)
        # Fused bias add + ELU.
        triton_poi_fused_convolution_elu_0[grid(1024)](buf1, primals_2,
            buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return buf2, primals_1, primals_3, buf1
class UpConvNew(nn.Module):
    """Triton-backed variant of UpConv: forward dispatches to the compiled
    `call` graph instead of eager PyTorch ops."""

    def __init__(self, input_nc, output_nc, kernel_size):
        super(UpConvNew, self).__init__()
        self.deconv = nn.ConvTranspose2d(in_channels=input_nc,
            out_channels=output_nc, kernel_size=2, bias=True, stride=2,
            padding=0)
        self.activation_fn = nn.ELU()

    def forward(self, input_0):
        # Positional layout expected by `call`: [weight, bias, input].
        weight = self.deconv.weight
        bias = self.deconv.bias
        result = call([weight, bias, input_0])
        return result[0]
|
dong1015323606/LKVOLearner
|
UpConv
| false
| 15,205
|
[
"BSD-3-Clause"
] | 237
|
6ac9fb5d3c22d6a81529063f8c52d6aa34166b2a
|
https://github.com/dong1015323606/LKVOLearner/tree/6ac9fb5d3c22d6a81529063f8c52d6aa34166b2a
|
DetLoss
|
import torch
from torch import nn
class DetLoss(nn.Module):
    """Detection losses: weighted heatmap BCE plus size/orientation
    smooth-L1 terms.

    The size/orientation terms are weighted per pixel by the maximum
    ground-truth heat across channels; the heatmap term is reweighted by
    a sigmoid-based difficulty factor.
    """

    def __init__(self):
        super().__init__()
        self.hm_criterion = nn.BCEWithLogitsLoss(reduction='none')
        self.ori_criterion = nn.SmoothL1Loss(reduction='none')
        self.box_criterion = nn.SmoothL1Loss(reduction='none')

    def forward(self, pred_heatmaps, heatmaps, pred_sizemaps, sizemaps,
        pred_orimaps, orimaps):
        # Per-pixel weight: max ground-truth heat across channels.
        size_w, _ = heatmaps.max(dim=1, keepdim=True)
        # Difficulty weight for the heatmap loss (sigmoid of a sign-flipped
        # logit: small where the prediction already agrees with the label).
        p_det = torch.sigmoid(pred_heatmaps * (1 - 2 * heatmaps))
        bce = self.hm_criterion(pred_heatmaps, heatmaps)
        det_loss = (bce * p_det).mean() / p_det.mean()
        w_mean = size_w.mean()
        box_loss = (size_w * self.box_criterion(pred_sizemaps, sizemaps)
            ).mean() / w_mean
        ori_loss = (size_w * self.ori_criterion(pred_orimaps, orimaps)
            ).mean() / w_mean
        return det_loss, box_loss, ori_loss
def get_inputs():
    """Six sample tensors (pred/gt heat, size, orientation) for DetLoss."""
    return [torch.rand(4, 4, 4, 4) for _ in range(6)]


def get_init_inputs():
    """Constructor (args, kwargs) for smoke-testing DetLoss."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_div_mean_mul_rsub_sigmoid_0(
    in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    """Fused weighted BCE-with-logits heatmap loss.

    Computes mean(bce * p_det) / mean(p_det) over all 256 elements, where
    p_det = sigmoid(logits * (1 - 2 * labels)), and stores the scalar into
    `in_out_ptr0[0]`. in_ptr0 = labels, in_ptr1 = logits.
    """
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    # Discarded expressions; artifacts of code generation.
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    # Numerically-stable BCE with logits:
    # (1 - y) * x - (min(0, x) - log1p(exp(-|x|)))
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11
    # Difficulty weight: sigmoid(x * (1 - 2y)).
    tmp13 = 2.0
    tmp14 = tmp0 * tmp13
    tmp15 = tmp1 - tmp14
    tmp16 = tmp3 * tmp15
    tmp17 = tl.sigmoid(tmp16)
    tmp18 = tmp12 * tmp17
    tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
    tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
    tmp22 = tl.broadcast_to(tmp17, [RBLOCK])
    tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0))
    # Normalize both sums by the element count, then take their ratio.
    tmp25 = 256.0
    tmp26 = tmp21 / tmp25
    tmp27 = tmp24 / tmp25
    tmp28 = tmp26 / tmp27
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None)
@triton.jit
def triton_per_fused_max_mean_mul_smooth_l1_loss_1(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, rnumel):
    """Weighted smooth-L1 numerators for the size and orientation losses.

    Computes sum(size_w * smooth_l1(pred, gt)) for both the size maps
    (in_ptr1/in_ptr2) and orientation maps (in_ptr3/in_ptr4), where
    size_w is the channel-wise max of the heatmaps (in_ptr0). The two
    scalar sums go to out_ptr0/out_ptr1; normalization happens later.
    """
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    # Discarded expressions; artifacts of code generation.
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex % 16   # spatial position
    r2 = rindex // 64  # batch index
    r3 = rindex
    # Load all 4 heatmap channels at this (batch, position).
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
        )
    tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
        'evict_last')
    tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr1 + r3, None)
    tmp8 = tl.load(in_ptr2 + r3, None)
    tmp23 = tl.load(in_ptr3 + r3, None)
    tmp24 = tl.load(in_ptr4 + r3, None)
    # size_w = channel-wise max of the ground-truth heatmaps.
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    # Smooth-L1 (beta=1): 0.5*d^2 if |d| < 1 else |d| - 0.5, for size maps.
    tmp9 = tmp7 - tmp8
    tmp10 = tl_math.abs(tmp9)
    tmp11 = 1.0
    tmp12 = tmp10 < tmp11
    tmp13 = tmp10 * tmp10
    tmp14 = 0.5
    tmp15 = tmp13 * tmp14
    tmp16 = tmp15 * tmp11
    tmp17 = tmp10 - tmp14
    tmp18 = tl.where(tmp12, tmp16, tmp17)
    tmp19 = tmp6 * tmp18
    tmp20 = tl.broadcast_to(tmp19, [RBLOCK])
    tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0))
    # Same smooth-L1 for the orientation maps.
    tmp25 = tmp23 - tmp24
    tmp26 = tl_math.abs(tmp25)
    tmp27 = tmp26 < tmp11
    tmp28 = tmp26 * tmp26
    tmp29 = tmp28 * tmp14
    tmp30 = tmp29 * tmp11
    tmp31 = tmp26 - tmp14
    tmp32 = tl.where(tmp27, tmp30, tmp31)
    tmp33 = tmp6 * tmp32
    tmp34 = tl.broadcast_to(tmp33, [RBLOCK])
    tmp36 = triton_helpers.promote_to_tensor(tl.sum(tmp34, 0))
    tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp22, None)
    tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp36, None)
@triton.jit
def triton_per_fused_div_max_mean_mul_smooth_l1_loss_2(in_out_ptr0,
    in_out_ptr1, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Final normalization of the size/orientation losses.

    Recomputes mean(size_w) from the heatmaps (in_ptr0) over the 64
    (batch, position) pairs, then rescales the two weighted sums held in
    in_out_ptr0/in_out_ptr1 to mean(w * l1) / mean(size_w) in place.
    """
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    # Discarded expressions; artifacts of code generation.
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16   # spatial position
    r1 = rindex // 16  # batch index
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp10 = tl.load(in_out_ptr0 + 0)
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, 1])
    tmp17 = tl.load(in_out_ptr1 + 0)
    tmp18 = tl.broadcast_to(tmp17, [XBLOCK, 1])
    # size_w = channel-wise max; then sum over all 64 positions.
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    # box_loss = (sum / 256) / (w_sum / 64); same for ori_loss.
    tmp12 = 256.0
    tmp13 = tmp11 / tmp12
    tmp14 = 64.0
    tmp15 = tmp9 / tmp14
    tmp16 = tmp13 / tmp15
    tmp19 = tmp18 / tmp12
    tmp20 = tmp19 / tmp15
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)
def call(args):
    """Inductor-generated forward for DetLossNew.

    args: [pred_heat, gt_heat, pred_size, gt_size, pred_ori, gt_ori], all
    (4,4,4,4). Returns (det_loss, box_loss, ori_loss) scalars.
    NOTE(review): requires CUDA device 0.
    """
    arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf6 = buf0
        del buf0
        get_raw_stream(0)
        # Weighted BCE heatmap loss (fully reduced in one kernel).
        triton_per_fused_binary_cross_entropy_with_logits_div_mean_mul_rsub_sigmoid_0[
            grid(1)](buf6, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg1_1
        buf2 = empty_strided_cuda((), (), torch.float32)
        buf4 = empty_strided_cuda((), (), torch.float32)
        # Unnormalized weighted smooth-L1 sums for size and orientation.
        triton_per_fused_max_mean_mul_smooth_l1_loss_1[grid(1)](arg0_1,
            arg3_1, arg2_1, arg5_1, arg4_1, buf2, buf4, 1, 256, num_warps=2,
            num_stages=1)
        del arg2_1
        del arg3_1
        del arg4_1
        del arg5_1
        buf8 = buf4
        del buf4
        buf7 = buf2
        del buf2
        # Normalize both sums by mean(size_w), in place.
        triton_per_fused_div_max_mean_mul_smooth_l1_loss_2[grid(1)](buf8,
            buf7, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf6, buf7, buf8
class DetLossNew(nn.Module):
    """Triton-backed variant of DetLoss: forward dispatches to the
    compiled `call` graph instead of eager PyTorch ops."""

    def __init__(self):
        super().__init__()
        # Kept for interface parity; the Triton path fuses these losses.
        self.hm_criterion = nn.BCEWithLogitsLoss(reduction='none')
        self.ori_criterion = nn.SmoothL1Loss(reduction='none')
        self.box_criterion = nn.SmoothL1Loss(reduction='none')

    def forward(self, input_0, input_1, input_2, input_3, input_4, input_5):
        results = call([input_0, input_1, input_2, input_3, input_4, input_5])
        return results[0], results[1], results[2]
|
dotchen/LAV
|
DetLoss
| false
| 15,206
|
[
"Apache-2.0"
] | 122
|
dc9b4cfca39abd50c7438e8749d49f6ac0fe5e4e
|
https://github.com/dotchen/LAV/tree/dc9b4cfca39abd50c7438e8749d49f6ac0fe5e4e
|
Critic
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Critic(nn.Module):
    """Twin Q-network critic: two independent 2-hidden-layer MLP heads
    over the concatenated (state, action) vector, each producing a
    scalar Q-value."""

    def __init__(self, state_size, action_size, args):
        super(Critic, self).__init__()
        input_dim = state_size + action_size
        hidden = args.hidden_size
        # Head 1: fc1 -> fc2 -> fc3.
        self.fc1 = nn.Linear(input_dim, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, 1)
        # Head 2: fc4 -> fc5 -> fc6.
        self.fc4 = nn.Linear(input_dim, hidden)
        self.fc5 = nn.Linear(hidden, hidden)
        self.fc6 = nn.Linear(hidden, 1)

    def forward(self, states, actions):
        x = torch.cat([states, actions], dim=1)
        h1 = torch.relu(self.fc2(torch.relu(self.fc1(x))))
        q_value1 = self.fc3(h1)
        h2 = torch.relu(self.fc5(torch.relu(self.fc4(x))))
        q_value2 = self.fc6(h2)
        return q_value1, q_value2
def get_inputs():
    """Sample (states, actions) batch for smoke-testing Critic."""
    return [torch.rand(4, 4), torch.rand(4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for smoke-testing Critic."""
    kwargs = {'state_size': 4, 'action_size': 4, 'args': _mock_config(
        hidden_size=4)}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    """Concatenate two (4,4) tensors along dim 1 into a (4,8) output.

    Columns 0-3 come from in_ptr0 (states), columns 4-7 from in_ptr1
    (actions).
    """
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8   # output column
    x1 = xindex // 8  # row
    x2 = xindex
    tmp0 = x0
    # Discarded expression; artifact of code generation.
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    # First half of the columns reads from the first input.
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    # Second half reads from the second input, shifted by 4.
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """Fused bias-add + ReLU, applied in place on a (4,4) buffer."""
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # feature index for bias lookup
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    # ReLU: max(0, x).
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """Inductor-generated forward for CriticNew (twin Q-network MLPs).

    args layout (positions fixed by codegen): fc2.w, fc5.w, fc1.w, fc1.b,
    states, fc2.b, fc3.w, fc3.b, fc4.w, fc4.b, actions, fc5.b, fc6.w,
    fc6.b. Returns (q1, q2) plus saved activations for backward.
    NOTE(review): requires CUDA device 0.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (1, 4), (4, 1))
    assert_size_stride(primals_8, (1,), (1,))
    assert_size_stride(primals_9, (4, 8), (8, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4, 4), (4, 1))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (1, 4), (4, 1))
    assert_size_stride(primals_14, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # x = cat([states, actions], dim=1) -> (4, 8).
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        # Head 1: fc1 matmul, then fused bias+ReLU.
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
            ), 0), out=buf1)
        del primals_3
        buf2 = buf1
        del buf1
        triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_4
        # fc2 matmul + bias + ReLU.
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4
            ), 0), out=buf3)
        buf4 = buf3
        del buf3
        triton_poi_fused_relu_1[grid(16)](buf4, primals_6, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_6
        # fc3: fused bias + matmul gives q1.
        buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
            (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6)
        del primals_8
        # Head 2: fc4 matmul + bias + ReLU.
        buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 4), (1, 8
            ), 0), out=buf7)
        del primals_9
        buf8 = buf7
        del buf7
        triton_poi_fused_relu_1[grid(16)](buf8, primals_10, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_10
        # fc5 matmul + bias + ReLU.
        buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (4, 4), (1,
            4), 0), out=buf9)
        buf10 = buf9
        del buf9
        triton_poi_fused_relu_1[grid(16)](buf10, primals_12, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_12
        # fc6: fused bias + matmul gives q2.
        buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(
            primals_13, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf12)
        del primals_14
    return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13,
        primals_11, primals_7, primals_5)
class CriticNew(nn.Module):
    """Triton-backed twin-Q critic: forward dispatches to the compiled
    `call` graph instead of eager PyTorch ops."""

    def __init__(self, state_size, action_size, args):
        super(CriticNew, self).__init__()
        input_dim = state_size + action_size
        hidden = args.hidden_size
        self.fc1 = nn.Linear(input_dim, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, 1)
        self.fc4 = nn.Linear(input_dim, hidden)
        self.fc5 = nn.Linear(hidden, hidden)
        self.fc6 = nn.Linear(hidden, 1)

    def forward(self, input_0, input_1):
        # Positional layout fixed by the generated `call` graph.
        params = [self.fc2.weight, self.fc5.weight, self.fc1.weight,
            self.fc1.bias, input_0, self.fc2.bias, self.fc3.weight,
            self.fc3.bias, self.fc4.weight, self.fc4.bias, input_1,
            self.fc5.bias, self.fc6.weight, self.fc6.bias]
        output = call(params)
        return output[0], output[1]
|
dongminlee94/Samsung-DRL-Code
|
Critic
| false
| 15,207
|
[
"MIT"
] | 116
|
c96f8739a09cfd708c265954ee8ecf0ea3b67395
|
https://github.com/dongminlee94/Samsung-DRL-Code/tree/c96f8739a09cfd708c265954ee8ecf0ea3b67395
|
AngleSimpleLinear
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
from torch.nn import Parameter
from torch.nn.parameter import Parameter
import torch.onnx
import torch.nn
class AngleSimpleLinear(nn.Module):
    """Computes cos of angles between input vectors and weights vectors"""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(in_features, out_features))
        # Random init followed by column renorm and rescale, matching the
        # common angular-softmax weight initialisation recipe.
        self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)

    def forward(self, x):
        # Unit-normalise input rows and weight columns; a plain matmul of
        # unit vectors yields pairwise cosines.
        unit_x = F.normalize(x, dim=1)
        unit_w = F.normalize(self.weight, dim=0)
        cosines = unit_x.mm(unit_w)
        # Clamp strictly inside (-1, 1) so downstream acos/margin math
        # stays finite; note the 1-tuple return.
        eps = 1e-07
        return cosines.clamp(-1.0 + eps, 1.0 - eps),
def get_inputs():
    """Sample forward input: one 4x4 activation batch."""
    sample = torch.rand([4, 4])
    return [sample]
def get_init_inputs():
    """Constructor (args, kwargs) for AngleSimpleLinear."""
    kwargs = {'in_features': 4, 'out_features': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torchvision import models as models
from torch.nn import Parameter
from torch.nn.parameter import Parameter
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # L2-normalise each contiguous group of 4 elements (rows of the 4x4
    # view): out = in / max(||group||_2, 1e-12), matching F.normalize's eps.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # Re-load the four elements of this element's group to build the norm.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    # Floor the norm at 1e-12 to avoid division by zero.
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Same normalisation as triton_poi_fused_div_0 but over stride-4 groups
    # (columns of the 4x4 view): out = in / max(||column||_2, 1e-12).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # The four column elements live at offsets x0, 4+x0, 8+x0, 12+x0.
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    # Floor the norm at 1e-12 to avoid division by zero.
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_2(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # Clamp each element into [-(1 - 1e-7), 1 - 1e-7] and record a boolean
    # "was already in range" mask (used by autograd for the clamp backward).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = -0.9999999
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 0.9999999
    tmp4 = triton_helpers.minimum(tmp2, tmp3)
    # Mask is true where no clamping occurred (lower <= x <= upper).
    tmp5 = tmp0 >= tmp1
    tmp6 = tmp0 <= tmp3
    tmp7 = tmp5 & tmp6
    tl.store(out_ptr0 + x0, tmp4, xmask)
    tl.store(out_ptr1 + x0, tmp7, xmask)
def call(args):
    """Fused AngleSimpleLinear forward on CUDA device 0.

    Normalises both 4x4 operands, multiplies them, and clamps the result
    strictly inside (-1, 1). Returns (clamped cosines, raw weight, in-range
    mask for backward, transposed normalised operand).
    """
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        # Row-group normalisation of primals_1.
        triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Column-group normalisation of primals_2.
        triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # cuBLAS matmul of the two unit-normalised operands -> cosines.
        extern_kernels.mm(buf0, buf1, out=buf2)
        buf3 = buf1
        del buf1
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        # Clamp into (-1, 1); buf1's storage is reused for the output.
        triton_poi_fused_clamp_ge_le_logical_and_2[grid(16)](buf2, buf3,
            buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf2
    return buf3, primals_2, buf4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0)
class AngleSimpleLinearNew(nn.Module):
    """Computes cos of angles between input vectors and weights vectors"""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(in_features, out_features))
        # Random init, column renorm, rescale — same recipe as the eager class.
        self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)

    def forward(self, input_0):
        """Run the fused `call` graph; returns only the clamped cosines."""
        primals_1 = self.weight
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
|
dqawami/openvino_training_extensions
|
AngleSimpleLinear
| false
| 15,208
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
LogitKLDivLoss
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class LogitKLDivLoss(nn.Module):
    """Kullback–Leibler divergence loss. Inputs predicted and ground truth logits.

    Args:
        T (float): Softmax temperature.
    """

    def __init__(self, T=1):
        super().__init__()
        self.T = T

    def forward(self, p_logits, q_logits, **kwargs):
        # Temperature-scaled distributions: predictions in log space,
        # targets in probability space, as F.kl_div requires.
        log_probs = F.log_softmax(p_logits / self.T, dim=1)
        target_probs = F.softmax(q_logits / self.T, dim=1)
        kl = F.kl_div(log_probs, target_probs, reduction='batchmean')
        # T**2 keeps gradient magnitudes comparable across temperatures.
        return kl * self.T ** 2
def get_inputs():
    """Sample forward inputs: two 4x4x4x4 logit tensors."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    """Constructor (args, kwargs): defaults only."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax numerator along dim 1 of a (4,4,4,4) tensor: exp(x - max),
    # where the max runs over the 4 slices spaced 16 apart inside each
    # 64-element batch block. The *1.0 factors are the T=1 temperature.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    # The four dim-1 entries for this (batch, spatial) position.
    tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    # Subtract the running max for numerical stability, then exponentiate.
    tmp14 = tmp2 - tmp13
    tmp15 = tmp14 * tmp1
    tmp16 = tl_math.exp(tmp15)
    tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Log-softmax shift along dim 1: stores x - max WITHOUT the exp, so the
    # final reduction kernel can finish log_softmax via log-sum-exp.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    # The four dim-1 entries for this (batch, spatial) position.
    tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = tmp14 * tmp1
    tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    # Single-program reduction producing the scalar KL loss:
    #   sum(q*log(q) - q*log_softmax(p)) * (1/batch) * T**2
    # in_ptr0 holds the softmax numerators of q (exp values), in_ptr1 the
    # shifted logits of p; batchmean over batch=4 gives the 0.25 factor.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r3 = rindex
    r0 = rindex % 16
    r2 = rindex // 64
    tmp0 = tl.load(in_ptr0 + r3, None)
    tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
        )
    tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
        'evict_last')
    tmp17 = tl.load(in_ptr1 + r3, None)
    tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
        'evict_last')
    tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
        'evict_last')
    tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
        'evict_last')
    tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
        'evict_last')
    # Normalise the q numerators into probabilities.
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    # xlogy(q, q) with the 0*log(0)=0 convention and NaN propagation.
    tmp9 = libdevice.isnan(tmp8).to(tl.int1)
    tmp10 = 0.0
    tmp11 = tmp8 == tmp10
    tmp12 = tl_math.log(tmp8)
    tmp13 = tmp8 * tmp12
    tmp14 = tl.where(tmp11, tmp10, tmp13)
    tmp15 = float('nan')
    tmp16 = tl.where(tmp9, tmp15, tmp14)
    # log_softmax of p via log-sum-exp over the shifted logits.
    tmp19 = tl_math.exp(tmp18)
    tmp21 = tl_math.exp(tmp20)
    tmp22 = tmp19 + tmp21
    tmp24 = tl_math.exp(tmp23)
    tmp25 = tmp22 + tmp24
    tmp27 = tl_math.exp(tmp26)
    tmp28 = tmp25 + tmp27
    tmp29 = tl_math.log(tmp28)
    tmp30 = tmp17 - tmp29
    tmp31 = tmp8 * tmp30
    tmp32 = tmp16 - tmp31
    tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
    tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
    # 0.25 = 1/batch_size (batchmean); 1.0 = T**2 with T=1.
    tmp36 = 0.25
    tmp37 = tmp35 * tmp36
    tmp38 = 1.0
    tmp39 = tmp37 * tmp38
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp39, None)
def call(args):
    """Fused LogitKLDivLoss forward on CUDA device 0.

    arg0_1 = p_logits (predictions), arg1_1 = q_logits (targets); returns a
    1-tuple holding the scalar batchmean KL loss.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Softmax numerators of the target logits.
        triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        del arg1_1
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Max-shifted prediction logits (log-softmax first half).
        triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf3
        del buf3
        # Final single-block reduction to the scalar loss.
        triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1)
            ](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1)
        del buf0
        del buf2
    return buf4,
class LogitKLDivLossNew(nn.Module):
    """Kullback–Leibler divergence loss. Inputs predicted and ground truth logits.

    Args:
        T (float): Softmax temperature.
    """

    def __init__(self, T=1):
        super().__init__()
        # NOTE(review): the compiled `call` graph bakes in T=1 (the 1.0
        # temperature factors in the kernels); self.T is not forwarded.
        self.T = T

    def forward(self, input_0, input_1):
        """Run the fused `call` graph; returns the scalar KL loss."""
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
|
dqawami/openvino_training_extensions
|
LogitKLDivLoss
| false
| 15,209
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
LengthPredictor
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class LengthPredictionLoss(nn.Module):
    """Cross-entropy loss over discretised source/target length differences."""

    def __init__(self, max_delta=50):
        super().__init__()
        self.max_delta = max_delta

    def forward(self, logits, src_mask, tgt_mask):
        # Target class = length difference, shifted by max_delta and clipped
        # into the valid class range [0, 2*max_delta - 1].
        src_lens = src_mask.sum(1)
        tgt_lens = tgt_mask.sum(1)
        shifted = tgt_lens - src_lens + self.max_delta
        delta = shifted.clamp(0, self.max_delta * 2 - 1).long()
        loss = F.cross_entropy(logits, delta, reduction='mean')
        return {'length_prediction_loss': loss}
class LengthPredictor(nn.Module):
    """Predicts the target/source length difference from a masked mean of
    source embeddings."""

    def __init__(self, hidden_size, max_delta=50):
        super().__init__()
        self.hidden_size = hidden_size
        self.max_delta = max_delta
        self._init_modules()
        self._init_loss()

    def forward(self, src, src_mask, tgt_len=None):
        pooled = self._compute_mean_emb(src, src_mask)
        return self._predict_delta(pooled)

    def _predict_delta(self, src):
        # Class index minus max_delta recovers the signed length offset.
        logits = self.length_predictor(src)
        delta = logits.argmax(-1) - float(self.max_delta)
        return logits, delta

    def _compute_mean_emb(self, src, src_mask):
        # Masked mean over the time dimension (dim 1).
        masked_sum = (src * src_mask[:, :, None]).sum(1)
        return masked_sum / src_mask.sum(1)[:, None]

    def _init_modules(self):
        # One class per possible offset in [-max_delta, max_delta).
        self.length_predictor = nn.Linear(self.hidden_size, self.max_delta * 2)

    def _init_loss(self):
        self.loss = LengthPredictionLoss(self.max_delta)
def get_inputs():
    """Sample forward inputs: src tensor and src_mask, both 4x4x4x4."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    """Constructor (args, kwargs) for LengthPredictor."""
    return [[], {'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Masked-mean fusion: out = sum(src * mask) / sum(mask) over four
    # strided positions. in_ptr0 supplies the src values (stride 64),
    # in_ptr1 the mask values (stride 16), per Inductor's chosen layout.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex % 64
    x0 = xindex % 16
    x2 = xindex // 64
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    # Weighted sum of the four (src, mask) pairs.
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    # Divide by the mask total to obtain the mean.
    tmp15 = tmp1 + tmp4
    tmp16 = tmp15 + tmp8
    tmp17 = tmp16 + tmp12
    tmp18 = tmp14 / tmp17
    tl.store(out_ptr0 + x4, tmp18, xmask)
@triton.jit
def triton_per_fused_argmax_sub_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    # Per-row argmax over 100 logits followed by the delta decode:
    # delta = float(argmax) - 50.0 (max_delta = 50).
    xnumel = 64
    rnumel = 100
    RBLOCK: tl.constexpr = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 100 * x0), rmask & xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    # Mask out-of-range lanes with -inf so they never win the argmax.
    tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
    tmp4 = tl.broadcast_to(rindex, tmp3.shape)
    _, tmp2_tmp = triton_helpers.max_with_index(tmp3, tmp4, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp5 = tmp2.to(tl.float32)
    tmp6 = 50.0
    tmp7 = tmp5 - tmp6
    tl.store(out_ptr1 + x0, tmp7, xmask)
def call(args):
    """Fused LengthPredictor forward on CUDA device 0.

    primals_1/primals_2 are src and src_mask, primals_3/primals_4 the
    length_predictor weight (100x4) and bias. Returns (logits reshaped to
    4x4x4x100, delta, pooled embeddings kept for backward).
    """
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (100, 4), (4, 1))
    assert_size_stride(primals_4, (100,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Masked mean of src over the time dimension.
        triton_poi_fused_div_mul_sum_0[grid(256)](primals_2, primals_1,
            buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
        # Linear layer: logits = pooled @ W^T + b via cuBLAS addmm.
        extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_3, (4, 100), (1, 4), 0),
            alpha=1, beta=1, out=buf1)
        del primals_3
        del primals_4
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Argmax decode: delta = argmax(logits) - max_delta.
        triton_per_fused_argmax_sub_1[grid(64)](buf1, buf3, 64, 100, XBLOCK
            =8, num_warps=8, num_stages=1)
    return reinterpret_tensor(buf1, (4, 4, 4, 100), (1600, 400, 100, 1), 0
        ), buf3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class LengthPredictionLoss(nn.Module):
    """Cross-entropy loss over discretised source/target length differences."""

    def __init__(self, max_delta=50):
        super().__init__()
        self.max_delta = max_delta

    def forward(self, logits, src_mask, tgt_mask):
        # Shift the length difference by max_delta and clip it into the
        # valid class index range [0, 2*max_delta - 1].
        src_lens = src_mask.sum(1)
        tgt_lens = tgt_mask.sum(1)
        offset = tgt_lens - src_lens + self.max_delta
        delta = offset.clamp(0, self.max_delta * 2 - 1).long()
        loss = F.cross_entropy(logits, delta, reduction='mean')
        return {'length_prediction_loss': loss}
class LengthPredictorNew(nn.Module):
    """Length-difference predictor whose forward runs the fused `call` graph."""

    def __init__(self, hidden_size, max_delta=50):
        super().__init__()
        self.hidden_size = hidden_size
        # NOTE(review): the compiled graph bakes in max_delta=50 (100 classes,
        # the 50.0 subtraction in the argmax kernel); other values of
        # max_delta would not match `call` — confirm before reuse.
        self.max_delta = max_delta
        self._init_modules()
        self._init_loss()

    def _predict_delta(self, src):
        # Eager fallback path; unused by forward, kept for API parity.
        logits = self.length_predictor(src)
        delta = logits.argmax(-1) - float(self.max_delta)
        return logits, delta

    def _compute_mean_emb(self, src, src_mask):
        # Eager masked mean over dim 1; unused by forward, kept for API parity.
        mean_emb = (src * src_mask[:, :, None]).sum(1) / src_mask.sum(1)[:,
            None]
        return mean_emb

    def _init_modules(self):
        self.length_predictor = nn.Linear(self.hidden_size, self.max_delta * 2)

    def _init_loss(self):
        self.loss = LengthPredictionLoss(self.max_delta)

    def forward(self, input_0, input_1):
        """Run the fused `call` graph; returns (logits, delta)."""
        primals_3 = self.length_predictor.weight
        primals_4 = self.length_predictor.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0], output[1]
|
dqawami/openvino_training_extensions
|
LengthPredictor
| false
| 15,210
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
ResNet_conv1
|
import math
import torch
import torch.utils.data
import torch.nn as nn
class ResNet_conv1(nn.Module):
    """ResNet stem only: per-channel input normalisation + the first 7x7 conv.

    ``block``, ``layers`` and ``num_classes`` are accepted for signature
    compatibility with a full ResNet constructor but are unused here.
    """

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet_conv1, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3,
            bias=False)
        # He-style init for conv weights; BatchNorm (none present here, but
        # the loop is the standard ResNet init boilerplate) set to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        y = x.clone()
        # NOTE(review): standard ImageNet normalisation uses means
        # (0.485, 0.456, 0.406) and stds (0.229, 0.224, 0.225); here every
        # channel subtracts 0.485 and channels 1 and 2 both divide by 0.224.
        # Kept as-is because the compiled Triton kernel replicates these
        # exact constants — confirm upstream intent before changing.
        y[:, 0, :, :] = (y[:, 0, :, :] - 0.485) / 0.229
        y[:, 1, :, :] = (y[:, 1, :, :] - 0.485) / 0.224
        y[:, 2, :, :] = (y[:, 2, :, :] - 0.485) / 0.224
        x = self.conv1(y)
        return x
def get_inputs():
    """Sample forward input: one RGB batch of 64x64 images."""
    return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
    """Constructor (args, kwargs) for ResNet_conv1 (block/layers unused)."""
    return [[], {'block': 4, 'layers': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_copy_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Fused per-channel normalisation of a (4, 3, 64, 64) image batch:
    # (x - 0.485) * 4.3668... (= 1/0.229) for channel 0 and
    # (x - 0.485) * 4.4642... (= 1/0.224) for channels 1 and 2.
    # Inductor lowered the three in-place slice writes into nested selects
    # on the channel index, hence the cascade of `where`s below.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 4096 % 3
    x0 = xindex % 4096
    x2 = xindex // 12288
    x3 = xindex
    # Pre-load all three channel values of this spatial position.
    tmp7 = tl.load(in_ptr0 + (x0 + 12288 * x2), None, eviction_policy=
        'evict_last')
    tmp12 = tl.load(in_ptr0 + (4096 + x0 + 12288 * x2), None,
        eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (8192 + x0 + 12288 * x2), None,
        eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + x3, None)
    tmp0 = x1
    tmp1 = tl.full([1], 2, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp3 = tl.full([1], 1, tl.int32)
    tmp4 = tmp1 == tmp3
    tmp5 = tl.full([1], 0, tl.int32)
    tmp6 = tmp3 == tmp5
    tmp8 = 0.485
    tmp9 = tmp7 - tmp8
    # 4.3668... == 1/0.229 (channel-0 scale).
    tmp10 = 4.366812227074235
    tmp11 = tmp9 * tmp10
    tmp13 = tl.where(tmp6, tmp11, tmp12)
    tmp14 = tmp13 - tmp8
    # 4.4642... == 1/0.224 (channel-1/2 scale).
    tmp15 = 4.464285714285714
    tmp16 = tmp14 * tmp15
    tmp17 = tmp1 == tmp5
    tmp19 = tl.where(tmp17, tmp11, tmp18)
    tmp20 = tl.where(tmp4, tmp16, tmp19)
    tmp21 = tmp20 - tmp8
    tmp22 = tmp21 * tmp15
    tmp23 = tmp0 == tmp3
    tmp24 = tmp0 == tmp5
    tmp26 = tl.where(tmp24, tmp11, tmp25)
    tmp27 = tl.where(tmp23, tmp16, tmp26)
    tmp28 = tl.where(tmp2, tmp22, tmp27)
    tl.store(out_ptr0 + x3, tmp28, None)
def call(args):
    """Fused ResNet_conv1 forward on CUDA device 0.

    primals_1 is the image batch, primals_2 the 64x3x7x7 conv weight.
    Returns (conv output, weight, normalised input kept for backward).
    """
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_2, (64, 3, 7, 7), (147, 49, 7, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
            torch.float32)
        get_raw_stream(0)
        # Per-channel normalisation (see the kernel's constants).
        triton_poi_fused_copy_div_sub_0[grid(49152)](primals_1, buf0, 49152,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        # 7x7 stride-1 convolution with padding 3 via cuDNN/ATen.
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(3, 3), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 64, 64, 64), (262144, 4096, 64, 1))
    return buf1, primals_2, buf0
class ResNet_conv1New(nn.Module):
    """ResNet stem whose forward runs the fused Triton `call` graph.

    Mirrors ``ResNet_conv1``: per-channel normalisation then a 7x7 conv.
    """

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet_conv1New, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3,
            bias=False)
        # Standard ResNet init boilerplate (He init for convs).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, input_0):
        """Run the fused `call` graph; returns the conv1 feature map."""
        primals_2 = self.conv1.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
|
donegaci/memc-net
|
ResNet_conv1
| false
| 15,211
|
[
"MIT"
] | 145
|
9bdb0ab6ce99af22a165db2cedacd148dd6083c0
|
https://github.com/donegaci/memc-net/tree/9bdb0ab6ce99af22a165db2cedacd148dd6083c0
|
Norm
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class Norm(nn.Module):
    """Squared deviation of a tensor's global L2 norm from a target value."""

    def __init__(self, dims):
        super(Norm, self).__init__()
        self.dims = dims

    def forward(self, x):
        # Frobenius norm over all elements, then squared distance to dims.
        l2 = torch.norm(x, p=2)
        diff = l2 - self.dims
        return diff * diff
def get_inputs():
    """Sample forward input: one 4x4x4x4 tensor."""
    return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    """Constructor (args, kwargs) for Norm."""
    return [[], {'dims': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_mul_sub_0(in_out_ptr0, in_ptr0,
    xnumel, rnumel):
    # Single-program reduction: out = (sqrt(sum(x^2)) - 4.0)^2 over all 256
    # elements. The 4.0 is the hard-coded `dims` from get_init_inputs.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    # Sum of squares across the whole tensor, then the L2 norm.
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
    tmp5 = libdevice.sqrt(tmp4)
    tmp6 = 4.0
    tmp7 = tmp5 - tmp6
    tmp8 = tmp7 * tmp7
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
    """Fused Norm forward on CUDA device 0: returns the scalar (||x||-4)^2."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # One program reduces the full 256-element tensor to the scalar loss.
        triton_per_fused_linalg_vector_norm_mul_sub_0[grid(1)](buf1, arg0_1,
            1, 256, num_warps=2, num_stages=1)
        del arg0_1
    return buf1,
class NormNew(nn.Module):
    """Drop-in `Norm` replacement whose forward runs the fused Triton kernel."""

    def __init__(self, dims):
        super(NormNew, self).__init__()
        # NOTE(review): the compiled kernel hard-codes dims=4.0; self.dims is
        # stored but not forwarded to `call` — confirm before reusing with
        # other values.
        self.dims = dims

    def forward(self, input_0):
        """Run the fused `call` graph; returns the scalar result."""
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
doudoulaile/RL-GAN-Net
|
Norm
| false
| 15,212
|
[
"MIT"
] | 112
|
9c221223d1878bc24f0f39ad34928c1bb2974ae3
|
https://github.com/doudoulaile/RL-GAN-Net/tree/9c221223d1878bc24f0f39ad34928c1bb2974ae3
|
StateInitZero
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class StateInitZero(nn.Module):
    """Builds zero-initialised (h0, c0) LSTM states sized from an input batch."""

    def __init__(self, hidden_size, num_layers=1, batch_first=False):
        super(StateInitZero, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.batch_first = batch_first

    def forward(self, input: 'torch.Tensor'):
        # Batch dimension is 0 for batch-first inputs, else 1 (seq-first).
        batch_dim = 0 if self.batch_first else 1
        state_shape = (self.num_layers, input.size(batch_dim), self.hidden_size)
        # new_zeros inherits the input's dtype and device.
        h0 = input.new_zeros(state_shape)
        c0 = input.new_zeros(state_shape)
        return h0, c0
def get_inputs():
    """Sample forward input: one 4x4x4x4 tensor."""
    return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    """Constructor (args, kwargs) for StateInitZero."""
    return [[], {'hidden_size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fill a 16-element buffer with zeros (the new_zeros state tensor).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
    """Fused StateInitZero forward: two zero (1, 4, 4) state tensors.

    The input is only shape-checked; shapes are baked in from tracing.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        # h0 <- zeros
        triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
        # c0 <- zeros
        triton_poi_fused_new_zeros_0[grid(16)](buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
    return buf0, buf1
class StateInitZeroNew(nn.Module):
    """`StateInitZero` variant whose forward runs the compiled `call` graph."""

    def __init__(self, hidden_size, num_layers=1, batch_first=False):
        super(StateInitZeroNew, self).__init__()
        # NOTE(review): `call` bakes in shape (1, 4, 4); these attributes are
        # stored but not forwarded — confirm before using other sizes.
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.batch_first = batch_first

    def forward(self, input_0):
        """Run the fused `call` graph; returns (h0, c0)."""
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0], output[1]
|
dqawami/openvino_training_extensions
|
StateInitZero
| false
| 15,213
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
ScaledDotProductAttention
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention with optional masking and dropout."""

    def __init__(self, dropout=0, scale=True):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.softmax = nn.Softmax(dim=2)
        self.scale = scale

    def forward(self, q, k, v, mask=None):
        # Raw attention scores: q @ k^T over the last two dims.
        scores = torch.bmm(q, k.permute(0, 2, 1))
        if self.scale:
            # Divide by sqrt(d_k), taken from the key's last dimension.
            scores = scores / torch.sqrt(torch.tensor(k.shape[-1]))
        if mask is not None:
            # Large negative fill makes masked positions vanish in softmax.
            scores = scores.masked_fill(mask == 0, -1000000000.0)
        weights = self.dropout(self.softmax(scores))
        return torch.bmm(weights, v), weights
def get_inputs():
    """Sample forward inputs: q, k, v batches of shape 4x4x4."""
    shape = [4, 4, 4]
    return [torch.rand(shape), torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    """Constructor (args, kwargs): defaults only."""
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Softmax numerator of the scaled scores: exp((x - rowmax) / sqrt(d_k))
    # with d_k = 4, so the divisor is 2.0. Inductor folded the sign of the
    # (always non-negative) scale into tmp6, which evaluates to +1 here.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # The four row entries used for the running max.
    tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp1 = 2.0
    tmp2 = 0.0
    tmp3 = tmp1 >= tmp2
    tmp4 = 1.0
    tmp5 = -1.0
    # tmp6 = sign(2.0) = +1; multiplying by it is a no-op kept from folding.
    tmp6 = tl.where(tmp3, tmp4, tmp5)
    tmp7 = tmp0 * tmp6
    tmp9 = tmp8 * tmp6
    tmp11 = tmp10 * tmp6
    tmp12 = triton_helpers.maximum(tmp9, tmp11)
    tmp14 = tmp13 * tmp6
    tmp15 = triton_helpers.maximum(tmp12, tmp14)
    tmp17 = tmp16 * tmp6
    tmp18 = triton_helpers.maximum(tmp15, tmp17)
    tmp19 = tmp7 - tmp18
    # Divide the shifted score by sign * 2.0 (= sqrt(4)), then exponentiate.
    tmp20 = tmp6 * tmp1
    tmp21 = tmp19 / tmp20
    tmp22 = tl_math.exp(tmp21)
    tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax denominator pass: divide each exp value by its row sum of 4.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # The four row entries forming the normalising sum.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
    """Fused scaled-dot-product attention (no mask, dropout p=0) on device 0.

    arg1_1 = q, arg0_1 = k, arg2_1 = v. Returns (attention output, attention
    weights).
    """
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Scores = q @ k^T; the permute is expressed as a stride reinterpret.
        extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
            16, 1, 4), 0), out=buf0)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        # exp((scores - rowmax) / sqrt(d_k))
        triton_poi_fused__softmax_sqrt_0[grid(64)](buf0, buf1, 64, XBLOCK=
            64, num_warps=1, num_stages=1)
        buf2 = buf0
        del buf0
        # Normalise by the row sums -> attention weights.
        triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = buf1
        del buf1
        # Output = weights @ v.
        extern_kernels.bmm(buf2, arg2_1, out=buf3)
        del arg2_1
    return buf3, buf2
class ScaledDotProductAttentionNew(nn.Module):
    """Scaled dot-product attention backed by the compiled Triton kernels.

    The constructor mirrors the eager module's signature; the dropout,
    softmax and scale attributes exist for interface compatibility, while
    the actual computation is delegated to the module-level ``call``.
    """

    def __init__(self, dropout=0, scale=True):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.softmax = nn.Softmax(dim=2)
        self.scale = scale

    def forward(self, input_0, input_1, input_2):
        # ``call`` consumes (clears) its argument list, so build a fresh one.
        result = call([input_0, input_1, input_2])
        # (attended values, attention weights)
        return result[0], result[1]
|
dqawami/openvino_training_extensions
|
ScaledDotProductAttention
| false
| 15,214
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
GateAddNorm
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class GatedLinearUnit(nn.Module):
    """Gated linear unit: ``sigmoid(w4(x)) * w5(x)`` applied after dropout."""

    def __init__(self, input_size, output_size, dropout=0):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.w4 = nn.Linear(input_size, output_size)
        self.w5 = nn.Linear(input_size, output_size)
        self.act = nn.Sigmoid()

    def forward(self, x):
        # Dropout is applied once; both projections see the same tensor.
        dropped = self.dropout(x)
        gate = self.act(self.w4(dropped))
        return gate * self.w5(dropped)
class GateAddNorm(nn.Module):
    """Gate-Add-Norm: GLU transform, residual add of ``skip``, then LayerNorm."""

    def __init__(self, input_size, output_size, dropout):
        super().__init__()
        self.glu = GatedLinearUnit(input_size, output_size, dropout)
        self.norm = nn.LayerNorm(output_size)

    def forward(self, x, skip):
        gated = self.glu(x)
        return self.norm(gated + skip)
def get_inputs():
    """Return sample forward inputs (x, skip), each of shape (4, 4, 4, 4)."""
    return [torch.rand([4, 4, 4, 4]) for _ in range(2)]
def get_init_inputs():
    """Return (positional args, keyword args) for constructing GateAddNorm."""
    kwargs = {'input_size': 4, 'output_size': 4, 'dropout': 0.5}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # LayerNorm statistics stage (TorchInductor-generated): for each 4-wide
    # row computes y_i = sigmoid(in0_i) * in1_i + in2_i (the GLU output plus
    # the skip connection), then writes the row mean (out_ptr0) and the
    # biased variance (out_ptr1) of the four y values.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex  # one program element per normalized row
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp22 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    # y_i = sigmoid(a_i) * b_i + skip_i for each of the 4 row elements.
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tl.sigmoid(tmp6)
    tmp9 = tmp7 * tmp8
    tmp11 = tmp9 + tmp10
    tmp12 = tmp5 + tmp11
    tmp14 = tl.sigmoid(tmp13)
    tmp16 = tmp14 * tmp15
    tmp18 = tmp16 + tmp17
    tmp19 = tmp12 + tmp18
    tmp21 = tl.sigmoid(tmp20)
    tmp23 = tmp21 * tmp22
    tmp25 = tmp23 + tmp24
    tmp26 = tmp19 + tmp25
    tmp27 = 4.0  # normalized dimension size
    tmp28 = tmp26 / tmp27  # mean
    tmp29 = tmp5 - tmp28
    tmp30 = tmp29 * tmp29
    tmp31 = tmp11 - tmp28
    tmp32 = tmp31 * tmp31
    tmp33 = tmp30 + tmp32
    tmp34 = tmp18 - tmp28
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp25 - tmp28
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp40 = tmp39 / tmp27  # biased variance
    tl.store(out_ptr0 + x0, tmp28, xmask)
    tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # LayerNorm application stage (TorchInductor-generated): recomputes
    # y = sigmoid(in0) * in1 + in2, then normalizes with the per-row mean
    # (in_ptr3) and variance (in_ptr4) from the previous kernel, and applies
    # the affine weight (in_ptr5) and bias (in_ptr6).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index (for mean/variance)
    x0 = xindex % 4  # position within row (for weight/bias)
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x2, xmask)
    tmp4 = tl.load(in_ptr2 + x2, xmask)
    tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2
    tmp5 = tmp3 + tmp4  # GLU output + skip
    tmp7 = tmp5 - tmp6  # subtract row mean
    tmp9 = 1e-05  # LayerNorm eps
    tmp10 = tmp8 + tmp9
    tmp11 = libdevice.rsqrt(tmp10)
    tmp12 = tmp7 * tmp11  # normalize
    tmp14 = tmp12 * tmp13  # scale
    tmp16 = tmp14 + tmp15  # shift
    tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
    """Compiled forward of GateAddNorm: LayerNorm(GLU(x) + skip).

    args: [x, w4.weight, w4.bias, w5.weight, w5.bias, skip,
           norm.weight, norm.bias].
    Returns the normalized output plus saved tensors for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # w4(x): x flattened to (64, 4), weight used transposed.
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # w5(x)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        # Per-row mean/variance of sigmoid(buf0)*buf1 + skip.
        triton_poi_fused_add_mul_native_layer_norm_sigmoid_0[grid(64)](buf0,
            buf1, primals_6, buf2, buf3, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Apply normalization plus LayerNorm affine parameters.
        triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(256)](buf0,
            buf1, primals_6, buf2, buf3, primals_7, primals_8, buf4, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf2
        del buf3
        del primals_8
    return buf4, primals_6, primals_7, reinterpret_tensor(primals_1, (64, 4
        ), (4, 1), 0), buf0, buf1
class GatedLinearUnit(nn.Module):
    """Gated linear unit: ``sigmoid(w4(x)) * w5(x)`` applied after dropout."""

    def __init__(self, input_size, output_size, dropout=0):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.w4 = nn.Linear(input_size, output_size)
        self.w5 = nn.Linear(input_size, output_size)
        self.act = nn.Sigmoid()

    def forward(self, x):
        # Dropout is applied once; both projections see the same tensor.
        dropped = self.dropout(x)
        gate = self.act(self.w4(dropped))
        return gate * self.w5(dropped)
class GateAddNormNew(nn.Module):
    """Triton-compiled GateAddNorm: collects parameters and delegates to ``call``."""

    def __init__(self, input_size, output_size, dropout):
        super().__init__()
        self.glu = GatedLinearUnit(input_size, output_size, dropout)
        self.norm = nn.LayerNorm(output_size)

    def forward(self, input_0, input_1):
        # Argument order must match the compiled graph's primals_1..primals_8.
        params = [
            input_0,
            self.glu.w4.weight,
            self.glu.w4.bias,
            self.glu.w5.weight,
            self.glu.w5.bias,
            input_1,
            self.norm.weight,
            self.norm.bias,
        ]
        return call(params)[0]
|
dqawami/openvino_training_extensions
|
GateAddNorm
| false
| 15,215
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
_MCLSTMCell
|
from _paritybench_helpers import _mock_config
import torch
from typing import Tuple
import torch.nn as nn
class _Gate(nn.Module):
"""Utility class to implement a standard sigmoid gate"""
def __init__(self, in_features: 'int', out_features: 'int'):
super(_Gate, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_features)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalised gate"""
return torch.sigmoid(self.fc(x))
class _NormalizedGate(nn.Module):
"""Utility class to implement a gate with normalised activation function"""
def __init__(self, in_features: 'int', out_shape: 'Tuple[int, int]',
normalizer: 'str'):
super(_NormalizedGate, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_shape
[0] * out_shape[1])
self.out_shape = out_shape
if normalizer == 'normalized_sigmoid':
self.activation = nn.Sigmoid()
elif normalizer == 'normalized_relu':
self.activation = nn.ReLU()
else:
raise ValueError(
f"Unknown normalizer {normalizer}. Must be one of {'normalized_sigmoid', 'normalized_relu'}"
)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalized gate"""
h = self.fc(x).view(-1, *self.out_shape)
return torch.nn.functional.normalize(self.activation(h), p=1, dim=-1)
class _MCLSTMCell(nn.Module):
    """The logic of the MC-LSTM cell"""

    def __init__(self, mass_input_size: 'int', aux_input_size: 'int',
        hidden_size: 'int', cfg: 'Config'):
        super(_MCLSTMCell, self).__init__()
        self.cfg = cfg
        self._hidden_size = hidden_size
        # All three gates see [x_m, x_a, normalised cell state] concatenated.
        gate_inputs = aux_input_size + hidden_size + mass_input_size
        self.output_gate = _Gate(in_features=gate_inputs, out_features=
            hidden_size)
        # Input gate distributes incoming mass over the hidden cells
        # (rows L1-normalised so mass is conserved).
        self.input_gate = _NormalizedGate(in_features=gate_inputs,
            out_shape=(mass_input_size, hidden_size), normalizer=
            'normalized_sigmoid')
        # Redistribution matrix moves mass between hidden cells.
        self.redistribution = _NormalizedGate(in_features=gate_inputs,
            out_shape=(hidden_size, hidden_size), normalizer='normalized_relu')
        self._reset_parameters()

    def _reset_parameters(self):
        # Optionally bias the output gate, analogous to an LSTM forget bias.
        if self.cfg.initial_forget_bias is not None:
            nn.init.constant_(self.output_gate.fc.bias, val=self.cfg.
                initial_forget_bias)

    def forward(self, x_m: 'torch.Tensor', x_a: 'torch.Tensor') ->Tuple[
        torch.Tensor, torch.Tensor]:
        """Perform forward pass on the MC-LSTM cell.

        Parameters
        ----------
        x_m : torch.Tensor
            Mass input that will be conserved by the network.
        x_a : torch.Tensor
            Auxiliary inputs that will be used to modulate the gates but whose information won't be stored internally
            in the MC-LSTM cells.

        Returns
        -------
        Tuple[torch.Tensor, torch.Tensor]
            Outgoing mass and memory cells per time step of shape [sequence length, batch size, hidden size]
        """
        _, batch_size, _ = x_m.size()
        # Cell state starts at zero mass.
        ct = x_m.new_zeros((batch_size, self._hidden_size))
        m_out, c = [], []
        # Sequential scan over the time dimension (dim 0).
        for xt_m, xt_a in zip(x_m, x_a):
            mt_out, ct = self._step(xt_m, xt_a, ct)
            m_out.append(mt_out)
            c.append(ct)
        m_out, c = torch.stack(m_out), torch.stack(c)
        return m_out, c

    def _step(self, xt_m, xt_a, c):
        """ Make a single time step in the MCLSTM. """
        # Gate inputs: raw mass/aux inputs plus the L1-normalised cell state.
        features = torch.cat([xt_m, xt_a, c / (c.norm(1) + 1e-05)], dim=-1)
        i = self.input_gate(features)
        r = self.redistribution(features)
        o = self.output_gate(features)
        # New mass entering the cells, and mass redistributed between cells.
        m_in = torch.matmul(xt_m.unsqueeze(-2), i).squeeze(-2)
        m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2)
        m_new = m_in + m_sys
        # o*m_new leaves the system; (1 - o)*m_new stays as the new state.
        return o * m_new, (1 - o) * m_new
def get_inputs():
    """Return sample (x_m, x_a) inputs of shape (seq, batch, size) = (4, 4, 4)."""
    return [torch.rand([4, 4, 4]) for _ in range(2)]
def get_init_inputs():
    """Return constructor args for _MCLSTMCell with a mocked config object."""
    cfg = _mock_config(initial_forget_bias=4)
    kwargs = {'mass_input_size': 4, 'aux_input_size': 4, 'hidden_size': 4,
        'cfg': cfg}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from typing import Tuple
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_new_zeros_0(out_ptr1,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    # Timestep-0 initialisation (TorchInductor-generated): the cell state is
    # all zeros, so c / (||c||_1 + 1e-5) == 0; this writes those zeros into
    # the third 4-wide slot of the 12-wide concatenated feature buffer
    # (stride 12, offset handled by the reinterpreted output pointer).
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex % 4
    r2 = rindex // 4
    tmp0 = 0.0  # fresh zero cell state
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.sum(tmp1, 1)[:, None]  # L1 norm of zeros == 0
    tmp4 = 1e-05
    tmp5 = tmp3 + tmp4
    tmp6 = tmp0 / tmp5  # 0 / 1e-5 == 0
    tl.store(out_ptr1 + tl.broadcast_to(r1 + 12 * r2, [XBLOCK, RBLOCK]),
        tmp6, None)
@triton.jit
def triton_for_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1):
    # Fused copy (TorchInductor "foreach"-style kernel): writes x_m[0]
    # (in_ptr0) and x_a[0] (in_ptr1), each 4x4, into the first and second
    # 4-wide slots of the 12-wide concatenated feature buffer. The program
    # id selects which of the two copies this block performs.
    pid = tl.program_id(0)
    XBLOCK: tl.constexpr = 1024
    num_xblocks_0 = tl.cdiv(16, XBLOCK)
    num_xblocks_1 = num_xblocks_0 + tl.cdiv(16, XBLOCK)
    if pid < num_xblocks_0:
        # First sub-kernel: copy the mass input into columns 0..3.
        pid_offset = pid
        xnumel = 16
        xoffset = pid_offset * XBLOCK
        xindex = xoffset + tl.arange(0, XBLOCK)[:]
        xmask = xindex < xnumel
        x2 = xindex
        x0 = xindex % 4
        x1 = xindex // 4
        tmp0 = tl.load(in_ptr0 + x2, xmask)
        tl.store(out_ptr0 + (x0 + 12 * x1), tmp0, xmask)
    elif pid < num_xblocks_1:
        # Second sub-kernel: copy the auxiliary input into columns 4..7.
        pid_offset = pid - num_xblocks_0
        xnumel = 16
        xoffset = pid_offset * XBLOCK
        xindex = xoffset + tl.arange(0, XBLOCK)[:]
        xmask = xindex < xnumel
        x5 = xindex
        x3 = xindex % 4
        x4 = xindex // 4
        tmp1 = tl.load(in_ptr1 + x5, xmask)
        tl.store(out_ptr1 + (x3 + 12 * x4), tmp1, xmask)
    else:
        pass
@triton.jit
def triton_poi_fused_div_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Normalised sigmoid gate (TorchInductor-generated): applies sigmoid,
    # then L1-normalises each 4-wide row — divide by sum of |sigmoid(x_j)|,
    # clamped below at 1e-12 (F.normalize eps).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index; 4 elements per normalised row
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp1 = tl.sigmoid(tmp0)
    # Accumulate the row's L1 norm of the sigmoid activations.
    tmp3 = tl.sigmoid(tmp2)
    tmp4 = tl_math.abs(tmp3)
    tmp6 = tl.sigmoid(tmp5)
    tmp7 = tl_math.abs(tmp6)
    tmp8 = tmp4 + tmp7
    tmp10 = tl.sigmoid(tmp9)
    tmp11 = tl_math.abs(tmp10)
    tmp12 = tmp8 + tmp11
    tmp14 = tl.sigmoid(tmp13)
    tmp15 = tl_math.abs(tmp14)
    tmp16 = tmp12 + tmp15
    tmp17 = 1e-12  # F.normalize eps
    tmp18 = triton_helpers.maximum(tmp16, tmp17)
    tmp19 = tmp1 / tmp18
    tl.store(out_ptr0 + x2, tmp19, xmask)
@triton.jit
def triton_poi_fused_new_zeros_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fills a 16-element buffer with zeros (the initial MC-LSTM cell state).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_div_relu_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Normalised ReLU gate (TorchInductor-generated): applies relu, then
    # L1-normalises each 4-wide row — divide by sum of |relu(x_j)|, clamped
    # below at 1e-12 (F.normalize eps).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index; 4 elements per normalised row
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp14 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)  # relu
    # Accumulate the row's L1 norm of the relu activations.
    tmp4 = triton_helpers.maximum(tmp1, tmp3)
    tmp5 = tl_math.abs(tmp4)
    tmp7 = triton_helpers.maximum(tmp1, tmp6)
    tmp8 = tl_math.abs(tmp7)
    tmp9 = tmp5 + tmp8
    tmp11 = triton_helpers.maximum(tmp1, tmp10)
    tmp12 = tl_math.abs(tmp11)
    tmp13 = tmp9 + tmp12
    tmp15 = triton_helpers.maximum(tmp1, tmp14)
    tmp16 = tl_math.abs(tmp15)
    tmp17 = tmp13 + tmp16
    tmp18 = 1e-12  # F.normalize eps
    tmp19 = triton_helpers.maximum(tmp17, tmp18)
    tmp20 = tmp2 / tmp19
    tl.store(out_ptr0 + x2, tmp20, xmask)
@triton.jit
def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5(in_out_ptr0,
    in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.
    constexpr):
    # One MC-LSTM step epilogue (TorchInductor-generated):
    #   m_new = m_in + m_sys            (in_ptr0 + in_out_ptr0, stored back)
    #   c_new = (1 - sigmoid(o)) * m_new  (out_ptr0)
    #   scalar ||c_new||_1 + 1e-5       (in_out_ptr1), reused as the norm
    #   for the next timestep's feature concatenation.
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_out_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 + tmp1  # m_new = m_in + m_sys
    tmp4 = tl.sigmoid(tmp3)  # output gate o
    tmp5 = 1.0
    tmp6 = tmp5 - tmp4
    tmp7 = tmp6 * tmp2  # new cell state (1 - o) * m_new
    tmp8 = tl_math.abs(tmp7)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp11 = tl.sum(tmp9, 1)[:, None]  # L1 norm over all 16 elements
    tmp12 = 1e-05
    tmp13 = tmp11 + tmp12
    tl.store(in_out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp2, None)
    tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp7, None)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # Builds the timestep-1 gate features: concat([x_m[1], x_a[1],
    # c / (||c||_1 + 1e-5)], dim=-1) into a (4, 12) buffer. in_ptr3 holds
    # the precomputed scalar norm; the +16 offsets select timestep 1.
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 12  # column within the concatenated feature vector
    x1 = xindex // 12  # batch row
    x2 = xindex
    tmp15 = tl.load(in_ptr3 + 0)  # scalar ||c||_1 + 1e-5
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # columns 0..3 -> x_m[1]
    tmp5 = tl.load(in_ptr0 + (16 + 4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8  # columns 4..7 -> x_a[1]
    tmp10 = tl.load(in_ptr1 + (16 + 4 * x1 + (-4 + x0)), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7  # columns 8..11 -> normalised cell state
    tl.full([1], 12, tl.int64)
    tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp17 = tmp14 / tmp16
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp11, tmp17, tmp18)
    tmp20 = tl.where(tmp9, tmp10, tmp19)
    tmp21 = tl.where(tmp4, tmp5, tmp20)
    tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7(in_out_ptr0,
    in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.
    constexpr):
    # Same MC-LSTM step epilogue as kernel _5 (m_new, (1-o)*m_new, and the
    # scalar L1 norm + 1e-5); only the in-place/operand roles of the first
    # two pointers are swapped by the scheduler.
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_out_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 + tmp1  # m_new = m_in + m_sys
    tmp4 = tl.sigmoid(tmp3)  # output gate o
    tmp5 = 1.0
    tmp6 = tmp5 - tmp4
    tmp7 = tmp6 * tmp2  # new cell state (1 - o) * m_new
    tmp8 = tl_math.abs(tmp7)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp11 = tl.sum(tmp9, 1)[:, None]  # L1 norm over all 16 elements
    tmp12 = 1e-05
    tmp13 = tmp11 + tmp12
    tl.store(in_out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp2, None)
    tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp7, None)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # Builds the timestep-2 gate features: concat([x_m[2], x_a[2],
    # c / (||c||_1 + 1e-5)], dim=-1) into a (4, 12) buffer. Identical to
    # triton_poi_fused_cat_6 except the +32 offsets select timestep 2.
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 12  # column within the concatenated feature vector
    x1 = xindex // 12  # batch row
    x2 = xindex
    tmp15 = tl.load(in_ptr3 + 0)  # scalar ||c||_1 + 1e-5
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # columns 0..3 -> x_m[2]
    tmp5 = tl.load(in_ptr0 + (32 + 4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8  # columns 4..7 -> x_a[2]
    tmp10 = tl.load(in_ptr1 + (32 + 4 * x1 + (-4 + x0)), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7  # columns 8..11 -> normalised cell state
    tl.full([1], 12, tl.int64)
    tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp17 = tmp14 / tmp16
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp11, tmp17, tmp18)
    tmp20 = tl.where(tmp9, tmp10, tmp19)
    tmp21 = tl.where(tmp4, tmp5, tmp20)
    tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # Builds the timestep-3 gate features: concat([x_m[3], x_a[3],
    # c / (||c||_1 + 1e-5)], dim=-1) into a (4, 12) buffer. Identical to
    # triton_poi_fused_cat_6 except the +48 offsets select timestep 3.
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 12  # column within the concatenated feature vector
    x1 = xindex // 12  # batch row
    x2 = xindex
    tmp15 = tl.load(in_ptr3 + 0)  # scalar ||c||_1 + 1e-5
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # columns 0..3 -> x_m[3]
    tmp5 = tl.load(in_ptr0 + (48 + 4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8  # columns 4..7 -> x_a[3]
    tmp10 = tl.load(in_ptr1 + (48 + 4 * x1 + (-4 + x0)), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7  # columns 8..11 -> normalised cell state
    tl.full([1], 12, tl.int64)
    tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp17 = tmp14 / tmp16
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp11, tmp17, tmp18)
    tmp20 = tl.where(tmp9, tmp10, tmp19)
    tmp21 = tl.where(tmp4, tmp5, tmp20)
    tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place elementwise add over 16 elements: m_new = m_in + m_sys for
    # the final timestep (no norm needed afterwards).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_stack_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Final stacking kernel (TorchInductor-generated): assembles the two
    # (4, 4, 4) outputs of the MC-LSTM scan.
    #   out_ptr0: torch.stack(m_out) with m_out[t] = sigmoid(o_t) * m_new_t,
    #     from the per-timestep (o, m_new) pairs in in_ptr0..in_ptr7.
    #   out_ptr1: torch.stack(c) — timesteps 0..2 were already materialised
    #     as (1-o)*m_new (in_ptr8..in_ptr10); timestep 3 is recomputed here.
    # x1 // 4 selects the timestep band; x0 is the hidden index.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # timestep 0 band
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
    tmp6 = tl.sigmoid(tmp5)
    tmp7 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
    tmp8 = tmp6 * tmp7  # o_0 * m_new_0
    tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
    tmp10 = tl.where(tmp4, tmp8, tmp9)
    tmp11 = tmp0 >= tmp3
    tmp12 = tl.full([1], 8, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13  # timestep 1 band
    tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0)
    tmp16 = tl.sigmoid(tmp15)
    tmp17 = tl.load(in_ptr3 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0)
    tmp18 = tmp16 * tmp17  # o_1 * m_new_1
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp14, tmp18, tmp19)
    tmp21 = tmp0 >= tmp12
    tmp22 = tl.full([1], 12, tl.int64)
    tmp23 = tmp0 < tmp22
    tmp24 = tmp21 & tmp23  # timestep 2 band
    tmp25 = tl.load(in_ptr4 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0)
    tmp26 = tl.sigmoid(tmp25)
    tmp27 = tl.load(in_ptr5 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0)
    tmp28 = tmp26 * tmp27  # o_2 * m_new_2
    tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
    tmp30 = tl.where(tmp24, tmp28, tmp29)
    tmp31 = tmp0 >= tmp22  # timestep 3 band
    tl.full([1], 16, tl.int64)
    tmp34 = tl.load(in_ptr6 + (x0 + 4 * (-12 + x1)), tmp31 & xmask, other=0.0)
    tmp35 = tl.sigmoid(tmp34)
    tmp36 = tl.load(in_ptr7 + (x0 + 4 * (-12 + x1)), tmp31 & xmask, other=0.0)
    tmp37 = tmp35 * tmp36  # o_3 * m_new_3
    tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
    tmp39 = tl.where(tmp31, tmp37, tmp38)
    # Select the band's value for the mass-output stack.
    tmp40 = tl.where(tmp24, tmp30, tmp39)
    tmp41 = tl.where(tmp14, tmp20, tmp40)
    tmp42 = tl.where(tmp4, tmp10, tmp41)
    # Cell-state stack: timesteps 0..2 are precomputed buffers.
    tmp43 = tl.load(in_ptr8 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
    tmp44 = tl.load(in_ptr9 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0)
    tmp45 = tl.load(in_ptr10 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0)
    tmp46 = 1.0
    tmp47 = tmp46 - tmp35
    tmp48 = tmp47 * tmp36  # (1 - o_3) * m_new_3
    tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
    tmp50 = tl.where(tmp31, tmp48, tmp49)
    tmp51 = tl.where(tmp24, tmp45, tmp50)
    tmp52 = tl.where(tmp14, tmp44, tmp51)
    tmp53 = tl.where(tmp4, tmp43, tmp52)
    tl.store(out_ptr0 + x2, tmp42, xmask)
    tl.store(out_ptr1 + x2, tmp53, xmask)
def call(args):
    """Compiled forward of _MCLSTMCell over a fully unrolled 4-step sequence.

    args: [x_m (4,4,4), x_a (4,4,4), output_gate weight/bias (via primals_7/8),
           input_gate fc weight/bias (primals_3/4), redistribution fc
           weight/bias (primals_5/6)] — see the asserts below for exact order.
    Returns (stacked mass outputs, stacked cell states, saved buffers...).
    Each timestep repeats the same pattern: build the 12-wide feature
    concat, run the three gate linears, normalise, two bmm's, then the
    fused epilogue that forms m_new, the new cell state and its L1 norm.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (16, 12), (12, 1))
    assert_size_stride(primals_4, (16,), (1,))
    assert_size_stride(primals_5, (16, 12), (12, 1))
    assert_size_stride(primals_6, (16,), (1,))
    assert_size_stride(primals_7, (4, 12), (12, 1))
    assert_size_stride(primals_8, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # --- timestep 0: features = [x_m[0], x_a[0], zeros] in one (4, 12)
        # buffer built from three strided views (buf1/buf2/buf3 alias buf4).
        buf4 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
        buf3 = reinterpret_tensor(buf4, (4, 4), (12, 1), 8)
        get_raw_stream(0)
        triton_per_fused_add_div_linalg_vector_norm_new_zeros_0[grid(1)](buf3,
            1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        buf1 = reinterpret_tensor(buf4, (4, 4), (12, 1), 0)
        buf2 = reinterpret_tensor(buf4, (4, 4), (12, 1), 4)
        triton_for_fused_1[2, 1, 1](primals_1, primals_2, buf1, buf2,
            num_warps=8, num_stages=1)
        # Gate linears: input gate (buf5), redistribution (buf6), output (buf7).
        buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_4, buf4, reinterpret_tensor(primals_3,
            (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf5)
        buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_6, buf4, reinterpret_tensor(primals_5,
            (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf6)
        buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
            (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf7)
        buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_div_sigmoid_2[grid(64)](buf5, buf8, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        # m_in = x_m[0] @ i
        buf9 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
            1), 0), buf8, out=buf9)
        buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_new_zeros_3[grid(16)](buf10, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf11 = buf8
        del buf8
        triton_poi_fused_div_relu_4[grid(64)](buf6, buf11, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        # m_sys = c @ r (c is still zero at t=0)
        buf12 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf10, (4, 1, 4), (4, 0, 1),
            0), buf11, out=buf12)
        buf13 = reinterpret_tensor(buf12, (4, 4), (4, 1), 0)
        del buf12
        buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf15 = empty_strided_cuda((), (), torch.float32)
        buf16 = buf15
        del buf15
        # m_new (buf13), new cell state (buf14), ||c||_1 + 1e-5 (buf16).
        triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5[grid(1)](
            buf13, buf16, buf9, buf7, buf14, 1, 16, XBLOCK=1, num_warps=2,
            num_stages=1)
        # --- timestep 1 (same structure as above) ---
        buf17 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
        triton_poi_fused_cat_6[grid(48)](primals_1, primals_2, buf14, buf16,
            buf17, 48, XBLOCK=64, num_warps=1, num_stages=1)
        buf18 = reinterpret_tensor(buf11, (4, 16), (16, 1), 0)
        del buf11
        extern_kernels.addmm(primals_4, buf17, reinterpret_tensor(primals_3,
            (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf18)
        buf19 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_6, buf17, reinterpret_tensor(primals_5,
            (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf19)
        buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_div_relu_4[grid(64)](buf19, buf20, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf21 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0)
        del buf9
        extern_kernels.addmm(primals_8, buf17, reinterpret_tensor(primals_7,
            (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf21)
        buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_div_sigmoid_2[grid(64)](buf18, buf22, 64, XBLOCK=
            64, num_warps=1, num_stages=1)
        buf23 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
            1), 16), buf22, out=buf23)
        buf24 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf14, (4, 1, 4), (4, 4, 1),
            0), buf20, out=buf24)
        buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0)
        del buf23
        buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf27 = empty_strided_cuda((), (), torch.float32)
        buf28 = buf27
        del buf27
        triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7[grid(1)](
            buf25, buf28, buf24, buf21, buf26, 1, 16, XBLOCK=1, num_warps=2,
            num_stages=1)
        # --- timestep 2 ---
        buf29 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
        triton_poi_fused_cat_8[grid(48)](primals_1, primals_2, buf26, buf28,
            buf29, 48, XBLOCK=64, num_warps=1, num_stages=1)
        buf30 = reinterpret_tensor(buf22, (4, 16), (16, 1), 0)
        del buf22
        extern_kernels.addmm(primals_4, buf29, reinterpret_tensor(primals_3,
            (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf30)
        buf31 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_6, buf29, reinterpret_tensor(primals_5,
            (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf31)
        buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_div_relu_4[grid(64)](buf31, buf32, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf33 = reinterpret_tensor(buf24, (4, 4), (4, 1), 0)
        del buf24
        extern_kernels.addmm(primals_8, buf29, reinterpret_tensor(primals_7,
            (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf33)
        buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_div_sigmoid_2[grid(64)](buf30, buf34, 64, XBLOCK=
            64, num_warps=1, num_stages=1)
        buf35 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
            1), 32), buf34, out=buf35)
        buf36 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf26, (4, 1, 4), (4, 4, 1),
            0), buf32, out=buf36)
        buf37 = reinterpret_tensor(buf35, (4, 4), (4, 1), 0)
        del buf35
        buf38 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf39 = empty_strided_cuda((), (), torch.float32)
        buf40 = buf39
        del buf39
        triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7[grid(1)](
            buf37, buf40, buf36, buf33, buf38, 1, 16, XBLOCK=1, num_warps=2,
            num_stages=1)
        # --- timestep 3 (final; norm of the resulting state not needed) ---
        buf41 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
        triton_poi_fused_cat_9[grid(48)](primals_1, primals_2, buf38, buf40,
            buf41, 48, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_2
        buf42 = reinterpret_tensor(buf34, (4, 16), (16, 1), 0)
        del buf34
        extern_kernels.addmm(primals_4, buf41, reinterpret_tensor(primals_3,
            (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf42)
        del primals_4
        buf43 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_6, buf41, reinterpret_tensor(primals_5,
            (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf43)
        del primals_6
        buf44 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_div_relu_4[grid(64)](buf43, buf44, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf45 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0)
        del buf36
        extern_kernels.addmm(primals_8, buf41, reinterpret_tensor(primals_7,
            (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf45)
        del primals_8
        buf46 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_div_sigmoid_2[grid(64)](buf42, buf46, 64, XBLOCK=
            64, num_warps=1, num_stages=1)
        buf47 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
            1), 48), buf46, out=buf47)
        buf48 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf38, (4, 1, 4), (4, 4, 1),
            0), buf44, out=buf48)
        buf49 = reinterpret_tensor(buf47, (4, 4), (4, 1), 0)
        del buf47
        triton_poi_fused_add_10[grid(16)](buf49, buf48, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del buf48
        # Stack the per-timestep mass outputs and cell states.
        buf50 = reinterpret_tensor(buf46, (16, 4), (4, 1), 0)
        del buf46
        buf51 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        triton_poi_fused_stack_11[grid(64)](buf7, buf13, buf21, buf25,
            buf33, buf37, buf45, buf49, buf14, buf26, buf38, buf50, buf51,
            64, XBLOCK=64, num_warps=1, num_stages=1)
    return (reinterpret_tensor(buf50, (4, 4, 4), (16, 4, 1), 0),
        reinterpret_tensor(buf51, (4, 4, 4), (16, 4, 1), 0), buf4, buf5,
        buf6, buf7, buf13, buf14, buf16, buf17, buf18, buf19, buf20, buf21,
        buf25, buf26, buf28, buf29, buf30, buf31, buf32, buf33, buf37,
        buf38, buf40, buf41, buf42, buf43, buf44, buf45, buf49,
        reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 48), primals_7,
        primals_5, primals_3, reinterpret_tensor(primals_1, (4, 4, 1), (4,
        1, 4), 32), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 16),
        reinterpret_tensor(buf10, (4, 4, 1), (4, 1, 4), 0),
        reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 0))
class _Gate(nn.Module):
"""Utility class to implement a standard sigmoid gate"""
def __init__(self, in_features: 'int', out_features: 'int'):
super(_Gate, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_features)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalised gate"""
return torch.sigmoid(self.fc(x))
class _NormalizedGate(nn.Module):
"""Utility class to implement a gate with normalised activation function"""
def __init__(self, in_features: 'int', out_shape: 'Tuple[int, int]',
normalizer: 'str'):
super(_NormalizedGate, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_shape
[0] * out_shape[1])
self.out_shape = out_shape
if normalizer == 'normalized_sigmoid':
self.activation = nn.Sigmoid()
elif normalizer == 'normalized_relu':
self.activation = nn.ReLU()
else:
raise ValueError(
f"Unknown normalizer {normalizer}. Must be one of {'normalized_sigmoid', 'normalized_relu'}"
)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalized gate"""
h = self.fc(x).view(-1, *self.out_shape)
return torch.nn.functional.normalize(self.activation(h), p=1, dim=-1)
class _MCLSTMCellNew(nn.Module):
    """The logic of the MC-LSTM cell (inductor-compiled forward path)."""

    def __init__(self, mass_input_size: 'int', aux_input_size: 'int',
        hidden_size: 'int', cfg: 'Config'):
        super(_MCLSTMCellNew, self).__init__()
        self.cfg = cfg
        self._hidden_size = hidden_size
        gate_inputs = aux_input_size + hidden_size + mass_input_size
        self.output_gate = _Gate(in_features=gate_inputs, out_features=
            hidden_size)
        self.input_gate = _NormalizedGate(in_features=gate_inputs,
            out_shape=(mass_input_size, hidden_size), normalizer=
            'normalized_sigmoid')
        self.redistribution = _NormalizedGate(in_features=gate_inputs,
            out_shape=(hidden_size, hidden_size), normalizer='normalized_relu')
        self._reset_parameters()

    def _reset_parameters(self):
        # Optionally bias the output gate toward "forget" at initialisation.
        forget_bias = self.cfg.initial_forget_bias
        if forget_bias is not None:
            nn.init.constant_(self.output_gate.fc.bias, val=forget_bias)

    def _step(self, xt_m, xt_a, c):
        """ Make a single time step in the MCLSTM. """
        normed_cell = c / (c.norm(1) + 1e-05)
        features = torch.cat([xt_m, xt_a, normed_cell], dim=-1)
        i = self.input_gate(features)
        r = self.redistribution(features)
        o = self.output_gate(features)
        m_in = torch.matmul(xt_m.unsqueeze(-2), i).squeeze(-2)
        m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2)
        m_new = m_in + m_sys
        return o * m_new, (1 - o) * m_new

    def forward(self, input_0, input_1):
        # Argument order must match what the generated `call` unpacks:
        # inputs first, then input-gate, redistribution and output-gate params.
        args = [input_0, input_1, self.input_gate.fc.weight, self.
            input_gate.fc.bias, self.redistribution.fc.weight, self.
            redistribution.fc.bias, self.output_gate.fc.weight, self.
            output_gate.fc.bias]
        output = call(args)
        return output[0], output[1]
|
DavidChoi76/neuralhydrology
|
_MCLSTMCell
| false
| 15,216
|
[
"BSD-3-Clause"
] | 144
|
a4c284b92934ee973c8b3fedf8a60df60c8feae1
|
https://github.com/DavidChoi76/neuralhydrology/tree/a4c284b92934ee973c8b3fedf8a60df60c8feae1
|
GatedResidualNetwork
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class GatedLinearUnit(nn.Module):
    """Gated linear unit: ``sigmoid(w4(x)) * w5(x)`` with input dropout."""

    def __init__(self, input_size, output_size, dropout=0):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.w4 = nn.Linear(input_size, output_size)
        self.w5 = nn.Linear(input_size, output_size)
        self.act = nn.Sigmoid()

    def forward(self, x):
        dropped = self.dropout(x)
        gate = self.act(self.w4(dropped))
        value = self.w5(dropped)
        return gate * value
class GatedResidualNetwork(nn.Module):
    """Gated residual network: ELU feed-forward, GLU gating, skip + LayerNorm.

    An optional context vector ``c`` is linearly projected (without bias)
    and added before the ELU.
    """

    def __init__(self, input_size, hidden_size, output_size, context_size=
        None, dropout=0):
        super().__init__()
        self.w1 = nn.Linear(hidden_size, hidden_size)
        self.w2 = nn.Linear(input_size, hidden_size)
        self.w3 = None if context_size is None else nn.Linear(context_size,
            hidden_size, bias=False)
        self.glu = GatedLinearUnit(hidden_size, output_size, dropout)
        self.layer_norm = nn.LayerNorm(output_size)
        self.residual = nn.Sequential(
            ) if input_size == output_size else nn.Linear(input_size,
            output_size)

    def forward(self, a, c=None):
        """Apply the GRN to ``a``, optionally conditioned on context ``c``."""
        hidden = self.w2(a)
        if c is not None:
            hidden = hidden + self.w3(c)
        n2 = F.elu(hidden)
        n1 = self.w1(n2)
        return self.layer_norm(self.residual(a) + self.glu(n1))
def get_inputs():
    """Example forward inputs: one random (4, 4, 4, 4) tensor."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Constructor arguments as (positional, keyword) for the module."""
    init_kwargs = {'input_size': 4, 'hidden_size': 4, 'output_size': 4}
    return [[], init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise ELU with alpha = 1: out = x if x > 0 else expm1(x).
    # Flat 1D launch over 256 elements; xmask guards the tail of the block.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = tmp5 * tmp3
    # Pick the linear branch for positives, the expm1 branch otherwise.
    tmp7 = tl.where(tmp2, tmp4, tmp6)
    tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # LayerNorm statistics pass: for each row of 4 contiguous values, form
    # v_k = in_ptr0[k] + sigmoid(in_ptr1[k]) * in_ptr2[k] (residual + GLU),
    # then store the row mean in out_ptr0 and the biased variance in out_ptr1.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp16 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp23 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    # Fused value for each of the 4 lanes of the row.
    tmp2 = tl.sigmoid(tmp1)
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 + tmp4
    tmp8 = tl.sigmoid(tmp7)
    tmp10 = tmp8 * tmp9
    tmp11 = tmp6 + tmp10
    tmp12 = tmp5 + tmp11
    tmp15 = tl.sigmoid(tmp14)
    tmp17 = tmp15 * tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = tmp12 + tmp18
    tmp22 = tl.sigmoid(tmp21)
    tmp24 = tmp22 * tmp23
    tmp25 = tmp20 + tmp24
    tmp26 = tmp19 + tmp25
    # Mean over the 4 lanes.
    tmp27 = 4.0
    tmp28 = tmp26 / tmp27
    tmp29 = tmp5 - tmp28
    tmp30 = tmp29 * tmp29
    tmp31 = tmp11 - tmp28
    tmp32 = tmp31 * tmp31
    tmp33 = tmp30 + tmp32
    tmp34 = tmp18 - tmp28
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp25 - tmp28
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    # Biased variance (divide by N = 4).
    tmp40 = tmp39 / tmp27
    tl.store(out_ptr0 + x0, tmp28, xmask)
    tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_2(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # LayerNorm application pass: recompute the fused value
    # v = in_ptr0 + sigmoid(in_ptr1) * in_ptr2, normalise it with the
    # per-row mean (in_ptr3) and variance (in_ptr4) from the stats kernel,
    # then apply the affine weight (in_ptr5) and bias (in_ptr6).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x2, xmask)
    tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.sigmoid(tmp1)
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 + tmp4
    tmp7 = tmp5 - tmp6
    # rsqrt(var + eps) with eps = 1e-5, matching nn.LayerNorm's default.
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = libdevice.rsqrt(tmp10)
    tmp12 = tmp7 * tmp11
    tmp14 = tmp12 * tmp13
    tmp16 = tmp14 + tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
    """Inductor-generated forward for GatedResidualNetwork (no context).

    Consumes (w1.weight, w1.bias, input, w2.weight, w2.bias, glu.w4.weight,
    glu.w4.bias, glu.w5.weight, glu.w5.bias, ln.weight, ln.bias), clears the
    list, and returns the normalised output plus saved buffers for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0 = w2(a): bias + input @ w2.weight^T, flattened to (64, 4).
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        # buf1 = elu(buf0), reshaped back to (4, 4, 4, 4).
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        # buf2 = w1(elu(...)); buf3 / buf4 = GLU branches w4 / w5 of buf2.
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
        del primals_7
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del primals_9
        # buf5/buf6 = per-row mean and variance of residual + GLU output.
        buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(64)](
            primals_3, buf3, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        # buf7 = layer_norm(residual + GLU output) with affine params.
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_native_layer_norm_sigmoid_2[grid(256)](
            primals_3, buf3, buf4, buf5, buf6, primals_10, primals_11, buf7,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf5
        del buf6
        del primals_11
    return buf7, primals_3, primals_10, buf0, reinterpret_tensor(buf1, (64,
        4), (4, 1), 0), buf2, buf3, buf4, primals_8, primals_6, primals_4
class GatedLinearUnit(nn.Module):
    """GLU building block: element-wise ``sigmoid(w4(x)) * w5(x)``."""

    def __init__(self, input_size, output_size, dropout=0):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.w4 = nn.Linear(input_size, output_size)
        self.w5 = nn.Linear(input_size, output_size)
        self.act = nn.Sigmoid()

    def forward(self, x):
        x = self.dropout(x)
        gated = self.act(self.w4(x))
        return gated * self.w5(x)
class GatedResidualNetworkNew(nn.Module):
    """Inductor-compiled GRN variant; `forward` runs the fused `call` path
    (context input is not wired through the compiled graph)."""

    def __init__(self, input_size, hidden_size, output_size, context_size=
        None, dropout=0):
        super().__init__()
        self.w1 = nn.Linear(hidden_size, hidden_size)
        self.w2 = nn.Linear(input_size, hidden_size)
        self.w3 = None if context_size is None else nn.Linear(context_size,
            hidden_size, bias=False)
        self.glu = GatedLinearUnit(hidden_size, output_size, dropout)
        self.layer_norm = nn.LayerNorm(output_size)
        self.residual = nn.Sequential(
            ) if input_size == output_size else nn.Linear(input_size,
            output_size)

    def forward(self, input_0):
        # Positional order must match what `call` unpacks.
        args = [self.w1.weight, self.w1.bias, input_0, self.w2.weight,
            self.w2.bias, self.glu.w4.weight, self.glu.w4.bias, self.glu.
            w5.weight, self.glu.w5.bias, self.layer_norm.weight, self.
            layer_norm.bias]
        return call(args)[0]
|
dqawami/openvino_training_extensions
|
GatedResidualNetwork
| false
| 15,217
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
SpatialAttention
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class SpatialAttention(nn.Module):
    """Spatial attention: pool over channels, 7x7 conv, sigmoid.

    Produces an (N, 1, H, W) attention map with values in (0, 1).
    """

    def __init__(self, in_channels):
        super().__init__()
        self.activation = nn.Sigmoid()
        self.maxpool = nn.MaxPool2d((1, in_channels))
        self.avgpool = nn.AvgPool2d((1, in_channels))
        self.conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=7,
            padding=3)

    def forward(self, x):
        # Move channels last so the (1, C) pooling windows reduce over
        # the channel axis, then move the singleton back to dim 1.
        channels_last = x.permute(0, 2, 3, 1)
        pooled_max = self.maxpool(channels_last).permute(0, 3, 1, 2)
        pooled_avg = self.avgpool(channels_last).permute(0, 3, 1, 2)
        return self.activation(self.conv(pooled_max + pooled_avg))
def get_inputs():
    """Example forward inputs: one random (4, 4, 4, 4) tensor."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Constructor arguments as (positional, keyword) for the module."""
    return [[], {'in_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused channel pooling for SpatialAttention: for each of the 16 spatial
    # positions per sample, read the 4 channel values (stride 16 apart) and
    # store max(channel) + mean(channel).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    # Running max over the 4 channels.
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Sum then scale by 1/4 for the channel mean.
    tmp7 = tmp1 + tmp0
    tmp8 = tmp3 + tmp7
    tmp9 = tmp5 + tmp8
    tmp10 = 0.25
    tmp11 = tmp9 * tmp10
    tmp12 = tmp6 + tmp11
    tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place epilogue of the 1-channel convolution: add the scalar bias
    # (in_ptr0 has a single element, broadcast to the block) and apply sigmoid.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
    """Inductor-generated forward for SpatialAttention.

    Consumes (input, conv.weight, conv.bias), clears the list, and returns
    the sigmoid attention map plus buffers saved for backward.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 1, 7, 7), (49, 49, 7, 1))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0 = channel-max + channel-mean, shape (N, 1, H, W).
        buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_1
        # 7x7 convolution; bias is fused into the next kernel instead.
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(3, 3), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 4, 4), (16, 1, 4, 1))
        buf2 = buf1
        del buf1
        # In-place bias add + sigmoid.
        triton_poi_fused_convolution_sigmoid_1[grid(64)](buf2, primals_3,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0, buf2
class SpatialAttentionNew(nn.Module):
    """Inductor-compiled SpatialAttention; `forward` runs the fused kernels."""

    def __init__(self, in_channels):
        super().__init__()
        self.activation = nn.Sigmoid()
        self.maxpool = nn.MaxPool2d((1, in_channels))
        self.avgpool = nn.AvgPool2d((1, in_channels))
        self.conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=7,
            padding=3)

    def forward(self, input_0):
        # `call` expects (input, conv weight, conv bias).
        weight = self.conv.weight
        bias = self.conv.bias
        return call([input_0, weight, bias])[0]
|
dqawami/openvino_training_extensions
|
SpatialAttention
| false
| 15,218
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
Critic
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
class Critic(nn.Module):
    """Twin Q-network critic (TD3-style).

    Two independent 2-hidden-layer MLPs (l1-l3 and l4-l6) each map the
    concatenated state-action pair to a scalar Q-value. ``Q1`` exposes only
    the first head, which the actor update typically uses.
    """

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        # Q1 architecture.
        self.l1 = nn.Linear(state_dim + action_dim, 400)
        self.l2 = nn.Linear(400, 300)
        self.l3 = nn.Linear(300, 1)
        # Q2 architecture.
        self.l4 = nn.Linear(state_dim + action_dim, 400)
        self.l5 = nn.Linear(400, 300)
        self.l6 = nn.Linear(300, 1)

    def _q1(self, xu):
        # First Q-head on an already-concatenated state-action batch.
        h = F.relu(self.l1(xu))
        h = F.relu(self.l2(h))
        return self.l3(h)

    def _q2(self, xu):
        # Second Q-head on an already-concatenated state-action batch.
        h = F.relu(self.l4(xu))
        h = F.relu(self.l5(h))
        return self.l6(h)

    def forward(self, x, u):
        """Return both Q-value estimates for states ``x`` and actions ``u``."""
        xu = torch.cat([x, u], 1)
        return self._q1(xu), self._q2(xu)

    def Q1(self, x, u):
        """Return only the first head's Q-value estimate."""
        return self._q1(torch.cat([x, u], 1))
def get_inputs():
    """Example forward inputs: random state and action batches of shape (4, 4)."""
    state = torch.rand([4, 4])
    action = torch.rand([4, 4])
    return [state, action]


def get_init_inputs():
    """Constructor arguments as (positional, keyword) for the module."""
    return [[], {'state_dim': 4, 'action_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # torch.cat([x, u], dim=1) for two (4, 4) inputs -> one (4, 8) output:
    # columns 0-3 come from in_ptr0, columns 4-7 from in_ptr1.
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    # Select the first-tensor value for the left half, second for the right.
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias-add + ReLU epilogue for the 400-unit hidden layer
    # (4 rows x 400 features; bias indexed by feature).
    xnumel = 1600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 400
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias-add + ReLU epilogue for the 300-unit hidden layer
    # (4 rows x 300 features; bias indexed by feature).
    xnumel = 1200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 300
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """Inductor-generated forward for the twin-Q Critic.

    Consumes (x, u) followed by the weight/bias pairs of l1..l6, clears the
    list, and returns (q1, q2) plus intermediate buffers for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (400, 8), (8, 1))
    assert_size_stride(primals_4, (400,), (1,))
    assert_size_stride(primals_5, (300, 400), (400, 1))
    assert_size_stride(primals_6, (300,), (1,))
    assert_size_stride(primals_7, (1, 300), (300, 1))
    assert_size_stride(primals_8, (1,), (1,))
    assert_size_stride(primals_9, (400, 8), (8, 1))
    assert_size_stride(primals_10, (400,), (1,))
    assert_size_stride(primals_11, (300, 400), (400, 1))
    assert_size_stride(primals_12, (300,), (1,))
    assert_size_stride(primals_13, (1, 300), (300, 1))
    assert_size_stride(primals_14, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0 = cat([x, u], dim=1), shared by both Q-heads.
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        # Q1 head: l1 -> relu -> l2 -> relu -> l3.
        buf1 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 400), (1,
            8), 0), out=buf1)
        del primals_3
        buf2 = buf1
        del buf1
        triton_poi_fused_relu_1[grid(1600)](buf2, primals_4, 1600, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((4, 300), (300, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (400, 300), (
            1, 400), 0), out=buf3)
        buf4 = buf3
        del buf3
        triton_poi_fused_relu_2[grid(1200)](buf4, primals_6, 1200, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_6
        buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
            (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6)
        del primals_8
        # Q2 head: l4 -> relu -> l5 -> relu -> l6 on the same buf0.
        buf7 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 400), (1,
            8), 0), out=buf7)
        del primals_9
        buf8 = buf7
        del buf7
        triton_poi_fused_relu_1[grid(1600)](buf8, primals_10, 1600, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_10
        buf9 = empty_strided_cuda((4, 300), (300, 1), torch.float32)
        extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (400, 300),
            (1, 400), 0), out=buf9)
        buf10 = buf9
        del buf9
        triton_poi_fused_relu_2[grid(1200)](buf10, primals_12, 1200, XBLOCK
            =256, num_warps=4, num_stages=1)
        del primals_12
        buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(
            primals_13, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf12)
        del primals_14
    return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13,
        primals_11, primals_7, primals_5)
class CriticNew(nn.Module):
    """Inductor-compiled twin-Q critic; `forward` dispatches to the fused
    `call` path, while `Q1` stays in eager mode for the actor update."""

    def __init__(self, state_dim, action_dim):
        super(CriticNew, self).__init__()
        self.l1 = nn.Linear(state_dim + action_dim, 400)
        self.l2 = nn.Linear(400, 300)
        self.l3 = nn.Linear(300, 1)
        self.l4 = nn.Linear(state_dim + action_dim, 400)
        self.l5 = nn.Linear(400, 300)
        self.l6 = nn.Linear(300, 1)

    def Q1(self, x, u):
        """Eager-mode first Q-head."""
        xu = torch.cat([x, u], 1)
        h = F.relu(self.l1(xu))
        h = F.relu(self.l2(h))
        return self.l3(h)

    def forward(self, input_0, input_1):
        # `call` expects the two inputs followed by l1..l6 weight/bias pairs.
        args = [input_0, input_1]
        for layer in (self.l1, self.l2, self.l3, self.l4, self.l5, self.l6):
            args.append(layer.weight)
            args.append(layer.bias)
        output = call(args)
        return output[0], output[1]
|
doudoulaile/RL-GAN-Net
|
Critic
| false
| 15,219
|
[
"MIT"
] | 112
|
9c221223d1878bc24f0f39ad34928c1bb2974ae3
|
https://github.com/doudoulaile/RL-GAN-Net/tree/9c221223d1878bc24f0f39ad34928c1bb2974ae3
|
SmallBlock
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class SmallBlock(nn.Module):
    """Pre-activation residual block: relu -> conv -> relu -> conv + identity."""

    def __init__(self, channels):
        super(SmallBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=channels, out_channels=channels,
            kernel_size=3, stride=1, padding=1, bias=False)
        self.relu = nn.ReLU(inplace=False)
        self.conv2 = nn.Conv2d(in_channels=channels, out_channels=channels,
            kernel_size=3, stride=1, padding=1, bias=False)

    def forward(self, x):
        out = self.conv1(self.relu(x))
        out = self.conv2(self.relu(out))
        # Skip connection around both convolutions.
        return torch.add(out, x)
def get_inputs():
    """Example forward inputs: one random (4, 4, 4, 4) tensor."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Constructor arguments as (positional, keyword) for the module."""
    return [[], {'channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Out-of-place elementwise ReLU over 256 values (keeps the original
    # input intact for the residual add later in `call`).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place elementwise ReLU over 256 values (applied to the conv1 output).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place residual add: in_out_ptr0 += in_ptr0 (the original input).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
    """Inductor-generated forward for SmallBlock.

    Consumes (input, conv1.weight, conv2.weight), clears the list, and
    returns relu/conv/relu/conv output plus the identity skip connection.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0 = relu(input), out of place so primals_1 survives for the skip.
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        # In-place relu on the conv1 output.
        triton_poi_fused_relu_1[grid(256)](buf2, 256, XBLOCK=256, num_warps
            =4, num_stages=1)
        buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
        buf4 = buf3
        del buf3
        # In-place residual add of the original input.
        triton_poi_fused_add_2[grid(256)](buf4, primals_1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_1
    return buf4, primals_2, primals_3, buf0, buf2
class SmallBlockNew(nn.Module):
    """Inductor-compiled SmallBlock; `forward` runs the fused `call` path."""

    def __init__(self, channels):
        super(SmallBlockNew, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=channels, out_channels=channels,
            kernel_size=3, stride=1, padding=1, bias=False)
        self.relu = nn.ReLU(inplace=False)
        self.conv2 = nn.Conv2d(in_channels=channels, out_channels=channels,
            kernel_size=3, stride=1, padding=1, bias=False)

    def forward(self, input_0):
        # `call` expects (input, conv1 weight, conv2 weight).
        outputs = call([input_0, self.conv1.weight, self.conv2.weight])
        return outputs[0]
|
dqawami/openvino_training_extensions
|
SmallBlock
| false
| 15,220
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
ResBlock
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class ResBlock(nn.Module):
    """Residual block with affine instance normalisation after each conv."""

    def __init__(self, num_of_channels):
        super(ResBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=num_of_channels, out_channels=
            num_of_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.in1 = nn.InstanceNorm2d(num_of_channels, affine=True)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(in_channels=num_of_channels, out_channels=
            num_of_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.in2 = nn.InstanceNorm2d(num_of_channels, affine=True)

    def forward(self, x):
        branch = self.relu(self.in1(self.conv1(x)))
        branch = self.in2(self.conv2(branch))
        # Identity skip connection around the whole branch.
        return torch.add(branch, x)
def get_inputs():
    """Example forward inputs: one random (4, 4, 4, 4) tensor."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Constructor arguments as (positional, keyword) for the module."""
    return [[], {'num_of_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_repeat_0(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    # Fused instance-norm + ReLU: each x0 is one (sample, channel) pair and
    # the r-axis covers its 16 spatial elements. Computes mean/var over the
    # row, normalises, applies the affine weight (in_ptr0, indexed x0 % 4 —
    # the "repeat" across samples) and bias (in_ptr2), then ReLUs.
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    x0 = xindex
    r1 = rindex
    x2 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
    tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp2, 0)
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp5, 0)
    # Row mean over the 16 spatial elements.
    tmp8 = tl.sum(tmp7, 1)[:, None]
    tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp10 = tmp9.to(tl.float32)
    tmp11 = tmp8 / tmp10
    tmp12 = tmp2 - tmp11
    tmp13 = tmp12 * tmp12
    tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
    tmp16 = tl.where(xmask, tmp14, 0)
    tmp17 = tl.sum(tmp16, 1)[:, None]
    tmp18 = tmp1 - tmp11
    # rsqrt(var + 1e-5); variance is biased (divide by 16).
    tmp19 = 16.0
    tmp20 = tmp17 / tmp19
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tmp24 = tmp18 * tmp23
    tmp25 = tmp24 * tmp0
    tmp27 = tmp25 + tmp26
    tmp28 = tl.full([1, 1], 0, tl.int32)
    tmp29 = triton_helpers.maximum(tmp28, tmp27)
    tl.store(out_ptr0 + x0, tmp0, xmask)
    tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask)
    tl.store(out_ptr4 + x0, tmp23, xmask)
    tl.store(out_ptr1 + x0, tmp11, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_repeat_1(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    # Fused instance-norm + residual add: identical normalisation to the
    # relu variant above, but instead of ReLU it adds the skip connection
    # loaded from in_ptr3 (the block's original input).
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    x0 = xindex
    r1 = rindex
    x2 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
    tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr3 + (r1 + 16 * x0), xmask, other=0.0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp2, 0)
    tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp5, 0)
    # Row mean over the 16 spatial elements.
    tmp8 = tl.sum(tmp7, 1)[:, None]
    tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp10 = tmp9.to(tl.float32)
    tmp11 = tmp8 / tmp10
    tmp12 = tmp2 - tmp11
    tmp13 = tmp12 * tmp12
    tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
    tmp16 = tl.where(xmask, tmp14, 0)
    tmp17 = tl.sum(tmp16, 1)[:, None]
    tmp18 = tmp1 - tmp11
    # rsqrt(var + 1e-5); variance is biased (divide by 16).
    tmp19 = 16.0
    tmp20 = tmp17 / tmp19
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tmp24 = tmp18 * tmp23
    tmp25 = tmp24 * tmp0
    tmp27 = tmp25 + tmp26
    tmp29 = tmp27 + tmp28
    tl.store(out_ptr0 + x0, tmp0, xmask)
    tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask)
    tl.store(out_ptr4 + x0, tmp23, xmask)
    tl.store(out_ptr1 + x0, tmp11, xmask)
def call(args):
    # Fused forward pass of the residual block:
    #   conv1 -> instance-norm + ReLU (kernel 0) -> conv2 ->
    #   instance-norm + residual add of the input (kernel 1).
    # args = [input, conv1.weight, in1.weight, in1.bias,
    #         conv2.weight, in2.weight, in2.bias]
    # Returns the block output first, followed by tensors retained for the
    # backward pass (inputs, weights, means and inverse stds).
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()  # drop caller references so buffers can be freed eagerly
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # conv1 (3x3, padding 1, no bias) via cuDNN/extern kernel
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = empty_strided_cuda((16,), (1,), torch.float32)
        buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
            )
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
            )
        get_raw_stream(0)
        # instance-norm + ReLU on conv1's output
        triton_per_fused__native_batch_norm_legit_relu_repeat_0[grid(16)](
            primals_3, buf0, primals_4, buf1, buf2, buf6, buf5, 16, 16,
            XBLOCK=8, num_warps=2, num_stages=1)
        del primals_3
        del primals_4
        # conv2 (3x3, padding 1, no bias)
        buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
        buf8 = empty_strided_cuda((16,), (1,), torch.float32)
        buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
            )
        buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
            float32)
        # instance-norm + skip connection (adds primals_1)
        triton_per_fused__native_batch_norm_legit_add_repeat_1[grid(16)](
            primals_6, buf7, primals_7, primals_1, buf8, buf9, buf13, buf12,
            16, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del primals_6
        del primals_7
    return (buf13, primals_1, primals_2, primals_5, buf0, buf1,
        reinterpret_tensor(buf5, (16,), (1,), 0), buf6, buf7, buf8,
        reinterpret_tensor(buf12, (16,), (1,), 0), reinterpret_tensor(buf9,
        (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 16,
        1, 1), (16, 1, 1, 1), 0))
class ResBlockNew(nn.Module):
    """Residual block (conv -> InstanceNorm -> ReLU -> conv -> InstanceNorm
    -> skip add) whose forward pass dispatches to the fused Triton ``call``.
    """

    def __init__(self, num_of_channels):
        super(ResBlockNew, self).__init__()
        # Both convolutions share the same 3x3 / stride-1 / pad-1 geometry
        # and are bias-free (the instance norms carry the affine shift).
        conv_kwargs = dict(kernel_size=3, stride=1, padding=1, bias=False)
        self.conv1 = nn.Conv2d(num_of_channels, num_of_channels, **conv_kwargs)
        self.in1 = nn.InstanceNorm2d(num_of_channels, affine=True)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(num_of_channels, num_of_channels, **conv_kwargs)
        self.in2 = nn.InstanceNorm2d(num_of_channels, affine=True)

    def forward(self, input_0):
        # Pack the input followed by every parameter in the exact order
        # `call` expects, then keep only the block output (first element).
        params = [
            input_0,
            self.conv1.weight,
            self.in1.weight,
            self.in1.bias,
            self.conv2.weight,
            self.in2.weight,
            self.in2.bias,
        ]
        return call(params)[0]
|
dqawami/openvino_training_extensions
|
ResBlock
| false
| 15,221
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
EntmaxBisect
|
from torch.autograd import Function
import torch
import torch.nn as nn
def entmax_bisect(X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
    """Map scores to a sparse probability distribution via alpha-entmax.

    Functional wrapper around :class:`EntmaxBisectFunction`, which solves

        max_p <X, p> - H_a(p)   s.t.   p >= 0, sum(p) == 1,

    where H_a is the Tsallis alpha-entropy, by bisecting on the threshold
    tau.  The result is differentiable with respect to both ``X`` and
    ``alpha``.

    Parameters
    ----------
    X : torch.Tensor
        Input scores.
    alpha : float or torch.Tensor
        Entropy parameter (> 1); a scalar applies to every row, a tensor
        must broadcast to ``X`` with size 1 along ``dim``.  ``alpha=2``
        yields sparsemax.  ``alpha=1`` (softmax) is numerically invalid
        here — use ``torch.nn.softmax`` for that case.
    dim : int
        Dimension along which to normalize.
    n_iter : int
        Number of bisection steps; 24 suffice for float32 precision.
    ensure_sum_one : bool
        If True, renormalize the output so it sums to exactly 1 along
        ``dim`` (otherwise it may be off by bisection tolerance).

    Returns
    -------
    torch.Tensor
        Same shape as ``X``; sums to 1 along ``dim``.
    """
    return EntmaxBisectFunction.apply(X, alpha, dim, n_iter, ensure_sum_one)
class EntmaxBisectFunction(Function):
    """Autograd implementation of alpha-entmax via bisection.

    ``forward`` searches for the threshold tau with
    ``sum(clamp(X*(alpha-1) - tau, 0) ** (1/(alpha-1))) == 1``;
    ``backward`` applies the closed-form Jacobian-vector products for both
    the scores and (optionally) alpha.
    """

    @classmethod
    def _gp(cls, x, alpha):
        # Link function g'(x) = x ** (alpha - 1).
        return x ** (alpha - 1)

    @classmethod
    def _gp_inv(cls, y, alpha):
        # Inverse of _gp: y ** (1 / (alpha - 1)).
        return y ** (1 / (alpha - 1))

    @classmethod
    def _p(cls, X, alpha):
        # Candidate distribution for shifted scores: clamp at 0, then invert
        # the link — entries pushed below the threshold become exactly 0.
        return cls._gp_inv(torch.clamp(X, min=0), alpha)

    @classmethod
    def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True
        ):
        # Broadcast alpha to X's shape with size 1 along `dim` so it can be
        # a per-row parameter.
        if not isinstance(alpha, torch.Tensor):
            alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
        alpha_shape = list(X.shape)
        alpha_shape[dim] = 1
        alpha = alpha.expand(*alpha_shape)
        ctx.alpha = alpha
        ctx.dim = dim
        d = X.shape[dim]
        max_val, _ = X.max(dim=dim, keepdim=True)
        X = X * (alpha - 1)
        max_val = max_val * (alpha - 1)
        # Initial bracket [tau_lo, tau_hi] guaranteed to contain the root of
        # f(tau) = sum(p(tau)) - 1.
        tau_lo = max_val - cls._gp(1, alpha)
        tau_hi = max_val - cls._gp(1 / d, alpha)
        f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
        dm = tau_hi - tau_lo
        for it in range(n_iter):
            # Standard bisection: halve the interval and keep the half whose
            # endpoint values bracket the root (same sign test as f_lo).
            dm /= 2
            tau_m = tau_lo + dm
            p_m = cls._p(X - tau_m, alpha)
            f_m = p_m.sum(dim) - 1
            mask = (f_m * f_lo >= 0).unsqueeze(dim)
            tau_lo = torch.where(mask, tau_m, tau_lo)
        if ensure_sum_one:
            # Remove the residual bisection error so rows sum to exactly 1.
            p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
        ctx.save_for_backward(p_m)
        return p_m

    @classmethod
    def backward(cls, ctx, dY):
        Y, = ctx.saved_tensors
        # gppr = 1 / g''(Y): derivative weighting on the support (Y > 0).
        gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
        dX = dY * gppr
        # Project out the component along the constraint sum(p) == 1.
        q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
        q = q.unsqueeze(ctx.dim)
        dX -= q * gppr
        d_alpha = None
        if ctx.needs_input_grad[1]:
            # Gradient w.r.t. alpha (only computed when requested); uses the
            # entropy of the output distribution restricted to its support.
            S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
            ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
            Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
            d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
            d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
            d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
        return dX, d_alpha, None, None, None
class EntmaxBisect(nn.Module):
    """alpha-entmax: normalizing sparse map (a la softmax) via bisection.

    Solves the optimization problem:

        max_p <x, p> - H_a(p)    s.t.    p >= 0, sum(p) == 1,

    where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1,
    using a bisection (root finding, binary search) algorithm.
    """

    def __init__(self, alpha=1.5, dim=-1, n_iter=50):
        """Configure the entmax mapping.

        Parameters
        ----------
        alpha : float or torch.Tensor
            Tensor of alpha parameters (> 1) to use. If scalar or python
            float, the same value is used for all rows; otherwise it must
            have shape (or be expandable to)
            ``alpha.shape[j] == (X.shape[j] if j != dim else 1)``.
            ``alpha=2`` corresponds to sparsemax; ``alpha=1`` would in
            theory recover softmax, but the bisection is numerically
            invalid there — use ``torch.nn.softmax`` instead.
        dim : int
            The dimension along which to apply alpha-entmax.
        n_iter : int
            Number of bisection iterations. For float32, 24 iterations
            should suffice for machine precision.
        """
        # Initialize nn.Module first: its __setattr__ intercepts every
        # attribute assignment, so the base class must be set up before we
        # store any configuration on self.
        super().__init__()
        self.dim = dim
        self.n_iter = n_iter
        self.alpha = alpha

    def forward(self, X):
        """Apply alpha-entmax to ``X`` along the configured dimension."""
        return entmax_bisect(X, alpha=self.alpha, dim=self.dim, n_iter=self
            .n_iter)
def get_inputs():
    # One uniformly-random 4-D tensor with the shape the module expects.
    shape = (4, 4, 4, 4)
    return [torch.rand(shape)]
def get_init_inputs():
    # The constructor takes no positional or keyword arguments.
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0(in_out_ptr13,
    in_ptr0, out_ptr0, out_ptr25, out_ptr31, xnumel, XBLOCK: tl.constexpr):
    # Fully-unrolled entmax bisection for alpha = 1.5 over rows of 4 scores.
    # Each program handles one row: X is scaled by alpha-1 = 0.5, the
    # candidate distribution is p = clamp(X*0.5 - tau, 0) ** 2, and tau is
    # refined by repeatedly halving the bracket [tau_lo, tau_hi] and keeping
    # the half whose f(tau) = sum(p) - 1 keeps the sign of f_lo.
    # Outputs feed the follow-up kernels that finish the iteration count:
    #   out_ptr0:     sum(p) at the initial tau_lo (basis of f_lo)
    #   out_ptr25:    intermediate half-step dm (tmp392)
    #   in_out_ptr13: tau after the unrolled iterations (tmp601)
    #   out_ptr31:    sum(p) at the last midpoint candidate (tmp618)
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    # Row max, then the initial bracket: tau_lo = max*0.5 - 1,
    # tau_hi = max*0.5 - 0.5, dm = tau_hi - tau_lo.
    tmp4 = triton_helpers.maximum(tmp0, tmp3)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp9 = tmp8 * tmp1
    tmp10 = 1.0
    tmp11 = tmp9 - tmp10
    tmp12 = tmp9 - tmp1
    tmp13 = tmp12 - tmp11
    tmp14 = tmp13 * tmp1
    tmp15 = tmp11 + tmp14
    tmp16 = tmp2 - tmp15
    tmp17 = 0.0
    tmp18 = triton_helpers.maximum(tmp16, tmp17)
    tmp19 = 2.0
    tmp20 = libdevice.pow(tmp18, tmp19)
    tmp21 = tmp3 * tmp1
    tmp22 = tmp21 - tmp15
    tmp23 = triton_helpers.maximum(tmp22, tmp17)
    tmp24 = libdevice.pow(tmp23, tmp19)
    tmp25 = tmp20 + tmp24
    tmp26 = tmp5 * tmp1
    tmp27 = tmp26 - tmp15
    tmp28 = triton_helpers.maximum(tmp27, tmp17)
    tmp29 = libdevice.pow(tmp28, tmp19)
    tmp30 = tmp25 + tmp29
    tmp31 = tmp7 * tmp1
    tmp32 = tmp31 - tmp15
    tmp33 = triton_helpers.maximum(tmp32, tmp17)
    tmp34 = libdevice.pow(tmp33, tmp19)
    tmp35 = tmp30 + tmp34
    # sum(p) at tau_lo itself -> f_lo = tmp50 - 1 (tmp52 below)
    tmp36 = tmp2 - tmp11
    tmp37 = triton_helpers.maximum(tmp36, tmp17)
    tmp38 = libdevice.pow(tmp37, tmp19)
    tmp39 = tmp21 - tmp11
    tmp40 = triton_helpers.maximum(tmp39, tmp17)
    tmp41 = libdevice.pow(tmp40, tmp19)
    tmp42 = tmp38 + tmp41
    tmp43 = tmp26 - tmp11
    tmp44 = triton_helpers.maximum(tmp43, tmp17)
    tmp45 = libdevice.pow(tmp44, tmp19)
    tmp46 = tmp42 + tmp45
    tmp47 = tmp31 - tmp11
    tmp48 = triton_helpers.maximum(tmp47, tmp17)
    tmp49 = libdevice.pow(tmp48, tmp19)
    tmp50 = tmp46 + tmp49
    tmp51 = tmp35 - tmp10
    tmp52 = tmp50 - tmp10
    tmp53 = tmp51 * tmp52
    tmp54 = tmp53 >= tmp17
    tmp55 = tl.where(tmp54, tmp15, tmp11)
    # Unrolled bisection: each repetition halves dm, evaluates sum(p) at the
    # midpoint tau + dm, and advances tau when sign(f_m) == sign(f_lo).
    tmp56 = tmp14 * tmp1
    tmp57 = tmp55 + tmp56
    tmp58 = tmp2 - tmp57
    tmp59 = triton_helpers.maximum(tmp58, tmp17)
    tmp60 = libdevice.pow(tmp59, tmp19)
    tmp61 = tmp21 - tmp57
    tmp62 = triton_helpers.maximum(tmp61, tmp17)
    tmp63 = libdevice.pow(tmp62, tmp19)
    tmp64 = tmp60 + tmp63
    tmp65 = tmp26 - tmp57
    tmp66 = triton_helpers.maximum(tmp65, tmp17)
    tmp67 = libdevice.pow(tmp66, tmp19)
    tmp68 = tmp64 + tmp67
    tmp69 = tmp31 - tmp57
    tmp70 = triton_helpers.maximum(tmp69, tmp17)
    tmp71 = libdevice.pow(tmp70, tmp19)
    tmp72 = tmp68 + tmp71
    tmp73 = tmp72 - tmp10
    tmp74 = tmp73 * tmp52
    tmp75 = tmp74 >= tmp17
    tmp76 = tl.where(tmp75, tmp57, tmp55)
    tmp77 = tmp56 * tmp1
    tmp78 = tmp76 + tmp77
    tmp79 = tmp2 - tmp78
    tmp80 = triton_helpers.maximum(tmp79, tmp17)
    tmp81 = libdevice.pow(tmp80, tmp19)
    tmp82 = tmp21 - tmp78
    tmp83 = triton_helpers.maximum(tmp82, tmp17)
    tmp84 = libdevice.pow(tmp83, tmp19)
    tmp85 = tmp81 + tmp84
    tmp86 = tmp26 - tmp78
    tmp87 = triton_helpers.maximum(tmp86, tmp17)
    tmp88 = libdevice.pow(tmp87, tmp19)
    tmp89 = tmp85 + tmp88
    tmp90 = tmp31 - tmp78
    tmp91 = triton_helpers.maximum(tmp90, tmp17)
    tmp92 = libdevice.pow(tmp91, tmp19)
    tmp93 = tmp89 + tmp92
    tmp94 = tmp93 - tmp10
    tmp95 = tmp94 * tmp52
    tmp96 = tmp95 >= tmp17
    tmp97 = tl.where(tmp96, tmp78, tmp76)
    tmp98 = tmp77 * tmp1
    tmp99 = tmp97 + tmp98
    tmp100 = tmp2 - tmp99
    tmp101 = triton_helpers.maximum(tmp100, tmp17)
    tmp102 = libdevice.pow(tmp101, tmp19)
    tmp103 = tmp21 - tmp99
    tmp104 = triton_helpers.maximum(tmp103, tmp17)
    tmp105 = libdevice.pow(tmp104, tmp19)
    tmp106 = tmp102 + tmp105
    tmp107 = tmp26 - tmp99
    tmp108 = triton_helpers.maximum(tmp107, tmp17)
    tmp109 = libdevice.pow(tmp108, tmp19)
    tmp110 = tmp106 + tmp109
    tmp111 = tmp31 - tmp99
    tmp112 = triton_helpers.maximum(tmp111, tmp17)
    tmp113 = libdevice.pow(tmp112, tmp19)
    tmp114 = tmp110 + tmp113
    tmp115 = tmp114 - tmp10
    tmp116 = tmp115 * tmp52
    tmp117 = tmp116 >= tmp17
    tmp118 = tl.where(tmp117, tmp99, tmp97)
    tmp119 = tmp98 * tmp1
    tmp120 = tmp118 + tmp119
    tmp121 = tmp2 - tmp120
    tmp122 = triton_helpers.maximum(tmp121, tmp17)
    tmp123 = libdevice.pow(tmp122, tmp19)
    tmp124 = tmp21 - tmp120
    tmp125 = triton_helpers.maximum(tmp124, tmp17)
    tmp126 = libdevice.pow(tmp125, tmp19)
    tmp127 = tmp123 + tmp126
    tmp128 = tmp26 - tmp120
    tmp129 = triton_helpers.maximum(tmp128, tmp17)
    tmp130 = libdevice.pow(tmp129, tmp19)
    tmp131 = tmp127 + tmp130
    tmp132 = tmp31 - tmp120
    tmp133 = triton_helpers.maximum(tmp132, tmp17)
    tmp134 = libdevice.pow(tmp133, tmp19)
    tmp135 = tmp131 + tmp134
    tmp136 = tmp135 - tmp10
    tmp137 = tmp136 * tmp52
    tmp138 = tmp137 >= tmp17
    tmp139 = tl.where(tmp138, tmp120, tmp118)
    tmp140 = tmp119 * tmp1
    tmp141 = tmp139 + tmp140
    tmp142 = tmp2 - tmp141
    tmp143 = triton_helpers.maximum(tmp142, tmp17)
    tmp144 = libdevice.pow(tmp143, tmp19)
    tmp145 = tmp21 - tmp141
    tmp146 = triton_helpers.maximum(tmp145, tmp17)
    tmp147 = libdevice.pow(tmp146, tmp19)
    tmp148 = tmp144 + tmp147
    tmp149 = tmp26 - tmp141
    tmp150 = triton_helpers.maximum(tmp149, tmp17)
    tmp151 = libdevice.pow(tmp150, tmp19)
    tmp152 = tmp148 + tmp151
    tmp153 = tmp31 - tmp141
    tmp154 = triton_helpers.maximum(tmp153, tmp17)
    tmp155 = libdevice.pow(tmp154, tmp19)
    tmp156 = tmp152 + tmp155
    tmp157 = tmp156 - tmp10
    tmp158 = tmp157 * tmp52
    tmp159 = tmp158 >= tmp17
    tmp160 = tl.where(tmp159, tmp141, tmp139)
    tmp161 = tmp140 * tmp1
    tmp162 = tmp160 + tmp161
    tmp163 = tmp2 - tmp162
    tmp164 = triton_helpers.maximum(tmp163, tmp17)
    tmp165 = libdevice.pow(tmp164, tmp19)
    tmp166 = tmp21 - tmp162
    tmp167 = triton_helpers.maximum(tmp166, tmp17)
    tmp168 = libdevice.pow(tmp167, tmp19)
    tmp169 = tmp165 + tmp168
    tmp170 = tmp26 - tmp162
    tmp171 = triton_helpers.maximum(tmp170, tmp17)
    tmp172 = libdevice.pow(tmp171, tmp19)
    tmp173 = tmp169 + tmp172
    tmp174 = tmp31 - tmp162
    tmp175 = triton_helpers.maximum(tmp174, tmp17)
    tmp176 = libdevice.pow(tmp175, tmp19)
    tmp177 = tmp173 + tmp176
    tmp178 = tmp177 - tmp10
    tmp179 = tmp178 * tmp52
    tmp180 = tmp179 >= tmp17
    tmp181 = tl.where(tmp180, tmp162, tmp160)
    tmp182 = tmp161 * tmp1
    tmp183 = tmp181 + tmp182
    tmp184 = tmp2 - tmp183
    tmp185 = triton_helpers.maximum(tmp184, tmp17)
    tmp186 = libdevice.pow(tmp185, tmp19)
    tmp187 = tmp21 - tmp183
    tmp188 = triton_helpers.maximum(tmp187, tmp17)
    tmp189 = libdevice.pow(tmp188, tmp19)
    tmp190 = tmp186 + tmp189
    tmp191 = tmp26 - tmp183
    tmp192 = triton_helpers.maximum(tmp191, tmp17)
    tmp193 = libdevice.pow(tmp192, tmp19)
    tmp194 = tmp190 + tmp193
    tmp195 = tmp31 - tmp183
    tmp196 = triton_helpers.maximum(tmp195, tmp17)
    tmp197 = libdevice.pow(tmp196, tmp19)
    tmp198 = tmp194 + tmp197
    tmp199 = tmp198 - tmp10
    tmp200 = tmp199 * tmp52
    tmp201 = tmp200 >= tmp17
    tmp202 = tl.where(tmp201, tmp183, tmp181)
    tmp203 = tmp182 * tmp1
    tmp204 = tmp202 + tmp203
    tmp205 = tmp2 - tmp204
    tmp206 = triton_helpers.maximum(tmp205, tmp17)
    tmp207 = libdevice.pow(tmp206, tmp19)
    tmp208 = tmp21 - tmp204
    tmp209 = triton_helpers.maximum(tmp208, tmp17)
    tmp210 = libdevice.pow(tmp209, tmp19)
    tmp211 = tmp207 + tmp210
    tmp212 = tmp26 - tmp204
    tmp213 = triton_helpers.maximum(tmp212, tmp17)
    tmp214 = libdevice.pow(tmp213, tmp19)
    tmp215 = tmp211 + tmp214
    tmp216 = tmp31 - tmp204
    tmp217 = triton_helpers.maximum(tmp216, tmp17)
    tmp218 = libdevice.pow(tmp217, tmp19)
    tmp219 = tmp215 + tmp218
    tmp220 = tmp219 - tmp10
    tmp221 = tmp220 * tmp52
    tmp222 = tmp221 >= tmp17
    tmp223 = tl.where(tmp222, tmp204, tmp202)
    tmp224 = tmp203 * tmp1
    tmp225 = tmp223 + tmp224
    tmp226 = tmp2 - tmp225
    tmp227 = triton_helpers.maximum(tmp226, tmp17)
    tmp228 = libdevice.pow(tmp227, tmp19)
    tmp229 = tmp21 - tmp225
    tmp230 = triton_helpers.maximum(tmp229, tmp17)
    tmp231 = libdevice.pow(tmp230, tmp19)
    tmp232 = tmp228 + tmp231
    tmp233 = tmp26 - tmp225
    tmp234 = triton_helpers.maximum(tmp233, tmp17)
    tmp235 = libdevice.pow(tmp234, tmp19)
    tmp236 = tmp232 + tmp235
    tmp237 = tmp31 - tmp225
    tmp238 = triton_helpers.maximum(tmp237, tmp17)
    tmp239 = libdevice.pow(tmp238, tmp19)
    tmp240 = tmp236 + tmp239
    tmp241 = tmp240 - tmp10
    tmp242 = tmp241 * tmp52
    tmp243 = tmp242 >= tmp17
    tmp244 = tl.where(tmp243, tmp225, tmp223)
    tmp245 = tmp224 * tmp1
    tmp246 = tmp244 + tmp245
    tmp247 = tmp2 - tmp246
    tmp248 = triton_helpers.maximum(tmp247, tmp17)
    tmp249 = libdevice.pow(tmp248, tmp19)
    tmp250 = tmp21 - tmp246
    tmp251 = triton_helpers.maximum(tmp250, tmp17)
    tmp252 = libdevice.pow(tmp251, tmp19)
    tmp253 = tmp249 + tmp252
    tmp254 = tmp26 - tmp246
    tmp255 = triton_helpers.maximum(tmp254, tmp17)
    tmp256 = libdevice.pow(tmp255, tmp19)
    tmp257 = tmp253 + tmp256
    tmp258 = tmp31 - tmp246
    tmp259 = triton_helpers.maximum(tmp258, tmp17)
    tmp260 = libdevice.pow(tmp259, tmp19)
    tmp261 = tmp257 + tmp260
    tmp262 = tmp261 - tmp10
    tmp263 = tmp262 * tmp52
    tmp264 = tmp263 >= tmp17
    tmp265 = tl.where(tmp264, tmp246, tmp244)
    tmp266 = tmp245 * tmp1
    tmp267 = tmp265 + tmp266
    tmp268 = tmp2 - tmp267
    tmp269 = triton_helpers.maximum(tmp268, tmp17)
    tmp270 = libdevice.pow(tmp269, tmp19)
    tmp271 = tmp21 - tmp267
    tmp272 = triton_helpers.maximum(tmp271, tmp17)
    tmp273 = libdevice.pow(tmp272, tmp19)
    tmp274 = tmp270 + tmp273
    tmp275 = tmp26 - tmp267
    tmp276 = triton_helpers.maximum(tmp275, tmp17)
    tmp277 = libdevice.pow(tmp276, tmp19)
    tmp278 = tmp274 + tmp277
    tmp279 = tmp31 - tmp267
    tmp280 = triton_helpers.maximum(tmp279, tmp17)
    tmp281 = libdevice.pow(tmp280, tmp19)
    tmp282 = tmp278 + tmp281
    tmp283 = tmp282 - tmp10
    tmp284 = tmp283 * tmp52
    tmp285 = tmp284 >= tmp17
    tmp286 = tl.where(tmp285, tmp267, tmp265)
    tmp287 = tmp266 * tmp1
    tmp288 = tmp286 + tmp287
    tmp289 = tmp2 - tmp288
    tmp290 = triton_helpers.maximum(tmp289, tmp17)
    tmp291 = libdevice.pow(tmp290, tmp19)
    tmp292 = tmp21 - tmp288
    tmp293 = triton_helpers.maximum(tmp292, tmp17)
    tmp294 = libdevice.pow(tmp293, tmp19)
    tmp295 = tmp291 + tmp294
    tmp296 = tmp26 - tmp288
    tmp297 = triton_helpers.maximum(tmp296, tmp17)
    tmp298 = libdevice.pow(tmp297, tmp19)
    tmp299 = tmp295 + tmp298
    tmp300 = tmp31 - tmp288
    tmp301 = triton_helpers.maximum(tmp300, tmp17)
    tmp302 = libdevice.pow(tmp301, tmp19)
    tmp303 = tmp299 + tmp302
    tmp304 = tmp303 - tmp10
    tmp305 = tmp304 * tmp52
    tmp306 = tmp305 >= tmp17
    tmp307 = tl.where(tmp306, tmp288, tmp286)
    tmp308 = tmp287 * tmp1
    tmp309 = tmp307 + tmp308
    tmp310 = tmp2 - tmp309
    tmp311 = triton_helpers.maximum(tmp310, tmp17)
    tmp312 = libdevice.pow(tmp311, tmp19)
    tmp313 = tmp21 - tmp309
    tmp314 = triton_helpers.maximum(tmp313, tmp17)
    tmp315 = libdevice.pow(tmp314, tmp19)
    tmp316 = tmp312 + tmp315
    tmp317 = tmp26 - tmp309
    tmp318 = triton_helpers.maximum(tmp317, tmp17)
    tmp319 = libdevice.pow(tmp318, tmp19)
    tmp320 = tmp316 + tmp319
    tmp321 = tmp31 - tmp309
    tmp322 = triton_helpers.maximum(tmp321, tmp17)
    tmp323 = libdevice.pow(tmp322, tmp19)
    tmp324 = tmp320 + tmp323
    tmp325 = tmp324 - tmp10
    tmp326 = tmp325 * tmp52
    tmp327 = tmp326 >= tmp17
    tmp328 = tl.where(tmp327, tmp309, tmp307)
    tmp329 = tmp308 * tmp1
    tmp330 = tmp328 + tmp329
    tmp331 = tmp2 - tmp330
    tmp332 = triton_helpers.maximum(tmp331, tmp17)
    tmp333 = libdevice.pow(tmp332, tmp19)
    tmp334 = tmp21 - tmp330
    tmp335 = triton_helpers.maximum(tmp334, tmp17)
    tmp336 = libdevice.pow(tmp335, tmp19)
    tmp337 = tmp333 + tmp336
    tmp338 = tmp26 - tmp330
    tmp339 = triton_helpers.maximum(tmp338, tmp17)
    tmp340 = libdevice.pow(tmp339, tmp19)
    tmp341 = tmp337 + tmp340
    tmp342 = tmp31 - tmp330
    tmp343 = triton_helpers.maximum(tmp342, tmp17)
    tmp344 = libdevice.pow(tmp343, tmp19)
    tmp345 = tmp341 + tmp344
    tmp346 = tmp345 - tmp10
    tmp347 = tmp346 * tmp52
    tmp348 = tmp347 >= tmp17
    tmp349 = tl.where(tmp348, tmp330, tmp328)
    tmp350 = tmp329 * tmp1
    tmp351 = tmp349 + tmp350
    tmp352 = tmp2 - tmp351
    tmp353 = triton_helpers.maximum(tmp352, tmp17)
    tmp354 = libdevice.pow(tmp353, tmp19)
    tmp355 = tmp21 - tmp351
    tmp356 = triton_helpers.maximum(tmp355, tmp17)
    tmp357 = libdevice.pow(tmp356, tmp19)
    tmp358 = tmp354 + tmp357
    tmp359 = tmp26 - tmp351
    tmp360 = triton_helpers.maximum(tmp359, tmp17)
    tmp361 = libdevice.pow(tmp360, tmp19)
    tmp362 = tmp358 + tmp361
    tmp363 = tmp31 - tmp351
    tmp364 = triton_helpers.maximum(tmp363, tmp17)
    tmp365 = libdevice.pow(tmp364, tmp19)
    tmp366 = tmp362 + tmp365
    tmp367 = tmp366 - tmp10
    tmp368 = tmp367 * tmp52
    tmp369 = tmp368 >= tmp17
    tmp370 = tl.where(tmp369, tmp351, tmp349)
    tmp371 = tmp350 * tmp1
    tmp372 = tmp370 + tmp371
    tmp373 = tmp2 - tmp372
    tmp374 = triton_helpers.maximum(tmp373, tmp17)
    tmp375 = libdevice.pow(tmp374, tmp19)
    tmp376 = tmp21 - tmp372
    tmp377 = triton_helpers.maximum(tmp376, tmp17)
    tmp378 = libdevice.pow(tmp377, tmp19)
    tmp379 = tmp375 + tmp378
    tmp380 = tmp26 - tmp372
    tmp381 = triton_helpers.maximum(tmp380, tmp17)
    tmp382 = libdevice.pow(tmp381, tmp19)
    tmp383 = tmp379 + tmp382
    tmp384 = tmp31 - tmp372
    tmp385 = triton_helpers.maximum(tmp384, tmp17)
    tmp386 = libdevice.pow(tmp385, tmp19)
    tmp387 = tmp383 + tmp386
    tmp388 = tmp387 - tmp10
    tmp389 = tmp388 * tmp52
    tmp390 = tmp389 >= tmp17
    tmp391 = tl.where(tmp390, tmp372, tmp370)
    tmp392 = tmp371 * tmp1
    tmp393 = tmp391 + tmp392
    tmp394 = tmp2 - tmp393
    tmp395 = triton_helpers.maximum(tmp394, tmp17)
    tmp396 = libdevice.pow(tmp395, tmp19)
    tmp397 = tmp21 - tmp393
    tmp398 = triton_helpers.maximum(tmp397, tmp17)
    tmp399 = libdevice.pow(tmp398, tmp19)
    tmp400 = tmp396 + tmp399
    tmp401 = tmp26 - tmp393
    tmp402 = triton_helpers.maximum(tmp401, tmp17)
    tmp403 = libdevice.pow(tmp402, tmp19)
    tmp404 = tmp400 + tmp403
    tmp405 = tmp31 - tmp393
    tmp406 = triton_helpers.maximum(tmp405, tmp17)
    tmp407 = libdevice.pow(tmp406, tmp19)
    tmp408 = tmp404 + tmp407
    tmp409 = tmp408 - tmp10
    tmp410 = tmp409 * tmp52
    tmp411 = tmp410 >= tmp17
    tmp412 = tl.where(tmp411, tmp393, tmp391)
    tmp413 = tmp392 * tmp1
    tmp414 = tmp412 + tmp413
    tmp415 = tmp2 - tmp414
    tmp416 = triton_helpers.maximum(tmp415, tmp17)
    tmp417 = libdevice.pow(tmp416, tmp19)
    tmp418 = tmp21 - tmp414
    tmp419 = triton_helpers.maximum(tmp418, tmp17)
    tmp420 = libdevice.pow(tmp419, tmp19)
    tmp421 = tmp417 + tmp420
    tmp422 = tmp26 - tmp414
    tmp423 = triton_helpers.maximum(tmp422, tmp17)
    tmp424 = libdevice.pow(tmp423, tmp19)
    tmp425 = tmp421 + tmp424
    tmp426 = tmp31 - tmp414
    tmp427 = triton_helpers.maximum(tmp426, tmp17)
    tmp428 = libdevice.pow(tmp427, tmp19)
    tmp429 = tmp425 + tmp428
    tmp430 = tmp429 - tmp10
    tmp431 = tmp430 * tmp52
    tmp432 = tmp431 >= tmp17
    tmp433 = tl.where(tmp432, tmp414, tmp412)
    tmp434 = tmp413 * tmp1
    tmp435 = tmp433 + tmp434
    tmp436 = tmp2 - tmp435
    tmp437 = triton_helpers.maximum(tmp436, tmp17)
    tmp438 = libdevice.pow(tmp437, tmp19)
    tmp439 = tmp21 - tmp435
    tmp440 = triton_helpers.maximum(tmp439, tmp17)
    tmp441 = libdevice.pow(tmp440, tmp19)
    tmp442 = tmp438 + tmp441
    tmp443 = tmp26 - tmp435
    tmp444 = triton_helpers.maximum(tmp443, tmp17)
    tmp445 = libdevice.pow(tmp444, tmp19)
    tmp446 = tmp442 + tmp445
    tmp447 = tmp31 - tmp435
    tmp448 = triton_helpers.maximum(tmp447, tmp17)
    tmp449 = libdevice.pow(tmp448, tmp19)
    tmp450 = tmp446 + tmp449
    tmp451 = tmp450 - tmp10
    tmp452 = tmp451 * tmp52
    tmp453 = tmp452 >= tmp17
    tmp454 = tl.where(tmp453, tmp435, tmp433)
    tmp455 = tmp434 * tmp1
    tmp456 = tmp454 + tmp455
    tmp457 = tmp2 - tmp456
    tmp458 = triton_helpers.maximum(tmp457, tmp17)
    tmp459 = libdevice.pow(tmp458, tmp19)
    tmp460 = tmp21 - tmp456
    tmp461 = triton_helpers.maximum(tmp460, tmp17)
    tmp462 = libdevice.pow(tmp461, tmp19)
    tmp463 = tmp459 + tmp462
    tmp464 = tmp26 - tmp456
    tmp465 = triton_helpers.maximum(tmp464, tmp17)
    tmp466 = libdevice.pow(tmp465, tmp19)
    tmp467 = tmp463 + tmp466
    tmp468 = tmp31 - tmp456
    tmp469 = triton_helpers.maximum(tmp468, tmp17)
    tmp470 = libdevice.pow(tmp469, tmp19)
    tmp471 = tmp467 + tmp470
    tmp472 = tmp471 - tmp10
    tmp473 = tmp472 * tmp52
    tmp474 = tmp473 >= tmp17
    tmp475 = tl.where(tmp474, tmp456, tmp454)
    tmp476 = tmp455 * tmp1
    tmp477 = tmp475 + tmp476
    tmp478 = tmp2 - tmp477
    tmp479 = triton_helpers.maximum(tmp478, tmp17)
    tmp480 = libdevice.pow(tmp479, tmp19)
    tmp481 = tmp21 - tmp477
    tmp482 = triton_helpers.maximum(tmp481, tmp17)
    tmp483 = libdevice.pow(tmp482, tmp19)
    tmp484 = tmp480 + tmp483
    tmp485 = tmp26 - tmp477
    tmp486 = triton_helpers.maximum(tmp485, tmp17)
    tmp487 = libdevice.pow(tmp486, tmp19)
    tmp488 = tmp484 + tmp487
    tmp489 = tmp31 - tmp477
    tmp490 = triton_helpers.maximum(tmp489, tmp17)
    tmp491 = libdevice.pow(tmp490, tmp19)
    tmp492 = tmp488 + tmp491
    tmp493 = tmp492 - tmp10
    tmp494 = tmp493 * tmp52
    tmp495 = tmp494 >= tmp17
    tmp496 = tl.where(tmp495, tmp477, tmp475)
    tmp497 = tmp476 * tmp1
    tmp498 = tmp496 + tmp497
    tmp499 = tmp2 - tmp498
    tmp500 = triton_helpers.maximum(tmp499, tmp17)
    tmp501 = libdevice.pow(tmp500, tmp19)
    tmp502 = tmp21 - tmp498
    tmp503 = triton_helpers.maximum(tmp502, tmp17)
    tmp504 = libdevice.pow(tmp503, tmp19)
    tmp505 = tmp501 + tmp504
    tmp506 = tmp26 - tmp498
    tmp507 = triton_helpers.maximum(tmp506, tmp17)
    tmp508 = libdevice.pow(tmp507, tmp19)
    tmp509 = tmp505 + tmp508
    tmp510 = tmp31 - tmp498
    tmp511 = triton_helpers.maximum(tmp510, tmp17)
    tmp512 = libdevice.pow(tmp511, tmp19)
    tmp513 = tmp509 + tmp512
    tmp514 = tmp513 - tmp10
    tmp515 = tmp514 * tmp52
    tmp516 = tmp515 >= tmp17
    tmp517 = tl.where(tmp516, tmp498, tmp496)
    tmp518 = tmp497 * tmp1
    tmp519 = tmp517 + tmp518
    tmp520 = tmp2 - tmp519
    tmp521 = triton_helpers.maximum(tmp520, tmp17)
    tmp522 = libdevice.pow(tmp521, tmp19)
    tmp523 = tmp21 - tmp519
    tmp524 = triton_helpers.maximum(tmp523, tmp17)
    tmp525 = libdevice.pow(tmp524, tmp19)
    tmp526 = tmp522 + tmp525
    tmp527 = tmp26 - tmp519
    tmp528 = triton_helpers.maximum(tmp527, tmp17)
    tmp529 = libdevice.pow(tmp528, tmp19)
    tmp530 = tmp526 + tmp529
    tmp531 = tmp31 - tmp519
    tmp532 = triton_helpers.maximum(tmp531, tmp17)
    tmp533 = libdevice.pow(tmp532, tmp19)
    tmp534 = tmp530 + tmp533
    tmp535 = tmp534 - tmp10
    tmp536 = tmp535 * tmp52
    tmp537 = tmp536 >= tmp17
    tmp538 = tl.where(tmp537, tmp519, tmp517)
    tmp539 = tmp518 * tmp1
    tmp540 = tmp538 + tmp539
    tmp541 = tmp2 - tmp540
    tmp542 = triton_helpers.maximum(tmp541, tmp17)
    tmp543 = libdevice.pow(tmp542, tmp19)
    tmp544 = tmp21 - tmp540
    tmp545 = triton_helpers.maximum(tmp544, tmp17)
    tmp546 = libdevice.pow(tmp545, tmp19)
    tmp547 = tmp543 + tmp546
    tmp548 = tmp26 - tmp540
    tmp549 = triton_helpers.maximum(tmp548, tmp17)
    tmp550 = libdevice.pow(tmp549, tmp19)
    tmp551 = tmp547 + tmp550
    tmp552 = tmp31 - tmp540
    tmp553 = triton_helpers.maximum(tmp552, tmp17)
    tmp554 = libdevice.pow(tmp553, tmp19)
    tmp555 = tmp551 + tmp554
    tmp556 = tmp555 - tmp10
    tmp557 = tmp556 * tmp52
    tmp558 = tmp557 >= tmp17
    tmp559 = tl.where(tmp558, tmp540, tmp538)
    tmp560 = tmp539 * tmp1
    tmp561 = tmp559 + tmp560
    tmp562 = tmp2 - tmp561
    tmp563 = triton_helpers.maximum(tmp562, tmp17)
    tmp564 = libdevice.pow(tmp563, tmp19)
    tmp565 = tmp21 - tmp561
    tmp566 = triton_helpers.maximum(tmp565, tmp17)
    tmp567 = libdevice.pow(tmp566, tmp19)
    tmp568 = tmp564 + tmp567
    tmp569 = tmp26 - tmp561
    tmp570 = triton_helpers.maximum(tmp569, tmp17)
    tmp571 = libdevice.pow(tmp570, tmp19)
    tmp572 = tmp568 + tmp571
    tmp573 = tmp31 - tmp561
    tmp574 = triton_helpers.maximum(tmp573, tmp17)
    tmp575 = libdevice.pow(tmp574, tmp19)
    tmp576 = tmp572 + tmp575
    tmp577 = tmp576 - tmp10
    tmp578 = tmp577 * tmp52
    tmp579 = tmp578 >= tmp17
    tmp580 = tl.where(tmp579, tmp561, tmp559)
    tmp581 = tmp560 * tmp1
    tmp582 = tmp580 + tmp581
    tmp583 = tmp2 - tmp582
    tmp584 = triton_helpers.maximum(tmp583, tmp17)
    tmp585 = libdevice.pow(tmp584, tmp19)
    tmp586 = tmp21 - tmp582
    tmp587 = triton_helpers.maximum(tmp586, tmp17)
    tmp588 = libdevice.pow(tmp587, tmp19)
    tmp589 = tmp585 + tmp588
    tmp590 = tmp26 - tmp582
    tmp591 = triton_helpers.maximum(tmp590, tmp17)
    tmp592 = libdevice.pow(tmp591, tmp19)
    tmp593 = tmp589 + tmp592
    tmp594 = tmp31 - tmp582
    tmp595 = triton_helpers.maximum(tmp594, tmp17)
    tmp596 = libdevice.pow(tmp595, tmp19)
    tmp597 = tmp593 + tmp596
    tmp598 = tmp597 - tmp10
    tmp599 = tmp598 * tmp52
    tmp600 = tmp599 >= tmp17
    tmp601 = tl.where(tmp600, tmp582, tmp580)
    tmp602 = tmp581 * tmp1
    tmp603 = tmp601 + tmp602
    tmp604 = tmp2 - tmp603
    tmp605 = triton_helpers.maximum(tmp604, tmp17)
    tmp606 = libdevice.pow(tmp605, tmp19)
    tmp607 = tmp21 - tmp603
    tmp608 = triton_helpers.maximum(tmp607, tmp17)
    tmp609 = libdevice.pow(tmp608, tmp19)
    tmp610 = tmp606 + tmp609
    tmp611 = tmp26 - tmp603
    tmp612 = triton_helpers.maximum(tmp611, tmp17)
    tmp613 = libdevice.pow(tmp612, tmp19)
    tmp614 = tmp610 + tmp613
    tmp615 = tmp31 - tmp603
    tmp616 = triton_helpers.maximum(tmp615, tmp17)
    tmp617 = libdevice.pow(tmp616, tmp19)
    tmp618 = tmp614 + tmp617
    # Persist partial state for the follow-up kernels in this fused graph.
    tl.store(out_ptr0 + x0, tmp50, xmask)
    tl.store(out_ptr25 + x0, tmp392, xmask)
    tl.store(in_out_ptr13 + x0, tmp601, xmask)
    tl.store(out_ptr31 + x0, tmp618, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_where_1(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Continuation of the unrolled entmax bisection (alpha = 1.5): evaluates
    # one more candidate p = clamp(X*0.5 - tau_next, 0) ** 2 element-wise,
    # where tau_next advances the threshold from in_ptr3 by a further-halved
    # step derived from the dm value in in_ptr4.
    #   in_ptr0: original scores X            in_ptr1/in_ptr2: per-row sums
    #   in_ptr3: current tau per row          in_ptr4: saved dm per row
    #   out_ptr0: candidate p for every element
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index (4 scores per row)
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1  # X * (alpha - 1)
    tmp4 = 1.0
    tmp5 = tmp3 - tmp4  # f_m = sum(p) - 1
    tmp7 = tmp6 - tmp4  # f_lo = sum(p at tau_lo) - 1
    tmp8 = tmp5 * tmp7
    tmp9 = 0.0
    tmp10 = tmp8 >= tmp9  # same-sign test used to accept the midpoint
    # Halve the saved dm ten more times to reach this iteration's step.
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp11 + tmp22
    tmp24 = tl.where(tmp10, tmp23, tmp11)  # conditionally advance tau
    tmp25 = tmp22 * tmp1
    tmp26 = tmp24 + tmp25  # next midpoint candidate
    tmp27 = tmp2 - tmp26
    tmp28 = triton_helpers.maximum(tmp27, tmp9)  # clamp(..., min=0)
    tmp29 = 2.0
    tmp30 = libdevice.pow(tmp28, tmp29)  # p = (.)**(1/(alpha-1)) = (.)**2
    tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    """Inductor-generated fused per-row kernel (64 rows of 4 elements).

    Performs two chained conditional bisection updates of the per-row
    threshold tau (stored in-place in in_out_ptr0), then recomputes
    f(tau) = sum_j pow(max(x_j/2 - tau', 0), 2) over the row's 4 elements
    at the next probe point and writes it to out_ptr0.
    NOTE(review): appears to be two unrolled iterations of the same
    threshold bisection as kernel *_1 -- TODO confirm.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex  # row index
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + x0, xmask)
    tmp14 = tl.load(in_ptr2 + x0, xmask)
    tmp18 = tl.load(in_out_ptr0 + x0, xmask)
    tmp19 = tl.load(in_ptr3 + x0, xmask)
    tmp36 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp44 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp50 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp56 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    # Row-sum of precomputed clamped-power terms, then compare to 1.
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 1.0
    tmp8 = tmp6 - tmp7
    tmp10 = tmp9 - tmp7
    tmp11 = tmp8 * tmp10
    tmp12 = 0.0
    tmp13 = tmp11 >= tmp12  # sign test for the newer probe
    tmp15 = tmp14 - tmp7
    tmp16 = tmp15 * tmp10
    tmp17 = tmp16 >= tmp12  # sign test for the older probe
    # Halve the initial step 10 times: tmp30 == tmp19 * 0.5**10.
    tmp20 = 0.5
    tmp21 = tmp19 * tmp20
    tmp22 = tmp21 * tmp20
    tmp23 = tmp22 * tmp20
    tmp24 = tmp23 * tmp20
    tmp25 = tmp24 * tmp20
    tmp26 = tmp25 * tmp20
    tmp27 = tmp26 * tmp20
    tmp28 = tmp27 * tmp20
    tmp29 = tmp28 * tmp20
    tmp30 = tmp29 * tmp20
    tmp31 = tmp18 + tmp30
    tmp32 = tl.where(tmp17, tmp31, tmp18)  # first conditional tau update
    tmp33 = tmp30 * tmp20
    tmp34 = tmp32 + tmp33
    tmp35 = tl.where(tmp13, tmp34, tmp32)  # second conditional tau update
    # Re-evaluate f at the next probe point tau + step/2 over the row.
    tmp37 = tmp36 * tmp20
    tmp38 = tmp33 * tmp20
    tmp39 = tmp35 + tmp38
    tmp40 = tmp37 - tmp39
    tmp41 = triton_helpers.maximum(tmp40, tmp12)
    tmp42 = 2.0
    tmp43 = libdevice.pow(tmp41, tmp42)
    tmp45 = tmp44 * tmp20
    tmp46 = tmp45 - tmp39
    tmp47 = triton_helpers.maximum(tmp46, tmp12)
    tmp48 = libdevice.pow(tmp47, tmp42)
    tmp49 = tmp43 + tmp48
    tmp51 = tmp50 * tmp20
    tmp52 = tmp51 - tmp39
    tmp53 = triton_helpers.maximum(tmp52, tmp12)
    tmp54 = libdevice.pow(tmp53, tmp42)
    tmp55 = tmp49 + tmp54
    tmp57 = tmp56 * tmp20
    tmp58 = tmp57 - tmp39
    tmp59 = triton_helpers.maximum(tmp58, tmp12)
    tmp60 = libdevice.pow(tmp59, tmp42)
    tmp61 = tmp55 + tmp60
    tl.store(in_out_ptr0 + x0, tmp35, xmask)  # updated tau, in place
    tl.store(out_ptr0 + x0, tmp61, xmask)  # f(tau') row sum
@triton.jit
def triton_poi_fused_add_clamp_div_mul_sub_where_3(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused elementwise kernel (256 elements, 64 rows x 4).

    Same shape as kernel *_1 but with the step halved 12 times and WITHOUT
    the final pow: writes clamp(x/2 - tau', 0) per element after one
    conditional bisection update of the per-row threshold.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp4 = 1.0
    tmp5 = tmp3 - tmp4
    tmp7 = tmp6 - tmp4
    tmp8 = tmp5 * tmp7
    tmp9 = 0.0
    tmp10 = tmp8 >= tmp9  # sign-agreement test
    # 12 exact halvings: tmp24 == tmp12 * 0.5**12.
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp22 * tmp1
    tmp24 = tmp23 * tmp1
    tmp25 = tmp11 + tmp24
    tmp26 = tl.where(tmp10, tmp25, tmp11)  # conditionally advance tau
    tmp27 = tmp24 * tmp1
    tmp28 = tmp26 + tmp27  # next probe point
    tmp29 = tmp2 - tmp28
    tmp30 = triton_helpers.maximum(tmp29, tmp9)
    tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    """Inductor-generated fused per-row kernel (64 rows of 4 elements).

    Variant of kernel *_2 where the row sum is built from pow(x, 2) of
    already-clamped inputs; performs two conditional bisection updates of
    tau in place (in_out_ptr0) and writes the re-evaluated row sum
    f(tau') = sum_j pow(max(x_j/2 - tau', 0), 2) to out_ptr0.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + x0, xmask)
    tmp19 = tl.load(in_ptr2 + x0, xmask)
    tmp23 = tl.load(in_out_ptr0 + x0, xmask)
    tmp24 = tl.load(in_ptr3 + x0, xmask)
    tmp43 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp50 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp56 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp62 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    # f(tau_k) - 1 from squared pre-clamped terms.
    tmp1 = 2.0
    tmp2 = libdevice.pow(tmp0, tmp1)
    tmp4 = libdevice.pow(tmp3, tmp1)
    tmp5 = tmp2 + tmp4
    tmp7 = libdevice.pow(tmp6, tmp1)
    tmp8 = tmp5 + tmp7
    tmp10 = libdevice.pow(tmp9, tmp1)
    tmp11 = tmp8 + tmp10
    tmp12 = 1.0
    tmp13 = tmp11 - tmp12
    tmp15 = tmp14 - tmp12
    tmp16 = tmp13 * tmp15
    tmp17 = 0.0
    tmp18 = tmp16 >= tmp17  # sign test, newer probe
    tmp20 = tmp19 - tmp12
    tmp21 = tmp20 * tmp15
    tmp22 = tmp21 >= tmp17  # sign test, older probe
    # 12 exact halvings: tmp37 == tmp24 * 0.5**12.
    tmp25 = 0.5
    tmp26 = tmp24 * tmp25
    tmp27 = tmp26 * tmp25
    tmp28 = tmp27 * tmp25
    tmp29 = tmp28 * tmp25
    tmp30 = tmp29 * tmp25
    tmp31 = tmp30 * tmp25
    tmp32 = tmp31 * tmp25
    tmp33 = tmp32 * tmp25
    tmp34 = tmp33 * tmp25
    tmp35 = tmp34 * tmp25
    tmp36 = tmp35 * tmp25
    tmp37 = tmp36 * tmp25
    tmp38 = tmp23 + tmp37
    tmp39 = tl.where(tmp22, tmp38, tmp23)  # first conditional tau update
    tmp40 = tmp37 * tmp25
    tmp41 = tmp39 + tmp40
    tmp42 = tl.where(tmp18, tmp41, tmp39)  # second conditional tau update
    # Re-evaluate the row sum at the next probe point.
    tmp44 = tmp43 * tmp25
    tmp45 = tmp40 * tmp25
    tmp46 = tmp42 + tmp45
    tmp47 = tmp44 - tmp46
    tmp48 = triton_helpers.maximum(tmp47, tmp17)
    tmp49 = libdevice.pow(tmp48, tmp1)
    tmp51 = tmp50 * tmp25
    tmp52 = tmp51 - tmp46
    tmp53 = triton_helpers.maximum(tmp52, tmp17)
    tmp54 = libdevice.pow(tmp53, tmp1)
    tmp55 = tmp49 + tmp54
    tmp57 = tmp56 * tmp25
    tmp58 = tmp57 - tmp46
    tmp59 = triton_helpers.maximum(tmp58, tmp17)
    tmp60 = libdevice.pow(tmp59, tmp1)
    tmp61 = tmp55 + tmp60
    tmp63 = tmp62 * tmp25
    tmp64 = tmp63 - tmp46
    tmp65 = triton_helpers.maximum(tmp64, tmp17)
    tmp66 = libdevice.pow(tmp65, tmp1)
    tmp67 = tmp61 + tmp66
    tl.store(in_out_ptr0 + x0, tmp42, xmask)
    tl.store(out_ptr0 + x0, tmp67, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_where_5(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused elementwise kernel (256 elements, 64 rows x 4).

    Like kernel *_3 but with 14 halvings and no clamp: writes the raw
    residual x/2 - tau' per element after one conditional bisection update
    of the per-row threshold.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp4 = 1.0
    tmp5 = tmp3 - tmp4
    tmp7 = tmp6 - tmp4
    tmp8 = tmp5 * tmp7
    tmp9 = 0.0
    tmp10 = tmp8 >= tmp9  # sign-agreement test
    # 14 exact halvings: tmp26 == tmp12 * 0.5**14.
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp22 * tmp1
    tmp24 = tmp23 * tmp1
    tmp25 = tmp24 * tmp1
    tmp26 = tmp25 * tmp1
    tmp27 = tmp11 + tmp26
    tmp28 = tl.where(tmp10, tmp27, tmp11)  # conditionally advance tau
    tmp29 = tmp26 * tmp1
    tmp30 = tmp28 + tmp29  # next probe point
    tmp31 = tmp2 - tmp30
    tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6(in_out_ptr0,
    in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel,
    XBLOCK: tl.constexpr):
    """Inductor-generated fused per-row kernel (64 rows of 4 elements).

    Performs SIX conditional bisection updates of the per-row threshold tau:
    two driven by precomputed sums (inputs in_ptr1/in_ptr2), then four fully
    unrolled iterations that each re-evaluate
    f(tau) = sum_j pow(max(x_j/2 - tau, 0), 2) over the row, compare f - 1
    against the reference sign, and conditionally advance tau by the
    ever-halving step.  Final tau is written to in_out_ptr3.
    NOTE(review): in_out_ptr0 is only read here, never stored -- consistent
    with Inductor aliasing buffers; do not "fix" the unused-store.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + x0, xmask)
    tmp23 = tl.load(in_ptr2 + x0, xmask)
    tmp27 = tl.load(in_out_ptr0 + x0, xmask)
    tmp28 = tl.load(in_ptr3 + x0, xmask)
    tmp49 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp56 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp62 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp68 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    # f(tau_k) - 1 from clamped then squared residuals.
    tmp1 = 0.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 2.0
    tmp4 = libdevice.pow(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp5, tmp1)
    tmp7 = libdevice.pow(tmp6, tmp3)
    tmp8 = tmp4 + tmp7
    tmp10 = triton_helpers.maximum(tmp9, tmp1)
    tmp11 = libdevice.pow(tmp10, tmp3)
    tmp12 = tmp8 + tmp11
    tmp14 = triton_helpers.maximum(tmp13, tmp1)
    tmp15 = libdevice.pow(tmp14, tmp3)
    tmp16 = tmp12 + tmp15
    tmp17 = 1.0
    tmp18 = tmp16 - tmp17
    tmp20 = tmp19 - tmp17  # reference sign f(tau_0) - 1
    tmp21 = tmp18 * tmp20
    tmp22 = tmp21 >= tmp1
    tmp24 = tmp23 - tmp17
    tmp25 = tmp24 * tmp20
    tmp26 = tmp25 >= tmp1
    # 14 exact halvings: tmp43 == tmp28 * 0.5**14.
    tmp29 = 0.5
    tmp30 = tmp28 * tmp29
    tmp31 = tmp30 * tmp29
    tmp32 = tmp31 * tmp29
    tmp33 = tmp32 * tmp29
    tmp34 = tmp33 * tmp29
    tmp35 = tmp34 * tmp29
    tmp36 = tmp35 * tmp29
    tmp37 = tmp36 * tmp29
    tmp38 = tmp37 * tmp29
    tmp39 = tmp38 * tmp29
    tmp40 = tmp39 * tmp29
    tmp41 = tmp40 * tmp29
    tmp42 = tmp41 * tmp29
    tmp43 = tmp42 * tmp29
    tmp44 = tmp27 + tmp43
    tmp45 = tl.where(tmp26, tmp44, tmp27)  # update from precomputed sum #1
    tmp46 = tmp43 * tmp29
    tmp47 = tmp45 + tmp46
    tmp48 = tl.where(tmp22, tmp47, tmp45)  # update from precomputed sum #2
    # --- unrolled bisection iteration 1: probe tau + step/2 ---
    tmp50 = tmp49 * tmp29
    tmp51 = tmp46 * tmp29
    tmp52 = tmp48 + tmp51
    tmp53 = tmp50 - tmp52
    tmp54 = triton_helpers.maximum(tmp53, tmp1)
    tmp55 = libdevice.pow(tmp54, tmp3)
    tmp57 = tmp56 * tmp29
    tmp58 = tmp57 - tmp52
    tmp59 = triton_helpers.maximum(tmp58, tmp1)
    tmp60 = libdevice.pow(tmp59, tmp3)
    tmp61 = tmp55 + tmp60
    tmp63 = tmp62 * tmp29
    tmp64 = tmp63 - tmp52
    tmp65 = triton_helpers.maximum(tmp64, tmp1)
    tmp66 = libdevice.pow(tmp65, tmp3)
    tmp67 = tmp61 + tmp66
    tmp69 = tmp68 * tmp29
    tmp70 = tmp69 - tmp52
    tmp71 = triton_helpers.maximum(tmp70, tmp1)
    tmp72 = libdevice.pow(tmp71, tmp3)
    tmp73 = tmp67 + tmp72
    tmp74 = tmp73 - tmp17
    tmp75 = tmp74 * tmp20
    tmp76 = tmp75 >= tmp1
    tmp77 = tl.where(tmp76, tmp52, tmp48)
    # --- unrolled bisection iteration 2 ---
    tmp78 = tmp51 * tmp29
    tmp79 = tmp77 + tmp78
    tmp80 = tmp50 - tmp79
    tmp81 = triton_helpers.maximum(tmp80, tmp1)
    tmp82 = libdevice.pow(tmp81, tmp3)
    tmp83 = tmp57 - tmp79
    tmp84 = triton_helpers.maximum(tmp83, tmp1)
    tmp85 = libdevice.pow(tmp84, tmp3)
    tmp86 = tmp82 + tmp85
    tmp87 = tmp63 - tmp79
    tmp88 = triton_helpers.maximum(tmp87, tmp1)
    tmp89 = libdevice.pow(tmp88, tmp3)
    tmp90 = tmp86 + tmp89
    tmp91 = tmp69 - tmp79
    tmp92 = triton_helpers.maximum(tmp91, tmp1)
    tmp93 = libdevice.pow(tmp92, tmp3)
    tmp94 = tmp90 + tmp93
    tmp95 = tmp94 - tmp17
    tmp96 = tmp95 * tmp20
    tmp97 = tmp96 >= tmp1
    tmp98 = tl.where(tmp97, tmp79, tmp77)
    # --- unrolled bisection iteration 3 ---
    tmp99 = tmp78 * tmp29
    tmp100 = tmp98 + tmp99
    tmp101 = tmp50 - tmp100
    tmp102 = triton_helpers.maximum(tmp101, tmp1)
    tmp103 = libdevice.pow(tmp102, tmp3)
    tmp104 = tmp57 - tmp100
    tmp105 = triton_helpers.maximum(tmp104, tmp1)
    tmp106 = libdevice.pow(tmp105, tmp3)
    tmp107 = tmp103 + tmp106
    tmp108 = tmp63 - tmp100
    tmp109 = triton_helpers.maximum(tmp108, tmp1)
    tmp110 = libdevice.pow(tmp109, tmp3)
    tmp111 = tmp107 + tmp110
    tmp112 = tmp69 - tmp100
    tmp113 = triton_helpers.maximum(tmp112, tmp1)
    tmp114 = libdevice.pow(tmp113, tmp3)
    tmp115 = tmp111 + tmp114
    tmp116 = tmp115 - tmp17
    tmp117 = tmp116 * tmp20
    tmp118 = tmp117 >= tmp1
    tmp119 = tl.where(tmp118, tmp100, tmp98)
    # --- unrolled bisection iteration 4 ---
    tmp120 = tmp99 * tmp29
    tmp121 = tmp119 + tmp120
    tmp122 = tmp50 - tmp121
    tmp123 = triton_helpers.maximum(tmp122, tmp1)
    tmp124 = libdevice.pow(tmp123, tmp3)
    tmp125 = tmp57 - tmp121
    tmp126 = triton_helpers.maximum(tmp125, tmp1)
    tmp127 = libdevice.pow(tmp126, tmp3)
    tmp128 = tmp124 + tmp127
    tmp129 = tmp63 - tmp121
    tmp130 = triton_helpers.maximum(tmp129, tmp1)
    tmp131 = libdevice.pow(tmp130, tmp3)
    tmp132 = tmp128 + tmp131
    tmp133 = tmp69 - tmp121
    tmp134 = triton_helpers.maximum(tmp133, tmp1)
    tmp135 = libdevice.pow(tmp134, tmp3)
    tmp136 = tmp132 + tmp135
    tmp137 = tmp136 - tmp17
    tmp138 = tmp137 * tmp20
    tmp139 = tmp138 >= tmp1
    tmp140 = tl.where(tmp139, tmp121, tmp119)
    tl.store(in_out_ptr3 + x0, tmp140, xmask)  # final per-row tau
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_7(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused elementwise kernel (256 elements, 64 rows x 4).

    Unconditional variant: writes pow(max(x/2 - (tau + step), 0), 2) where
    step is the per-row initial step halved 20 times (== step0 * 0.5**20,
    exact in binary FP).  No sign test / tl.where in this stage.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    # 20 exact halvings of the step.
    tmp5 = tmp4 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = tmp6 * tmp1
    tmp8 = tmp7 * tmp1
    tmp9 = tmp8 * tmp1
    tmp10 = tmp9 * tmp1
    tmp11 = tmp10 * tmp1
    tmp12 = tmp11 * tmp1
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp22 * tmp1
    tmp24 = tmp23 * tmp1
    tmp25 = tmp3 + tmp24
    tmp26 = tmp2 - tmp25
    tmp27 = 0.0
    tmp28 = triton_helpers.maximum(tmp26, tmp27)
    tmp29 = 2.0
    tmp30 = libdevice.pow(tmp28, tmp29)
    tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_div_where_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
    xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused per-row kernel (64 rows of 4 elements).

    One conditional bisection update of the per-row threshold tau, in place:
    tau += step0 * 0.5**20 when the row sum's (f - 1) sign agrees with the
    reference sign.  Row sum here is a plain add of 4 precomputed terms.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + x0, xmask)
    tmp14 = tl.load(in_out_ptr0 + x0, xmask)
    tmp15 = tl.load(in_ptr2 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 1.0
    tmp8 = tmp6 - tmp7
    tmp10 = tmp9 - tmp7
    tmp11 = tmp8 * tmp10
    tmp12 = 0.0
    tmp13 = tmp11 >= tmp12  # sign-agreement test
    # 20 exact halvings: tmp36 == tmp15 * 0.5**20.
    tmp16 = 0.5
    tmp17 = tmp15 * tmp16
    tmp18 = tmp17 * tmp16
    tmp19 = tmp18 * tmp16
    tmp20 = tmp19 * tmp16
    tmp21 = tmp20 * tmp16
    tmp22 = tmp21 * tmp16
    tmp23 = tmp22 * tmp16
    tmp24 = tmp23 * tmp16
    tmp25 = tmp24 * tmp16
    tmp26 = tmp25 * tmp16
    tmp27 = tmp26 * tmp16
    tmp28 = tmp27 * tmp16
    tmp29 = tmp28 * tmp16
    tmp30 = tmp29 * tmp16
    tmp31 = tmp30 * tmp16
    tmp32 = tmp31 * tmp16
    tmp33 = tmp32 * tmp16
    tmp34 = tmp33 * tmp16
    tmp35 = tmp34 * tmp16
    tmp36 = tmp35 * tmp16
    tmp37 = tmp14 + tmp36
    tmp38 = tl.where(tmp13, tmp37, tmp14)
    tl.store(in_out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_9(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused elementwise kernel (256 elements, 64 rows x 4).

    Same as kernel *_7 with the step halved 21 times:
    writes pow(max(x/2 - (tau + step0 * 0.5**21), 0), 2).
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    # 21 exact halvings of the step.
    tmp5 = tmp4 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = tmp6 * tmp1
    tmp8 = tmp7 * tmp1
    tmp9 = tmp8 * tmp1
    tmp10 = tmp9 * tmp1
    tmp11 = tmp10 * tmp1
    tmp12 = tmp11 * tmp1
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp22 * tmp1
    tmp24 = tmp23 * tmp1
    tmp25 = tmp24 * tmp1
    tmp26 = tmp3 + tmp25
    tmp27 = tmp2 - tmp26
    tmp28 = 0.0
    tmp29 = triton_helpers.maximum(tmp27, tmp28)
    tmp30 = 2.0
    tmp31 = libdevice.pow(tmp29, tmp30)
    tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_div_where_10(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused per-row kernel (64 rows of 4 elements).

    Same as kernel *_8 with 21 halvings: conditionally tau += step0 * 0.5**21
    in place, gated by the sign-agreement test on the row sum.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + x0, xmask)
    tmp14 = tl.load(in_out_ptr0 + x0, xmask)
    tmp15 = tl.load(in_ptr2 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 1.0
    tmp8 = tmp6 - tmp7
    tmp10 = tmp9 - tmp7
    tmp11 = tmp8 * tmp10
    tmp12 = 0.0
    tmp13 = tmp11 >= tmp12  # sign-agreement test
    # 21 exact halvings: tmp37 == tmp15 * 0.5**21.
    tmp16 = 0.5
    tmp17 = tmp15 * tmp16
    tmp18 = tmp17 * tmp16
    tmp19 = tmp18 * tmp16
    tmp20 = tmp19 * tmp16
    tmp21 = tmp20 * tmp16
    tmp22 = tmp21 * tmp16
    tmp23 = tmp22 * tmp16
    tmp24 = tmp23 * tmp16
    tmp25 = tmp24 * tmp16
    tmp26 = tmp25 * tmp16
    tmp27 = tmp26 * tmp16
    tmp28 = tmp27 * tmp16
    tmp29 = tmp28 * tmp16
    tmp30 = tmp29 * tmp16
    tmp31 = tmp30 * tmp16
    tmp32 = tmp31 * tmp16
    tmp33 = tmp32 * tmp16
    tmp34 = tmp33 * tmp16
    tmp35 = tmp34 * tmp16
    tmp36 = tmp35 * tmp16
    tmp37 = tmp36 * tmp16
    tmp38 = tmp14 + tmp37
    tmp39 = tl.where(tmp13, tmp38, tmp14)
    tl.store(in_out_ptr0 + x0, tmp39, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_sub_11(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused elementwise kernel (256 elements, 64 rows x 4).

    Writes max(x/2 - (tau + step0 * 0.5**22), 0) per element (clamped
    residual, no pow in this stage); 22 exact halvings of the step.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    # 22 exact halvings of the step.
    tmp5 = tmp4 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = tmp6 * tmp1
    tmp8 = tmp7 * tmp1
    tmp9 = tmp8 * tmp1
    tmp10 = tmp9 * tmp1
    tmp11 = tmp10 * tmp1
    tmp12 = tmp11 * tmp1
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp22 * tmp1
    tmp24 = tmp23 * tmp1
    tmp25 = tmp24 * tmp1
    tmp26 = tmp25 * tmp1
    tmp27 = tmp3 + tmp26
    tmp28 = tmp2 - tmp27
    tmp29 = 0.0
    tmp30 = triton_helpers.maximum(tmp28, tmp29)
    tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_div_where_12(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused per-row kernel (64 rows of 4 elements).

    Conditional in-place update tau += step0 * 0.5**22, gated by the sign
    test on sum_j pow(x_j, 2) - 1 (inputs already clamped upstream).
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + x0, xmask)
    tmp19 = tl.load(in_out_ptr0 + x0, xmask)
    tmp20 = tl.load(in_ptr2 + x0, xmask)
    tmp1 = 2.0
    tmp2 = libdevice.pow(tmp0, tmp1)
    tmp4 = libdevice.pow(tmp3, tmp1)
    tmp5 = tmp2 + tmp4
    tmp7 = libdevice.pow(tmp6, tmp1)
    tmp8 = tmp5 + tmp7
    tmp10 = libdevice.pow(tmp9, tmp1)
    tmp11 = tmp8 + tmp10
    tmp12 = 1.0
    tmp13 = tmp11 - tmp12
    tmp15 = tmp14 - tmp12
    tmp16 = tmp13 * tmp15
    tmp17 = 0.0
    tmp18 = tmp16 >= tmp17  # sign-agreement test
    # 22 exact halvings: tmp43 == tmp20 * 0.5**22.
    tmp21 = 0.5
    tmp22 = tmp20 * tmp21
    tmp23 = tmp22 * tmp21
    tmp24 = tmp23 * tmp21
    tmp25 = tmp24 * tmp21
    tmp26 = tmp25 * tmp21
    tmp27 = tmp26 * tmp21
    tmp28 = tmp27 * tmp21
    tmp29 = tmp28 * tmp21
    tmp30 = tmp29 * tmp21
    tmp31 = tmp30 * tmp21
    tmp32 = tmp31 * tmp21
    tmp33 = tmp32 * tmp21
    tmp34 = tmp33 * tmp21
    tmp35 = tmp34 * tmp21
    tmp36 = tmp35 * tmp21
    tmp37 = tmp36 * tmp21
    tmp38 = tmp37 * tmp21
    tmp39 = tmp38 * tmp21
    tmp40 = tmp39 * tmp21
    tmp41 = tmp40 * tmp21
    tmp42 = tmp41 * tmp21
    tmp43 = tmp42 * tmp21
    tmp44 = tmp19 + tmp43
    tmp45 = tl.where(tmp18, tmp44, tmp19)
    tl.store(in_out_ptr0 + x0, tmp45, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_sub_13(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused elementwise kernel (256 elements, 64 rows x 4).

    Writes max(x/2 - (tau + step0 * 0.5**23), 0) per element;
    23 exact halvings of the step.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    # 23 exact halvings of the step.
    tmp5 = tmp4 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = tmp6 * tmp1
    tmp8 = tmp7 * tmp1
    tmp9 = tmp8 * tmp1
    tmp10 = tmp9 * tmp1
    tmp11 = tmp10 * tmp1
    tmp12 = tmp11 * tmp1
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp22 * tmp1
    tmp24 = tmp23 * tmp1
    tmp25 = tmp24 * tmp1
    tmp26 = tmp25 * tmp1
    tmp27 = tmp26 * tmp1
    tmp28 = tmp3 + tmp27
    tmp29 = tmp2 - tmp28
    tmp30 = 0.0
    tmp31 = triton_helpers.maximum(tmp29, tmp30)
    tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_div_where_14(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused per-row kernel (64 rows of 4 elements).

    Same as kernel *_12 with 23 halvings: conditionally
    tau += step0 * 0.5**23 in place, gated by the sign test on
    sum_j pow(x_j, 2) - 1.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + x0, xmask)
    tmp19 = tl.load(in_out_ptr0 + x0, xmask)
    tmp20 = tl.load(in_ptr2 + x0, xmask)
    tmp1 = 2.0
    tmp2 = libdevice.pow(tmp0, tmp1)
    tmp4 = libdevice.pow(tmp3, tmp1)
    tmp5 = tmp2 + tmp4
    tmp7 = libdevice.pow(tmp6, tmp1)
    tmp8 = tmp5 + tmp7
    tmp10 = libdevice.pow(tmp9, tmp1)
    tmp11 = tmp8 + tmp10
    tmp12 = 1.0
    tmp13 = tmp11 - tmp12
    tmp15 = tmp14 - tmp12
    tmp16 = tmp13 * tmp15
    tmp17 = 0.0
    tmp18 = tmp16 >= tmp17  # sign-agreement test
    # 23 exact halvings: tmp44 == tmp20 * 0.5**23.
    tmp21 = 0.5
    tmp22 = tmp20 * tmp21
    tmp23 = tmp22 * tmp21
    tmp24 = tmp23 * tmp21
    tmp25 = tmp24 * tmp21
    tmp26 = tmp25 * tmp21
    tmp27 = tmp26 * tmp21
    tmp28 = tmp27 * tmp21
    tmp29 = tmp28 * tmp21
    tmp30 = tmp29 * tmp21
    tmp31 = tmp30 * tmp21
    tmp32 = tmp31 * tmp21
    tmp33 = tmp32 * tmp21
    tmp34 = tmp33 * tmp21
    tmp35 = tmp34 * tmp21
    tmp36 = tmp35 * tmp21
    tmp37 = tmp36 * tmp21
    tmp38 = tmp37 * tmp21
    tmp39 = tmp38 * tmp21
    tmp40 = tmp39 * tmp21
    tmp41 = tmp40 * tmp21
    tmp42 = tmp41 * tmp21
    tmp43 = tmp42 * tmp21
    tmp44 = tmp43 * tmp21
    tmp45 = tmp19 + tmp44
    tmp46 = tl.where(tmp18, tmp45, tmp19)
    tl.store(in_out_ptr0 + x0, tmp46, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused elementwise kernel (256 elements, 64 rows x 4).

    Writes the raw residual x/2 - (tau + step0 * 0.5**24) per element
    (no clamp in this stage); 24 exact halvings of the step.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    # 24 exact halvings of the step.
    tmp5 = tmp4 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = tmp6 * tmp1
    tmp8 = tmp7 * tmp1
    tmp9 = tmp8 * tmp1
    tmp10 = tmp9 * tmp1
    tmp11 = tmp10 * tmp1
    tmp12 = tmp11 * tmp1
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp22 * tmp1
    tmp24 = tmp23 * tmp1
    tmp25 = tmp24 * tmp1
    tmp26 = tmp25 * tmp1
    tmp27 = tmp26 * tmp1
    tmp28 = tmp27 * tmp1
    tmp29 = tmp3 + tmp28
    tmp30 = tmp2 - tmp29
    tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_div_where_16(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused per-row kernel (64 rows of 4 elements).

    Conditional in-place update tau += step0 * 0.5**24, gated by the sign
    test on sum_j pow(max(x_j, 0), 2) - 1 (clamp applied here, on raw
    residuals produced by the previous stage).
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + x0, xmask)
    tmp23 = tl.load(in_out_ptr0 + x0, xmask)
    tmp24 = tl.load(in_ptr2 + x0, xmask)
    tmp1 = 0.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 2.0
    tmp4 = libdevice.pow(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp5, tmp1)
    tmp7 = libdevice.pow(tmp6, tmp3)
    tmp8 = tmp4 + tmp7
    tmp10 = triton_helpers.maximum(tmp9, tmp1)
    tmp11 = libdevice.pow(tmp10, tmp3)
    tmp12 = tmp8 + tmp11
    tmp14 = triton_helpers.maximum(tmp13, tmp1)
    tmp15 = libdevice.pow(tmp14, tmp3)
    tmp16 = tmp12 + tmp15
    tmp17 = 1.0
    tmp18 = tmp16 - tmp17
    tmp20 = tmp19 - tmp17
    tmp21 = tmp18 * tmp20
    tmp22 = tmp21 >= tmp1  # sign-agreement test
    # 24 exact halvings: tmp49 == tmp24 * 0.5**24.
    tmp25 = 0.5
    tmp26 = tmp24 * tmp25
    tmp27 = tmp26 * tmp25
    tmp28 = tmp27 * tmp25
    tmp29 = tmp28 * tmp25
    tmp30 = tmp29 * tmp25
    tmp31 = tmp30 * tmp25
    tmp32 = tmp31 * tmp25
    tmp33 = tmp32 * tmp25
    tmp34 = tmp33 * tmp25
    tmp35 = tmp34 * tmp25
    tmp36 = tmp35 * tmp25
    tmp37 = tmp36 * tmp25
    tmp38 = tmp37 * tmp25
    tmp39 = tmp38 * tmp25
    tmp40 = tmp39 * tmp25
    tmp41 = tmp40 * tmp25
    tmp42 = tmp41 * tmp25
    tmp43 = tmp42 * tmp25
    tmp44 = tmp43 * tmp25
    tmp45 = tmp44 * tmp25
    tmp46 = tmp45 * tmp25
    tmp47 = tmp46 * tmp25
    tmp48 = tmp47 * tmp25
    tmp49 = tmp48 * tmp25
    tmp50 = tmp23 + tmp49
    tmp51 = tl.where(tmp22, tmp50, tmp23)
    tl.store(in_out_ptr0 + x0, tmp51, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused elementwise kernel (256 elements, 64 rows x 4).

    Writes the raw residual x/2 - (tau + step0 * 0.5**25) per element;
    25 exact halvings of the step.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    # 25 exact halvings of the step.
    tmp5 = tmp4 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = tmp6 * tmp1
    tmp8 = tmp7 * tmp1
    tmp9 = tmp8 * tmp1
    tmp10 = tmp9 * tmp1
    tmp11 = tmp10 * tmp1
    tmp12 = tmp11 * tmp1
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp22 * tmp1
    tmp24 = tmp23 * tmp1
    tmp25 = tmp24 * tmp1
    tmp26 = tmp25 * tmp1
    tmp27 = tmp26 * tmp1
    tmp28 = tmp27 * tmp1
    tmp29 = tmp28 * tmp1
    tmp30 = tmp3 + tmp29
    tmp31 = tmp2 - tmp30
    tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_div_where_18(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused per-row kernel (64 rows of 4 elements).

    Same as kernel *_16 with 25 halvings: conditionally
    tau += step0 * 0.5**25 in place, gated by the sign test on
    sum_j pow(max(x_j, 0), 2) - 1.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + x0, xmask)
    tmp23 = tl.load(in_out_ptr0 + x0, xmask)
    tmp24 = tl.load(in_ptr2 + x0, xmask)
    tmp1 = 0.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 2.0
    tmp4 = libdevice.pow(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp5, tmp1)
    tmp7 = libdevice.pow(tmp6, tmp3)
    tmp8 = tmp4 + tmp7
    tmp10 = triton_helpers.maximum(tmp9, tmp1)
    tmp11 = libdevice.pow(tmp10, tmp3)
    tmp12 = tmp8 + tmp11
    tmp14 = triton_helpers.maximum(tmp13, tmp1)
    tmp15 = libdevice.pow(tmp14, tmp3)
    tmp16 = tmp12 + tmp15
    tmp17 = 1.0
    tmp18 = tmp16 - tmp17
    tmp20 = tmp19 - tmp17
    tmp21 = tmp18 * tmp20
    tmp22 = tmp21 >= tmp1  # sign-agreement test
    # 25 exact halvings: tmp50 == tmp24 * 0.5**25.
    tmp25 = 0.5
    tmp26 = tmp24 * tmp25
    tmp27 = tmp26 * tmp25
    tmp28 = tmp27 * tmp25
    tmp29 = tmp28 * tmp25
    tmp30 = tmp29 * tmp25
    tmp31 = tmp30 * tmp25
    tmp32 = tmp31 * tmp25
    tmp33 = tmp32 * tmp25
    tmp34 = tmp33 * tmp25
    tmp35 = tmp34 * tmp25
    tmp36 = tmp35 * tmp25
    tmp37 = tmp36 * tmp25
    tmp38 = tmp37 * tmp25
    tmp39 = tmp38 * tmp25
    tmp40 = tmp39 * tmp25
    tmp41 = tmp40 * tmp25
    tmp42 = tmp41 * tmp25
    tmp43 = tmp42 * tmp25
    tmp44 = tmp43 * tmp25
    tmp45 = tmp44 * tmp25
    tmp46 = tmp45 * tmp25
    tmp47 = tmp46 * tmp25
    tmp48 = tmp47 * tmp25
    tmp49 = tmp48 * tmp25
    tmp50 = tmp49 * tmp25
    tmp51 = tmp23 + tmp50
    tmp52 = tl.where(tmp22, tmp51, tmp23)
    tl.store(in_out_ptr0 + x0, tmp52, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_19(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused elementwise kernel (256 elements, 64 rows x 4).

    Writes the raw residual x/2 - (tau + step0 * 0.5**26) per element;
    26 exact halvings of the step.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    # 26 exact halvings of the step.
    tmp5 = tmp4 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = tmp6 * tmp1
    tmp8 = tmp7 * tmp1
    tmp9 = tmp8 * tmp1
    tmp10 = tmp9 * tmp1
    tmp11 = tmp10 * tmp1
    tmp12 = tmp11 * tmp1
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp22 * tmp1
    tmp24 = tmp23 * tmp1
    tmp25 = tmp24 * tmp1
    tmp26 = tmp25 * tmp1
    tmp27 = tmp26 * tmp1
    tmp28 = tmp27 * tmp1
    tmp29 = tmp28 * tmp1
    tmp30 = tmp29 * tmp1
    tmp31 = tmp3 + tmp30
    tmp32 = tmp2 - tmp31
    tl.store(out_ptr0 + x2, tmp32, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20(in_out_ptr0,
    in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr3, out_ptr5,
    xnumel, XBLOCK: tl.constexpr):
    """Inductor-generated fused per-row kernel (64 rows of 4 elements).

    Final multi-iteration stage: one conditional tau update driven by the
    incoming clamped/squared row sum, followed by three fully unrolled
    bisection iterations (re-evaluate f(tau) = sum_j pow(max(x_j/2 - tau,
    0), 2), sign test, conditional advance), then one more re-evaluation
    whose row sum is emitted.  Outputs: out_ptr3 = remaining step size,
    in_out_ptr2 = tau after the third unrolled iteration,
    out_ptr5 = f evaluated at the last probe point.
    NOTE(review): in_out_ptr0 is read-only here -- Inductor buffer aliasing;
    leave as is.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + x0, xmask)
    tmp23 = tl.load(in_out_ptr0 + x0, xmask)
    tmp24 = tl.load(in_ptr2 + x0, xmask)
    tmp56 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp61 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp67 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp73 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    # f(tau_k) - 1 from clamped then squared residuals.
    tmp1 = 0.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 2.0
    tmp4 = libdevice.pow(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp5, tmp1)
    tmp7 = libdevice.pow(tmp6, tmp3)
    tmp8 = tmp4 + tmp7
    tmp10 = triton_helpers.maximum(tmp9, tmp1)
    tmp11 = libdevice.pow(tmp10, tmp3)
    tmp12 = tmp8 + tmp11
    tmp14 = triton_helpers.maximum(tmp13, tmp1)
    tmp15 = libdevice.pow(tmp14, tmp3)
    tmp16 = tmp12 + tmp15
    tmp17 = 1.0
    tmp18 = tmp16 - tmp17
    tmp20 = tmp19 - tmp17  # reference sign f(tau_0) - 1
    tmp21 = tmp18 * tmp20
    tmp22 = tmp21 >= tmp1
    # 26 exact halvings: tmp51 == tmp24 * 0.5**26.
    tmp25 = 0.5
    tmp26 = tmp24 * tmp25
    tmp27 = tmp26 * tmp25
    tmp28 = tmp27 * tmp25
    tmp29 = tmp28 * tmp25
    tmp30 = tmp29 * tmp25
    tmp31 = tmp30 * tmp25
    tmp32 = tmp31 * tmp25
    tmp33 = tmp32 * tmp25
    tmp34 = tmp33 * tmp25
    tmp35 = tmp34 * tmp25
    tmp36 = tmp35 * tmp25
    tmp37 = tmp36 * tmp25
    tmp38 = tmp37 * tmp25
    tmp39 = tmp38 * tmp25
    tmp40 = tmp39 * tmp25
    tmp41 = tmp40 * tmp25
    tmp42 = tmp41 * tmp25
    tmp43 = tmp42 * tmp25
    tmp44 = tmp43 * tmp25
    tmp45 = tmp44 * tmp25
    tmp46 = tmp45 * tmp25
    tmp47 = tmp46 * tmp25
    tmp48 = tmp47 * tmp25
    tmp49 = tmp48 * tmp25
    tmp50 = tmp49 * tmp25
    tmp51 = tmp50 * tmp25
    tmp52 = tmp23 + tmp51
    tmp53 = tl.where(tmp22, tmp52, tmp23)  # conditional advance
    tmp54 = tmp51 * tmp25
    tmp55 = tmp53 + tmp54
    # --- unrolled bisection iteration 1: probe tau + step/2 ---
    tmp57 = tmp56 * tmp25
    tmp58 = tmp57 - tmp55
    tmp59 = triton_helpers.maximum(tmp58, tmp1)
    tmp60 = libdevice.pow(tmp59, tmp3)
    tmp62 = tmp61 * tmp25
    tmp63 = tmp62 - tmp55
    tmp64 = triton_helpers.maximum(tmp63, tmp1)
    tmp65 = libdevice.pow(tmp64, tmp3)
    tmp66 = tmp60 + tmp65
    tmp68 = tmp67 * tmp25
    tmp69 = tmp68 - tmp55
    tmp70 = triton_helpers.maximum(tmp69, tmp1)
    tmp71 = libdevice.pow(tmp70, tmp3)
    tmp72 = tmp66 + tmp71
    tmp74 = tmp73 * tmp25
    tmp75 = tmp74 - tmp55
    tmp76 = triton_helpers.maximum(tmp75, tmp1)
    tmp77 = libdevice.pow(tmp76, tmp3)
    tmp78 = tmp72 + tmp77
    tmp79 = tmp78 - tmp17
    tmp80 = tmp79 * tmp20
    tmp81 = tmp80 >= tmp1
    tmp82 = tl.where(tmp81, tmp55, tmp53)
    # --- unrolled bisection iteration 2 ---
    tmp83 = tmp54 * tmp25
    tmp84 = tmp82 + tmp83
    tmp85 = tmp57 - tmp84
    tmp86 = triton_helpers.maximum(tmp85, tmp1)
    tmp87 = libdevice.pow(tmp86, tmp3)
    tmp88 = tmp62 - tmp84
    tmp89 = triton_helpers.maximum(tmp88, tmp1)
    tmp90 = libdevice.pow(tmp89, tmp3)
    tmp91 = tmp87 + tmp90
    tmp92 = tmp68 - tmp84
    tmp93 = triton_helpers.maximum(tmp92, tmp1)
    tmp94 = libdevice.pow(tmp93, tmp3)
    tmp95 = tmp91 + tmp94
    tmp96 = tmp74 - tmp84
    tmp97 = triton_helpers.maximum(tmp96, tmp1)
    tmp98 = libdevice.pow(tmp97, tmp3)
    tmp99 = tmp95 + tmp98
    tmp100 = tmp99 - tmp17
    tmp101 = tmp100 * tmp20
    tmp102 = tmp101 >= tmp1
    tmp103 = tl.where(tmp102, tmp84, tmp82)
    # --- unrolled bisection iteration 3 ---
    tmp104 = tmp83 * tmp25
    tmp105 = tmp103 + tmp104
    tmp106 = tmp57 - tmp105
    tmp107 = triton_helpers.maximum(tmp106, tmp1)
    tmp108 = libdevice.pow(tmp107, tmp3)
    tmp109 = tmp62 - tmp105
    tmp110 = triton_helpers.maximum(tmp109, tmp1)
    tmp111 = libdevice.pow(tmp110, tmp3)
    tmp112 = tmp108 + tmp111
    tmp113 = tmp68 - tmp105
    tmp114 = triton_helpers.maximum(tmp113, tmp1)
    tmp115 = libdevice.pow(tmp114, tmp3)
    tmp116 = tmp112 + tmp115
    tmp117 = tmp74 - tmp105
    tmp118 = triton_helpers.maximum(tmp117, tmp1)
    tmp119 = libdevice.pow(tmp118, tmp3)
    tmp120 = tmp116 + tmp119
    tmp121 = tmp120 - tmp17
    tmp122 = tmp121 * tmp20
    tmp123 = tmp122 >= tmp1
    tmp124 = tl.where(tmp123, tmp105, tmp103)
    # --- final re-evaluation at the next probe point; sum is emitted ---
    tmp125 = tmp104 * tmp25
    tmp126 = tmp124 + tmp125
    tmp127 = tmp57 - tmp126
    tmp128 = triton_helpers.maximum(tmp127, tmp1)
    tmp129 = libdevice.pow(tmp128, tmp3)
    tmp130 = tmp62 - tmp126
    tmp131 = triton_helpers.maximum(tmp130, tmp1)
    tmp132 = libdevice.pow(tmp131, tmp3)
    tmp133 = tmp129 + tmp132
    tmp134 = tmp68 - tmp126
    tmp135 = triton_helpers.maximum(tmp134, tmp1)
    tmp136 = libdevice.pow(tmp135, tmp3)
    tmp137 = tmp133 + tmp136
    tmp138 = tmp74 - tmp126
    tmp139 = triton_helpers.maximum(tmp138, tmp1)
    tmp140 = libdevice.pow(tmp139, tmp3)
    tmp141 = tmp137 + tmp140
    tl.store(out_ptr3 + x0, tmp104, xmask)  # remaining step size
    tl.store(in_out_ptr2 + x0, tmp145, xmask)  # tau after iteration 3
    tl.store(out_ptr5 + x0, tmp162, xmask)  # f at the last probe
# Entmax-1.5 bisection epilogue: performs one more half-step of the tau
# bisection, then evaluates p = max(X * 0.5 - tau, 0) ** 2 elementwise.
# The 0.5 scaling and exponent 2 match alpha = 1.5 in the reference
# Python implementation below (X * (alpha - 1) and 1 / (alpha - 1)).
# in_ptr0: input X (4 values per row); in_ptr1/in_ptr2: per-row sums whose
# (sum - 1) signs drive the bisection test; in_ptr3: current tau_lo;
# in_ptr4: the shrinking bisection interval dm.  out_ptr0: unnormalized p.
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_where_21(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    # x1 is the row index: each row of 4 elements shares one tau.
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp4 = 1.0
    # Sign test mask = ((sum_m - 1) * (sum_lo - 1) >= 0): advance tau_lo to
    # the midpoint when both f-values agree in sign.
    tmp5 = tmp3 - tmp4
    tmp7 = tmp6 - tmp4
    tmp8 = tmp5 * tmp7
    tmp9 = 0.0
    tmp10 = tmp8 >= tmp9
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp11 + tmp14
    tmp16 = tl.where(tmp10, tmp15, tmp11)
    tmp17 = tmp14 * tmp1
    tmp18 = tmp16 + tmp17
    tmp19 = tmp2 - tmp18
    # clamp(..., min=0) followed by squaring == _gp_inv for alpha = 1.5.
    tmp20 = triton_helpers.maximum(tmp19, tmp9)
    tmp21 = 2.0
    tmp22 = libdevice.pow(tmp20, tmp21)
    tl.store(out_ptr0 + x2, tmp22, xmask)
# Final normalization: divide each element by the sum of its row of 4,
# i.e. p /= p.sum(dim).unsqueeze(dim) -- the `ensure_sum_one` step of the
# entmax reference implementation below.
@triton.jit
def triton_poi_fused_div_22(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    # x1 is the row index; reload the four row entries to form the row sum.
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
    """Inductor-generated CUDA driver for entmax bisection on a (4, 4, 4, 4)
    float32 input (last dim normalized).  The numbered Triton kernels unroll
    the bisection loop; buffers are aggressively recycled between iterations
    (most `bufN = bufM; del bufM` pairs are aliases, not copies).  Returns a
    1-tuple holding the normalized probabilities."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Per-row (size-64) scratch buffers for tau / interval / row sums.
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf43 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf57 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf58 = reinterpret_tensor(buf57, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf57
        buf59 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0[grid(64)](
            buf58, arg0_1, buf1, buf43, buf59, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        # Unrolled bisection iterations: per-element (256) and per-row (64)
        # kernels alternate, each pair advancing tau by a halved interval.
        buf60 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_clamp_div_mul_pow_sub_where_1[grid(256)](arg0_1,
            buf59, buf1, buf58, buf43, buf60, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        buf61 = buf58
        del buf58
        buf62 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2[grid(64)](buf61,
            buf60, buf1, buf59, buf43, arg0_1, buf62, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf63 = buf60
        del buf60
        triton_poi_fused_add_clamp_div_mul_sub_where_3[grid(256)](arg0_1,
            buf62, buf1, buf61, buf43, buf63, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        buf64 = buf61
        del buf61
        buf65 = buf59
        del buf59
        triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4[grid(64)](buf64,
            buf63, buf1, buf62, buf43, arg0_1, buf65, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf66 = buf63
        del buf63
        triton_poi_fused_add_div_mul_sub_where_5[grid(256)](arg0_1, buf65,
            buf1, buf64, buf43, buf66, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        buf67 = buf64
        del buf64
        buf74 = buf62
        del buf62
        buf75 = reinterpret_tensor(buf74, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf74
        triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6[grid(64)](buf67,
            buf75, buf66, buf1, buf65, buf43, arg0_1, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf76 = buf66
        del buf66
        triton_poi_fused_add_clamp_div_mul_pow_sub_7[grid(256)](arg0_1,
            buf75, buf43, buf76, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf77 = buf75
        del buf75
        triton_poi_fused_add_div_where_8[grid(64)](buf77, buf76, buf1,
            buf43, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf78 = buf76
        del buf76
        triton_poi_fused_add_clamp_div_mul_pow_sub_9[grid(256)](arg0_1,
            buf77, buf43, buf78, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf79 = buf77
        del buf77
        triton_poi_fused_add_div_where_10[grid(64)](buf79, buf78, buf1,
            buf43, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf80 = buf78
        del buf78
        triton_poi_fused_add_clamp_div_mul_sub_11[grid(256)](arg0_1, buf79,
            buf43, buf80, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf81 = buf79
        del buf79
        triton_poi_fused_add_div_where_12[grid(64)](buf81, buf80, buf1,
            buf43, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf82 = buf80
        del buf80
        triton_poi_fused_add_clamp_div_mul_sub_13[grid(256)](arg0_1, buf81,
            buf43, buf82, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf83 = buf81
        del buf81
        triton_poi_fused_add_div_where_14[grid(64)](buf83, buf82, buf1,
            buf43, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf84 = buf82
        del buf82
        triton_poi_fused_add_div_mul_sub_15[grid(256)](arg0_1, buf83, buf43,
            buf84, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf85 = buf83
        del buf83
        triton_poi_fused_add_div_where_16[grid(64)](buf85, buf84, buf1,
            buf43, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf86 = buf84
        del buf84
        triton_poi_fused_add_div_mul_sub_17[grid(256)](arg0_1, buf85, buf43,
            buf86, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf87 = buf85
        del buf85
        triton_poi_fused_add_div_where_18[grid(64)](buf87, buf86, buf1,
            buf43, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf88 = buf86
        del buf86
        triton_poi_fused_add_div_mul_sub_19[grid(256)](arg0_1, buf87, buf43,
            buf88, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf89 = buf87
        del buf87
        buf95 = buf67
        del buf67
        buf97 = buf65
        del buf65
        buf98 = reinterpret_tensor(buf97, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf97
        buf99 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20[grid(64)](buf89
            , buf98, buf88, buf1, buf43, arg0_1, buf95, buf99, 64, XBLOCK=
            64, num_warps=1, num_stages=1)
        del buf43
        del buf89
        buf100 = buf88
        del buf88
        triton_poi_fused_add_clamp_div_mul_pow_sub_where_21[grid(256)](arg0_1,
            buf99, buf1, buf98, buf95, buf100, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del arg0_1
        del buf1
        del buf95
        del buf98
        del buf99
        # Final renormalization so each row sums exactly to 1.
        buf101 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32
            )
        triton_poi_fused_div_22[grid(256)](buf100, buf101, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del buf100
    return buf101,
def entmax_bisect(X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
    """alpha-entmax: a sparse, normalizing transform (a la softmax).

    Solves, by bisection (root finding / binary search) on the threshold,

        max_p <x, p> - H_a(p)   s.t.  p >= 0, sum(p) == 1,

    where H_a(p) is the Tsallis alpha-entropy with alpha >= 1.  The result
    is differentiable with respect to both X and alpha.

    Parameters
    ----------
    X : torch.Tensor
        The input tensor.
    alpha : float or torch.Tensor
        Alpha parameter(s) (> 1).  A scalar or python float applies the same
        value to every row; otherwise it must have shape (or be expandable
        to) ``alpha.shape[j] == (X.shape[j] if j != dim else 1)``.
        alpha=2 corresponds to sparsemax; alpha=1 would in theory recover
        softmax but is numerically unsupported here -- use
        `torch.nn.softmax` for that.
    dim : int
        The dimension along which to apply alpha-entmax.
    n_iter : int
        Number of bisection iterations.  For float32, 24 iterations should
        suffice for machine precision.
    ensure_sum_one : bool
        When True, divide the result by its sum so it sums exactly to 1
        along `dim`; when False the sum may be only approximately 1.

    Returns
    -------
    P : torch.Tensor, same shape as X
        The projection result, such that P.sum(dim=dim) == 1 elementwise.
    """
    return EntmaxBisectFunction.apply(X, alpha, dim, n_iter, ensure_sum_one)
class EntmaxBisectFunction(Function):
    """Autograd Function computing alpha-entmax via bisection on the
    threshold tau (forward), with a closed-form vector-Jacobian product
    (backward).  See `entmax_bisect` for the user-facing contract."""
    @classmethod
    def _gp(cls, x, alpha):
        # Tsallis generator derivative: g'(x) = x ** (alpha - 1).
        return x ** (alpha - 1)
    @classmethod
    def _gp_inv(cls, y, alpha):
        # Inverse of _gp: y ** (1 / (alpha - 1)).
        return y ** (1 / (alpha - 1))
    @classmethod
    def _p(cls, X, alpha):
        # Candidate probabilities for a given threshold: clamp to the
        # support, then invert the generator derivative.
        return cls._gp_inv(torch.clamp(X, min=0), alpha)
    @classmethod
    def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True
        ):
        # Broadcast alpha to X's shape with size 1 along `dim`.
        if not isinstance(alpha, torch.Tensor):
            alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
        alpha_shape = list(X.shape)
        alpha_shape[dim] = 1
        alpha = alpha.expand(*alpha_shape)
        ctx.alpha = alpha
        ctx.dim = dim
        d = X.shape[dim]
        max_val, _ = X.max(dim=dim, keepdim=True)
        # Work in the scaled domain X * (alpha - 1).
        X = X * (alpha - 1)
        max_val = max_val * (alpha - 1)
        # Initial bracket [tau_lo, tau_hi] for the root of
        # f(tau) = _p(X - tau).sum(dim) - 1.
        tau_lo = max_val - cls._gp(1, alpha)
        tau_hi = max_val - cls._gp(1 / d, alpha)
        f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
        dm = tau_hi - tau_lo
        for it in range(n_iter):
            # Halve the interval first, then probe the midpoint; the order
            # of these statements is load-bearing.
            dm /= 2
            tau_m = tau_lo + dm
            p_m = cls._p(X - tau_m, alpha)
            f_m = p_m.sum(dim) - 1
            # Move the lower bound to tau_m wherever f_m agrees in sign
            # with f_lo (the root lies in the upper half-interval there).
            mask = (f_m * f_lo >= 0).unsqueeze(dim)
            tau_lo = torch.where(mask, tau_m, tau_lo)
        if ensure_sum_one:
            p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
        ctx.save_for_backward(p_m)
        return p_m
    @classmethod
    def backward(cls, ctx, dY):
        Y, = ctx.saved_tensors
        # gppr = Y ** (2 - alpha) on the support (Y > 0), zero off-support.
        gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
        dX = dY * gppr
        # Project out the component that would violate sum(p) == 1.
        q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
        q = q.unsqueeze(ctx.dim)
        dX -= q * gppr
        d_alpha = None
        if ctx.needs_input_grad[1]:
            # Gradient w.r.t. alpha; follows the entmax derivation
            # (Peters et al., 2019) -- not re-derived here.
            S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
            ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
            Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
            d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
            d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
            d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
        return dX, d_alpha, None, None, None
class EntmaxBisectNew(nn.Module):
    """alpha-entmax module: a normalizing sparse map (a la softmax) computed
    by bisection.  This variant's forward delegates to the module-level
    Inductor `call` pipeline, whose kernels are specialized to a CUDA
    (4, 4, 4, 4) float32 input (shapes are asserted inside `call`); the
    constructor arguments are retained for interface compatibility with the
    eager bisection implementation above."""

    def __init__(self, alpha=1.5, dim=-1, n_iter=50):
        """Configure the alpha-entmax map.

        Parameters
        ----------
        alpha : float or torch.Tensor
            Alpha parameter(s) (> 1).  A scalar applies the same value to
            every row; otherwise it must have shape (or be expandable to)
            ``alpha.shape[j] == (X.shape[j] if j != dim else 1)``.
            alpha=2 corresponds to sparsemax; alpha=1 would in theory
            recover softmax but is numerically unsupported -- use
            `torch.nn.softmax` for that.
        dim : int
            The dimension along which to apply alpha-entmax.
        n_iter : int
            Number of bisection iterations.  For float32, 24 iterations
            should suffice for machine precision.
        """
        # Initialize the nn.Module machinery *before* assigning attributes:
        # nn.Module.__setattr__ consults registries that __init__ creates,
        # so assigning first (as the original did) is fragile.
        super().__init__()
        self.dim = dim
        self.n_iter = n_iter
        self.alpha = alpha

    def forward(self, input_0):
        # call() returns a 1-tuple holding the normalized probabilities.
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
cifkao/entmax
|
EntmaxBisect
| false
| 15,222
|
[
"MIT"
] | 298
|
f18bab9318f9d2471a36545ee0b4c97be6d48a87
|
https://github.com/cifkao/entmax/tree/f18bab9318f9d2471a36545ee0b4c97be6d48a87
|
Net
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class Net(nn.Module):
    """Small CNN: two conv/pool/ReLU stages, a 3x3 and a 1x1 conv, a 4x4
    max-pool, and a softmax over the 2 output channels."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=3)
        self.conv3 = nn.Conv2d(20, 50, kernel_size=3)
        self.conv4 = nn.Conv2d(50, 2, kernel_size=1, bias=False, padding=0,
            stride=1)
        self.max_pool2d = nn.MaxPool2d((4, 4))
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        out = F.relu(F.max_pool2d(self.conv1(x), 2))
        out = F.relu(F.max_pool2d(self.conv2(out), 2))
        out = self.conv4(self.conv3(out))
        return self.softmax(self.max_pool2d(out))
def get_inputs():
    # One random batch shaped like the images Net expects.
    return [torch.rand(4, 3, 64, 64)]
def get_init_inputs():
    # Net's constructor takes no positional or keyword arguments.
    init_args, init_kwargs = [], {}
    return [init_args, init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# In-place bias add after conv1: in_out_ptr0 is the contiguous
# (4, 10, 62, 62) conv output; x1 selects the output channel
# (3844 = 62 * 62 spatial positions per channel).
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 153760
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 3844 % 10
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
# Fused 2x2/stride-2 max-pool + ReLU over the (4, 10, 62, 62) activations,
# producing (4, 10, 31, 31) pooled values (out_ptr1) and an int8 argmax
# index in 0..3 (out_ptr0, padded row stride 9728) kept for the pooling
# backward pass.
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 38440
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 31
    x3 = xindex // 31
    x2 = xindex // 9610
    x4 = xindex % 9610
    x5 = xindex
    # The four taps of the 2x2 window (row stride in the input is 62).
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x3), xmask, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x3), xmask,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x3), xmask,
        eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x3), xmask,
        eviction_policy='evict_last')
    # Running max with a running int8 argmax (0..3) over the window.
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    # ReLU of the pooled maximum.
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tl.store(out_ptr0 + (x4 + 9728 * x2), tmp15, xmask)
    tl.store(out_ptr1 + x5, tmp18, xmask)
# In-place bias add after conv2: in_out_ptr0 is the (4, 20, 29, 29) conv
# output; x1 selects the channel (841 = 29 * 29 spatial positions).
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 67280
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 841 % 20
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
# Fused 2x2/stride-2 max-pool + ReLU over the (4, 20, 29, 29) activations,
# producing (4, 20, 14, 14) pooled values (out_ptr1) and an int8 argmax
# index in 0..3 (out_ptr0, padded stride 3968) for the pooling backward.
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_3(in_ptr0, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 15680
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 14
    x1 = xindex // 14 % 14
    x4 = xindex // 196
    x3 = xindex // 3920
    x5 = xindex % 3920
    x6 = xindex
    # The four taps of the 2x2 window (input row stride 29).
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x4), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x4), xmask,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x4), xmask,
        eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x4), xmask,
        eviction_policy='evict_last')
    # Running max with a running int8 argmax (0..3), then ReLU.
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tmp17 = tl.full([1], 0, tl.int32)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tl.store(out_ptr0 + (x5 + 3968 * x3), tmp15, xmask)
    tl.store(out_ptr1 + x6, tmp18, xmask)
# In-place bias add after conv3: in_out_ptr0 is the (4, 50, 12, 12) conv
# output; x1 selects the channel (144 = 12 * 12 spatial positions).
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    xnumel = 28800
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 144 % 50
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
# 4x4 non-overlapping max-pool of the (4, 2, 12, 12) conv4 output down to
# (4, 2, 3, 3): the 16 window taps are reduced with a running max
# (tmp2..tmp30 -> out_ptr0) and a running int8 argmax in 0..15
# (tmp31..tmp76 -> out_ptr1) kept for the pooling backward pass.
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 72
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 3
    x1 = xindex // 3
    x2 = xindex
    # Sixteen taps of the 4x4 window (input row stride 12).
    tmp0 = tl.load(in_ptr0 + (4 * x0 + 48 * x1), xmask, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0 + 48 * x1), xmask, eviction_policy
        ='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0 + 48 * x1), xmask, eviction_policy
        ='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0 + 48 * x1), xmask, eviction_policy
        ='evict_last')
    tmp7 = tl.load(in_ptr0 + (12 + 4 * x0 + 48 * x1), xmask,
        eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (13 + 4 * x0 + 48 * x1), xmask,
        eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (14 + 4 * x0 + 48 * x1), xmask,
        eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (15 + 4 * x0 + 48 * x1), xmask,
        eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (24 + 4 * x0 + 48 * x1), xmask,
        eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (25 + 4 * x0 + 48 * x1), xmask,
        eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (26 + 4 * x0 + 48 * x1), xmask,
        eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (27 + 4 * x0 + 48 * x1), xmask,
        eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (36 + 4 * x0 + 48 * x1), xmask,
        eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (37 + 4 * x0 + 48 * x1), xmask,
        eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (38 + 4 * x0 + 48 * x1), xmask,
        eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (39 + 4 * x0 + 48 * x1), xmask,
        eviction_policy='evict_last')
    # Running maximum over all 16 taps.
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tmp12 = triton_helpers.maximum(tmp11, tmp10)
    tmp14 = triton_helpers.maximum(tmp13, tmp12)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tmp20 = triton_helpers.maximum(tmp19, tmp18)
    tmp22 = triton_helpers.maximum(tmp21, tmp20)
    tmp24 = triton_helpers.maximum(tmp23, tmp22)
    tmp26 = triton_helpers.maximum(tmp25, tmp24)
    tmp28 = triton_helpers.maximum(tmp27, tmp26)
    tmp30 = triton_helpers.maximum(tmp29, tmp28)
    # Running int8 argmax mirroring the same comparison chain.
    tmp31 = tmp1 > tmp0
    tmp32 = tl.full([1], 1, tl.int8)
    tmp33 = tl.full([1], 0, tl.int8)
    tmp34 = tl.where(tmp31, tmp32, tmp33)
    tmp35 = tmp3 > tmp2
    tmp36 = tl.full([1], 2, tl.int8)
    tmp37 = tl.where(tmp35, tmp36, tmp34)
    tmp38 = tmp5 > tmp4
    tmp39 = tl.full([1], 3, tl.int8)
    tmp40 = tl.where(tmp38, tmp39, tmp37)
    tmp41 = tmp7 > tmp6
    tmp42 = tl.full([1], 4, tl.int8)
    tmp43 = tl.where(tmp41, tmp42, tmp40)
    tmp44 = tmp9 > tmp8
    tmp45 = tl.full([1], 5, tl.int8)
    tmp46 = tl.where(tmp44, tmp45, tmp43)
    tmp47 = tmp11 > tmp10
    tmp48 = tl.full([1], 6, tl.int8)
    tmp49 = tl.where(tmp47, tmp48, tmp46)
    tmp50 = tmp13 > tmp12
    tmp51 = tl.full([1], 7, tl.int8)
    tmp52 = tl.where(tmp50, tmp51, tmp49)
    tmp53 = tmp15 > tmp14
    tmp54 = tl.full([1], 8, tl.int8)
    tmp55 = tl.where(tmp53, tmp54, tmp52)
    tmp56 = tmp17 > tmp16
    tmp57 = tl.full([1], 9, tl.int8)
    tmp58 = tl.where(tmp56, tmp57, tmp55)
    tmp59 = tmp19 > tmp18
    tmp60 = tl.full([1], 10, tl.int8)
    tmp61 = tl.where(tmp59, tmp60, tmp58)
    tmp62 = tmp21 > tmp20
    tmp63 = tl.full([1], 11, tl.int8)
    tmp64 = tl.where(tmp62, tmp63, tmp61)
    tmp65 = tmp23 > tmp22
    tmp66 = tl.full([1], 12, tl.int8)
    tmp67 = tl.where(tmp65, tmp66, tmp64)
    tmp68 = tmp25 > tmp24
    tmp69 = tl.full([1], 13, tl.int8)
    tmp70 = tl.where(tmp68, tmp69, tmp67)
    tmp71 = tmp27 > tmp26
    tmp72 = tl.full([1], 14, tl.int8)
    tmp73 = tl.where(tmp71, tmp72, tmp70)
    tmp74 = tmp29 > tmp28
    tmp75 = tl.full([1], 15, tl.int8)
    tmp76 = tl.where(tmp74, tmp75, tmp73)
    tl.store(out_ptr0 + x2, tmp30, xmask)
    tl.store(out_ptr1 + x2, tmp76, xmask)
# Softmax over the 2-channel dimension of the (4, 2, 3, 3) pooled output,
# numerically stabilized by subtracting the per-position channel maximum.
# x0 indexes the 9 spatial positions, x2 the batch element; the two
# channels of a position sit 9 elements apart.
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 72
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 9
    x2 = xindex // 18
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 18 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (9 + x0 + 18 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp4 = tmp0 - tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp1 - tmp3
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp2 - tmp3
    tmp9 = tl_math.exp(tmp8)
    tmp10 = tmp7 + tmp9
    tmp11 = tmp5 / tmp10
    tl.store(out_ptr0 + x3, tmp11, xmask)
def call(args):
    """Inductor-generated CUDA driver for Net's forward pass: extern
    convolutions interleaved with fused Triton kernels for bias add,
    max-pool(+ReLU) and the final 2-channel softmax.  Returns the softmax
    output first, followed by tensors saved for the backward pass."""
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (10, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (10,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (20, 10, 3, 3), (90, 9, 3, 1))
    assert_size_stride(primals_5, (20,), (1,))
    assert_size_stride(primals_6, (50, 20, 3, 3), (180, 9, 3, 1))
    assert_size_stride(primals_7, (50,), (1,))
    assert_size_stride(primals_8, (2, 50, 1, 1), (50, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # conv1 (3 -> 10, k=3), then fused bias add in place.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 10, 62, 62), (38440, 3844, 62, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(153760)](buf1, primals_2,
            153760, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_2
        # 2x2 pool + ReLU -> (4, 10, 31, 31); buf2 holds argmax indices.
        buf2 = empty_strided_cuda((4, 10, 31, 31), (9728, 961, 31, 1),
            torch.int8)
        buf3 = empty_strided_cuda((4, 10, 31, 31), (9610, 961, 31, 1),
            torch.float32)
        triton_poi_fused_max_pool2d_with_indices_relu_1[grid(38440)](buf1,
            buf2, buf3, 38440, XBLOCK=512, num_warps=4, num_stages=1)
        # conv2 (10 -> 20, k=3) + bias, then pool + ReLU -> (4, 20, 14, 14).
        buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 20, 29, 29), (16820, 841, 29, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_2[grid(67280)](buf5, primals_5, 67280,
            XBLOCK=512, num_warps=8, num_stages=1)
        del primals_5
        buf6 = empty_strided_cuda((4, 20, 14, 14), (3968, 196, 14, 1),
            torch.int8)
        buf7 = empty_strided_cuda((4, 20, 14, 14), (3920, 196, 14, 1),
            torch.float32)
        triton_poi_fused_max_pool2d_with_indices_relu_3[grid(15680)](buf5,
            buf6, buf7, 15680, XBLOCK=256, num_warps=4, num_stages=1)
        # conv3 (20 -> 50, k=3) + bias, then conv4 (50 -> 2, 1x1, no bias).
        buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 50, 12, 12), (7200, 144, 12, 1))
        buf9 = buf8
        del buf8
        triton_poi_fused_convolution_4[grid(28800)](buf9, primals_7, 28800,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
        buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 2, 12, 12), (288, 144, 12, 1))
        # 4x4 pool -> (4, 2, 3, 3), then channel softmax.
        buf11 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.float32)
        buf12 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_5[grid(72)](buf10, buf11,
            buf12, 72, XBLOCK=128, num_warps=4, num_stages=1)
        buf13 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.float32)
        triton_poi_fused__softmax_6[grid(72)](buf11, buf13, 72, XBLOCK=128,
            num_warps=4, num_stages=1)
        del buf11
    return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8,
        buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf12, buf13)
class NetNew(nn.Module):
    """Same architecture and parameters as Net, but forward dispatches to
    the compiled Triton/Inductor `call` pipeline instead of eager ops."""

    def __init__(self):
        super(NetNew, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=3)
        self.conv3 = nn.Conv2d(20, 50, kernel_size=3)
        self.conv4 = nn.Conv2d(50, 2, kernel_size=1, bias=False, padding=0,
            stride=1)
        self.max_pool2d = nn.MaxPool2d((4, 4))
        self.softmax = nn.Softmax(dim=1)

    def forward(self, input_0):
        # call() expects [conv1.w, conv1.b, input, conv2.w, conv2.b,
        # conv3.w, conv3.b, conv4.w] in exactly this order.
        packed = [self.conv1.weight, self.conv1.bias, input_0,
                  self.conv2.weight, self.conv2.bias,
                  self.conv3.weight, self.conv3.bias,
                  self.conv4.weight]
        return call(packed)[0]
|
dqawami/openvino_training_extensions
|
Net
| false
| 15,223
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
EquivariantLayer
|
import math
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
class MyBatchNorm1d(_BatchNorm):
    """Applies Batch Normalization over a 2d or 3d input that is seen as a
    mini-batch.
    .. math::
        y = \\frac{x - mean[x]}{ \\sqrt{Var[x] + \\epsilon}} * gamma + beta
    The mean and standard-deviation are calculated per-dimension over
    the mini-batches and gamma and beta are learnable parameter vectors
    of size C (where C is the input size).
    During training, this layer keeps a running estimate of its computed mean
    and variance. The running sum is kept with a default momentum of 0.1.
    During evaluation, this running mean/variance is used for normalization.
    Because the BatchNorm is done over the `C` dimension, computing statistics
    on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
    Args:
        num_features: num_features from an expected input of size
            `batch_size x num_features [x width]`
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer learnable
            affine parameters. Default: ``True``
    Shape:
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
    """
    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True,
        momentum_decay_step=None, momentum_decay=1):
        super(MyBatchNorm1d, self).__init__(num_features, eps, momentum, affine
            )
        # Every `momentum_decay_step` epochs the effective momentum is
        # multiplied by `momentum_decay` (see forward); keep the starting
        # value so the decay is always computed from the original momentum.
        self.momentum_decay_step = momentum_decay_step
        self.momentum_decay = momentum_decay
        self.momentum_original = self.momentum
    def _check_input_dim(self, input):
        if input.dim() != 2 and input.dim() != 3:
            raise ValueError('expected 2D or 3D input (got {}D input)'.
                format(input.dim()))
        # NOTE(review): on some torch versions _BatchNorm._check_input_dim
        # raises NotImplementedError, so this super() call may itself raise;
        # latent here because forward() below never calls _check_input_dim.
        super(MyBatchNorm1d, self)._check_input_dim(input)
    def forward(self, input, epoch=None):
        # Exponentially decay the running-stats momentum with the epoch,
        # floored at 0.01; note this mutates self.momentum as a side effect.
        if (epoch is not None and epoch >= 1 and self.momentum_decay_step
            is not None and self.momentum_decay_step > 0):
            self.momentum = self.momentum_original * self.momentum_decay ** (
                epoch // self.momentum_decay_step)
            if self.momentum < 0.01:
                self.momentum = 0.01
        return F.batch_norm(input, self.running_mean, self.running_var,
            self.weight, self.bias, self.training, self.momentum, self.eps)
class Swish(nn.Module):
    """Scaled, shifted SiLU: 1.78718727865 * (x * sigmoid(x) - 0.20662096414).
    The constants presumably normalize the output statistics -- not
    verified here."""

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        silu = x * torch.sigmoid(x)
        return 1.78718727865 * (silu - 0.20662096414)
class EquivariantLayer(nn.Module):
    """Pointwise (1x1) Conv1d -- a per-position shared linear map -- with
    optional normalization ('batch' or 'instance') and activation
    ('relu', 'elu', 'swish', 'leakyrelu')."""

    def __init__(self, num_in_channels, num_out_channels, activation='relu',
        normalization=None, momentum=0.1, bn_momentum_decay_step=None,
        bn_momentum_decay=1):
        super(EquivariantLayer, self).__init__()
        self.num_in_channels = num_in_channels
        self.num_out_channels = num_out_channels
        self.activation = activation
        self.normalization = normalization
        self.conv = nn.Conv1d(self.num_in_channels, self.num_out_channels,
            kernel_size=1, stride=1, padding=0)
        if self.normalization == 'batch':
            self.norm = MyBatchNorm1d(self.num_out_channels, momentum=
                momentum, affine=True, momentum_decay_step=
                bn_momentum_decay_step, momentum_decay=bn_momentum_decay)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm1d(self.num_out_channels, momentum=
                momentum, affine=True)
        # Unrecognized activation strings silently leave self.act unset,
        # matching the original behavior (forward would then fail).
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif self.activation == 'elu':
            self.act = nn.ELU(alpha=1.0)
        elif self.activation == 'swish':
            self.act = Swish()
        elif self.activation == 'leakyrelu':
            self.act = nn.LeakyReLU(0.1)
        self.weight_init()

    def weight_init(self):
        # He-style normal init for convs; unit gain / zero shift for norms.
        for module in self.modules():
            if isinstance(module, nn.Conv1d):
                fan_in = module.kernel_size[0] * module.in_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_in))
                if module.bias is not None:
                    module.bias.data.fill_(0)
            elif isinstance(module, (MyBatchNorm1d, nn.InstanceNorm1d)):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, x, epoch=None):
        y = self.conv(x)
        if self.normalization == 'batch':
            # MyBatchNorm1d takes the epoch to drive its momentum decay.
            y = self.norm(y, epoch)
        elif self.normalization is not None:
            y = self.norm(y)
        if self.activation is not None:
            y = self.act(y)
        return y
def get_inputs():
    # One random (channels, length) sample for the layer's smoke test.
    return [torch.rand(4, 4)]
def get_init_inputs():
    # Positional and keyword constructor arguments for EquivariantLayer.
    init_kwargs = {'num_in_channels': 4, 'num_out_channels': 4}
    return [[], init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Fused bias add + ReLU for the 1x1 Conv1d output (viewed as a 4x4 matrix),
# additionally emitting the boolean mask (activation <= 0) that autograd
# uses for the ReLU threshold backward.  x1 indexes the row, which here is
# the output channel of the (1, 4, 4) conv result.
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    """Inductor driver for EquivariantLayer(4, 4) in its default
    configuration (1x1 Conv1d + ReLU, no normalization) on a (4, 4) input:
    runs the convolution through the extern kernel, then the fused
    bias+ReLU Triton kernel.  Returns (activations, conv weight, reshaped
    input, relu backward mask)."""
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # View the (4, 4) input as a single-batch (1, 4, 4) sequence so
        # Conv1d applies; kernel_size=1 keeps the length at 4.
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
            4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf0, (1, 4, 4), (16, 4, 1))
        buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
        del buf0
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1,
            primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
    return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4,
        1), 0), buf2
class MyBatchNorm1d(_BatchNorm):
    """Applies Batch Normalization over a 2d or 3d input that is seen as a
    mini-batch.
    .. math::
        y = \\frac{x - mean[x]}{ \\sqrt{Var[x] + \\epsilon}} * gamma + beta
    The mean and standard-deviation are calculated per-dimension over
    the mini-batches and gamma and beta are learnable parameter vectors
    of size C (where C is the input size).
    During training, this layer keeps a running estimate of its computed mean
    and variance. The running sum is kept with a default momentum of 0.1.
    During evaluation, this running mean/variance is used for normalization.
    Because the BatchNorm is done over the `C` dimension, computing statistics
    on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
    Args:
        num_features: num_features from an expected input of size
            `batch_size x num_features [x width]`
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer learnable
            affine parameters. Default: ``True``
    Shape:
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
    """
    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True,
        momentum_decay_step=None, momentum_decay=1):
        super(MyBatchNorm1d, self).__init__(num_features, eps, momentum, affine
            )
        # Every `momentum_decay_step` epochs the effective momentum is
        # multiplied by `momentum_decay` (see forward); keep the starting
        # value so the decay is always computed from the original momentum.
        self.momentum_decay_step = momentum_decay_step
        self.momentum_decay = momentum_decay
        self.momentum_original = self.momentum
    def _check_input_dim(self, input):
        if input.dim() != 2 and input.dim() != 3:
            raise ValueError('expected 2D or 3D input (got {}D input)'.
                format(input.dim()))
        # NOTE(review): on some torch versions _BatchNorm._check_input_dim
        # raises NotImplementedError, so this super() call may itself raise;
        # latent here because forward() below never calls _check_input_dim.
        super(MyBatchNorm1d, self)._check_input_dim(input)
    def forward(self, input, epoch=None):
        # Exponentially decay the running-stats momentum with the epoch,
        # floored at 0.01; note this mutates self.momentum as a side effect.
        if (epoch is not None and epoch >= 1 and self.momentum_decay_step
            is not None and self.momentum_decay_step > 0):
            self.momentum = self.momentum_original * self.momentum_decay ** (
                epoch // self.momentum_decay_step)
            if self.momentum < 0.01:
                self.momentum = 0.01
        return F.batch_norm(input, self.running_mean, self.running_var,
            self.weight, self.bias, self.training, self.momentum, self.eps)
class Swish(nn.Module):
    """Scaled/shifted SiLU: ``1.78718727865 * (x * sigmoid(x) - 0.20662096414)``."""

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        silu = x * torch.sigmoid(x)
        return 1.78718727865 * (silu - 0.20662096414)
class EquivariantLayerNew(nn.Module):
    """1x1 Conv1d layer with optional normalization/activation submodules.

    The forward pass is routed through the inductor-generated ``call`` entry
    point (conv + fused bias/ReLU kernel); the norm/act submodules are built
    for interface parity with the eager module.
    """

    def __init__(self, num_in_channels, num_out_channels, activation='relu',
        normalization=None, momentum=0.1, bn_momentum_decay_step=None,
        bn_momentum_decay=1):
        super(EquivariantLayerNew, self).__init__()
        self.num_in_channels = num_in_channels
        self.num_out_channels = num_out_channels
        self.activation = activation
        self.normalization = normalization
        self.conv = nn.Conv1d(self.num_in_channels, self.num_out_channels,
            kernel_size=1, stride=1, padding=0)
        if self.normalization == 'batch':
            self.norm = MyBatchNorm1d(self.num_out_channels, momentum=
                momentum, affine=True, momentum_decay_step=
                bn_momentum_decay_step, momentum_decay=bn_momentum_decay)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm1d(self.num_out_channels, momentum=
                momentum, affine=True)
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif self.activation == 'elu':
            self.act = nn.ELU(alpha=1.0)
        elif self.activation == 'swish':
            self.act = Swish()
        elif self.activation == 'leakyrelu':
            self.act = nn.LeakyReLU(0.1)
        self.weight_init()

    def weight_init(self):
        """He-style init for convs; unit gamma / zero beta for norm layers."""
        for module in self.modules():
            if isinstance(module, nn.Conv1d):
                fan_in = module.kernel_size[0] * module.in_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_in))
                if module.bias is not None:
                    module.bias.data.fill_(0)
            elif isinstance(module, (MyBatchNorm1d, nn.InstanceNorm1d)):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, input_0):
        # Weight/bias/input ordering matches the generated call() signature.
        result, = call([self.conv.weight, self.conv.bias, input_0])[:1]
        return result
|
doudoulaile/RL-GAN-Net
|
EquivariantLayer
| false
| 15,224
|
[
"MIT"
] | 112
|
9c221223d1878bc24f0f39ad34928c1bb2974ae3
|
https://github.com/doudoulaile/RL-GAN-Net/tree/9c221223d1878bc24f0f39ad34928c1bb2974ae3
|
FAdd
|
import torch
import numpy as np
import torch.nn as nn
class FAdd(nn.Module):
    """Elementwise ``x + y + float32(0.1)``."""

    def __init__(self):
        super(FAdd, self).__init__()

    def forward(self, x, y):
        # np.float32 matches the constant's precision in the compiled kernel.
        return x + y + np.float32(0.1)
def get_inputs():
    """Sample forward() inputs for benchmarking: two random 4x4x4x4 tensors."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape) for _ in range(2)]


def get_init_inputs():
    """Constructor (args, kwargs) for benchmarking: none needed."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Fused elementwise kernel for FAdd: out = in0 + in1 + float32(0.1),
    # iterating over the 256 elements of a contiguous (4, 4, 4, 4) tensor.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel  # guard the tail when XBLOCK does not divide xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tmp3 = 0.10000000149011612  # np.float32(0.1) widened to double
    tmp4 = tmp2 + tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
    # Inductor entry point for FAdd: validate the two (4,4,4,4) inputs,
    # launch the fused add kernel on CUDA device 0, return a 1-tuple.
    arg0_1, arg1_1 = args
    args.clear()  # drop caller references so inputs can be freed early
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__to_copy_add_0[grid(256)](arg0_1, arg1_1, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,
class FAddNew(nn.Module):
    """Triton-compiled equivalent of FAdd: elementwise ``x + y + float32(0.1)``."""

    def __init__(self):
        super(FAddNew, self).__init__()

    def forward(self, input_0, input_1):
        result, = call([input_0, input_1])
        return result
|
dawnclaude/onnx2keras
|
FAdd
| false
| 15,225
|
[
"MIT"
] | 115
|
3d2a47c0a228b91fd434232274e216e491da36e3
|
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
|
Embedding_Net
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
def weights_init(m):
    """DCGAN-style init dispatched on class name.

    Linear layers: weight ~ N(0, 0.02), bias = 0.
    BatchNorm layers: weight ~ N(1, 0.02), bias = 0.
    Other modules are left untouched.
    """
    kind = m.__class__.__name__
    if 'Linear' in kind:
        m.weight.data.normal_(0.0, 0.02)
        m.bias.data.fill_(0)
    elif 'BatchNorm' in kind:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
class Embedding_Net(nn.Module):
    """Maps visual features to an embedding plus an L2-normalized projection.

    ``opt`` supplies the layer sizes (``resSize`` -> ``embedSize`` ->
    ``outzSize``).  Returns both the ReLU'd embedding and the normalized
    projection (normalized along dim 1).
    """

    def __init__(self, opt):
        super(Embedding_Net, self).__init__()
        self.fc1 = nn.Linear(opt.resSize, opt.embedSize)
        self.fc2 = nn.Linear(opt.embedSize, opt.outzSize)
        self.lrelu = nn.LeakyReLU(0.2, True)
        self.relu = nn.ReLU(True)
        self.apply(weights_init)

    def forward(self, features):
        hidden = self.relu(self.fc1(features))
        projected = self.fc2(hidden)
        return hidden, F.normalize(projected, dim=1)
def get_inputs():
    """Sample forward() input for benchmarking."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Constructor (args, kwargs): a mock opt with matching layer sizes."""
    return [[], {'opt': _mock_config(resSize=4, embedSize=4, outzSize=4)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_view_0(in_out_ptr0, in_ptr0,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Fused fc1 epilogue: add the per-output bias (in_ptr0, 4 values),
    # apply ReLU in place, write a second copy of the activation (out_ptr0)
    # and the (result <= 0) mask used by ReLU's backward (out_ptr1).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 4  # output-feature index for the bias lookup
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # threshold-backward mask
    tl.store(in_out_ptr0 + x4, tmp4, xmask)
    tl.store(out_ptr0 + x4, tmp4, xmask)
    tl.store(out_ptr1 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Materializes a reshaped view of the activation as a contiguous (64, 4)
    # buffer.  The gather index below is inductor-generated; several of its
    # terms fold to zero for these fixed 4x4x4x4 shapes.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
        (x1 // 4 % 4) + x1 % 4) // 16)), xmask)
    tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # F.normalize(x, dim=1) for a (4, 4, 4, 4) tensor: divide each element by
    # the L2 norm across the four stride-16 (dim-1) slices, with the norm
    # clamped below at 1e-12 (F.normalize's default eps).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16  # position within one dim-1 slice
    x2 = xindex // 64  # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    # The four dim-1 entries sharing (batch, spatial) with this element:
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)  # avoid division by zero
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
    # Inductor entry point for Embedding_Net: fc1 matmul, fused bias+ReLU,
    # view copy, fc2 addmm, then L2 normalization along dim 1.  Also returns
    # the intermediates/masks needed for the backward pass.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # fc1: (64, 4) @ (4, 4)^T, bias fused into the next kernel
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_view_0[grid(256)](buf1,
            primals_2, buf2, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        triton_poi_fused_view_1[grid(256)](buf1, buf3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf4 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
        del buf1
        # fc2: bias + embedding @ weight^T
        extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del primals_5
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_div_2[grid(256)](buf4, buf5, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
    return buf2, buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf3, buf4, primals_4, buf6
def weights_init(m):
    """DCGAN-style init: N(0, 0.02) Linear weights, N(1, 0.02) BatchNorm
    weights, zero biases for both; other module types untouched."""
    name = m.__class__.__name__
    is_linear = name.find('Linear') != -1
    if is_linear:
        m.weight.data.normal_(0.0, 0.02)
    elif name.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
    else:
        return
    m.bias.data.fill_(0)
class Embedding_NetNew(nn.Module):
    """Triton-compiled Embedding_Net: returns (ReLU embedding, L2-normalized
    projection), computed by the generated ``call`` entry point."""

    def __init__(self, opt):
        super(Embedding_NetNew, self).__init__()
        self.fc1 = nn.Linear(opt.resSize, opt.embedSize)
        self.fc2 = nn.Linear(opt.embedSize, opt.outzSize)
        self.lrelu = nn.LeakyReLU(0.2, True)
        self.relu = nn.ReLU(True)
        self.apply(weights_init)

    def forward(self, input_0):
        # Argument order matches the generated call() signature.
        outs = call([self.fc1.weight, self.fc1.bias, input_0,
            self.fc2.weight, self.fc2.bias])
        return outs[0], outs[1]
|
Huihui-z/CE-GZSL
|
Embedding_Net
| false
| 15,226
|
[
"MIT"
] | 58
|
7bf5358ac4727ea1dc2dc9dec2f453b014500bd8
|
https://github.com/Huihui-z/CE-GZSL/tree/7bf5358ac4727ea1dc2dc9dec2f453b014500bd8
|
GatedLinearUnit
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class GatedLinearUnit(nn.Module):
    """Gated linear unit: ``sigmoid(w4(x)) * w5(x)`` with optional input dropout."""

    def __init__(self, input_size, output_size, dropout=0):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.w4 = nn.Linear(input_size, output_size)
        self.w5 = nn.Linear(input_size, output_size)
        self.act = nn.Sigmoid()

    def forward(self, x):
        x = self.dropout(x)
        gate = self.act(self.w4(x))
        return gate * self.w5(x)
def get_inputs():
    """Sample forward() input for benchmarking."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Constructor (args, kwargs) for benchmarking."""
    return [[], {'input_size': 4, 'output_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # GLU epilogue: out = sigmoid(in0) * in1, elementwise over 256 elements.
    # in0 holds w4(x) and in1 holds w5(x), both precomputed via addmm.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
    # Inductor entry point for GatedLinearUnit: two addmm projections
    # (w4 and w5) followed by the fused sigmoid-multiply kernel.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # w4(x): bias + x @ weight^T
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # w5(x): bias + x @ weight^T
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, buf1, buf2, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
    return buf2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, buf1
class GatedLinearUnitNew(nn.Module):
    """Triton-compiled GLU: ``sigmoid(w4(x)) * w5(x)``.

    The dropout submodule is kept for interface parity but the generated
    kernel path does not apply it (dropout defaults to 0).
    """

    def __init__(self, input_size, output_size, dropout=0):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.w4 = nn.Linear(input_size, output_size)
        self.w5 = nn.Linear(input_size, output_size)
        self.act = nn.Sigmoid()

    def forward(self, input_0):
        params = [input_0, self.w4.weight, self.w4.bias, self.w5.weight,
            self.w5.bias]
        return call(params)[0]
|
dqawami/openvino_training_extensions
|
GatedLinearUnit
| false
| 15,227
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
Swish
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class Swish(nn.Module):
    """Normalized Swish activation: ``1.78718727865 * (x*sigmoid(x) - 0.20662096414)``."""

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        shifted = x * torch.sigmoid(x) - 0.20662096414
        return 1.78718727865 * shifted
def get_inputs():
    """Sample forward() input for benchmarking."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs): none needed."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Fused Swish: out = 1.78718727865 * (x * sigmoid(x) - 0.20662096414),
    # elementwise over the 256 elements of a contiguous (4,4,4,4) tensor.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = tmp0 * tmp1  # SiLU
    tmp3 = 0.20662096414
    tmp4 = tmp2 - tmp3
    tmp5 = 1.78718727865
    tmp6 = tmp4 * tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
    # Inductor entry point for Swish: single fused elementwise kernel on
    # CUDA device 0; returns a 1-tuple with the activated tensor.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sigmoid_sub_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class SwishNew(nn.Module):
    """Triton-compiled Swish: ``1.78718727865 * (x * sigmoid(x) - 0.20662096414)``."""

    def __init__(self):
        super(SwishNew, self).__init__()

    def forward(self, input_0):
        result, = call([input_0])
        return result
|
doudoulaile/RL-GAN-Net
|
Swish
| false
| 15,228
|
[
"MIT"
] | 112
|
9c221223d1878bc24f0f39ad34928c1bb2974ae3
|
https://github.com/doudoulaile/RL-GAN-Net/tree/9c221223d1878bc24f0f39ad34928c1bb2974ae3
|
GNNLayer
|
from torch.nn import Module
import torch
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
class GNNLayer(Module):
    """One graph-convolution layer: ``adj @ (features @ W)``, optionally ReLU'd.

    The weight is Xavier-uniform initialized; ``adj`` may be sparse
    (``torch.spmm`` handles the aggregation).
    """

    def __init__(self, in_features, out_features):
        super(GNNLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        torch.nn.init.xavier_uniform_(self.weight)

    def forward(self, features, adj, active=True):
        support = torch.mm(features, self.weight)
        aggregated = torch.spmm(adj, support)
        return F.relu(aggregated) if active else aggregated
def get_inputs():
    """Sample forward() inputs: features and adjacency, both 4x4."""
    return [torch.rand(4, 4) for _ in range(2)]


def get_init_inputs():
    """Constructor (args, kwargs) for benchmarking."""
    return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # In-place ReLU over 16 elements, also emitting the (result <= 0)
    # boolean mask consumed by ReLU's backward pass.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)  # ReLU
    tmp3 = 0.0
    tmp4 = tmp2 <= tmp3  # threshold-backward mask
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
    tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
    # Inductor entry point for GNNLayer (active=True path): features @ W,
    # then adj @ support (both as dense mm), then in-place ReLU + mask.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_2, primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_3, buf0, out=buf1)
        del buf0
        buf2 = buf1
        del buf1
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(16)](buf2, buf3, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
    return buf2, buf3, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0
        ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)
class GNNLayerNew(Module):
    """Triton-compiled GNN layer: ReLU(adj @ (features @ W)) via generated call().

    Note: unlike the eager GNNLayer, this compiled variant is specialized to
    the ``active=True`` path and takes no ``active`` flag.
    """

    def __init__(self, in_features, out_features):
        super(GNNLayerNew, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        torch.nn.init.xavier_uniform_(self.weight)

    def forward(self, input_0, input_1):
        result = call([self.weight, input_0, input_1])
        return result[0]
|
drzhang3/SDCN
|
GNNLayer
| false
| 15,229
|
[
"Apache-2.0"
] | 146
|
3d11365bcb4af2cbe9625362737f1224aeea3b72
|
https://github.com/drzhang3/SDCN/tree/3d11365bcb4af2cbe9625362737f1224aeea3b72
|
RGBDiff
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class RGBDiff(nn.Module):
    """Temporal differencing: each slice along ``dim`` (except the first) is
    replaced by its difference from the previous slice, so the output has one
    fewer slice along that dimension."""

    def __init__(self, dim=1):
        super().__init__()
        self.dim = dim

    def forward(self, image):
        """
        Args:
            image (torch.Tensor): (N x T x C x H x W)
        """
        def frame(j):
            # index_select keeps the selected dimension with size 1,
            # so the diffs can be concatenated back along self.dim.
            return image.index_select(self.dim, image.new_tensor(j,
                dtype=torch.long))

        count = image.size(self.dim)
        diffs = [frame(j) - frame(j - 1) for j in range(1, count)]
        return torch.cat(diffs, dim=self.dim)
def get_inputs():
    """Sample forward() input for benchmarking."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs): defaults suffice."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused RGBDiff for a (4, 4, 4, 4) input: produces the three temporal
    # differences (frame[t] - frame[t-1]) concatenated along dim 1 into a
    # (4, 3, 4, 4) output.  The x1 slot index selects which difference each
    # output element belongs to; out-of-branch loads are masked to 0.
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 3  # which of the 3 diffs
    x0 = xindex % 16       # spatial position within a 4x4 slice
    x2 = xindex // 48      # batch index
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3  # branch: diff 0 (frame1 - frame0)
    tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp7 = tmp5 - tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tmp11 = tl.full([1], 2, tl.int64)
    tmp12 = tmp0 < tmp11
    tmp13 = tmp10 & tmp12  # branch: diff 1 (frame2 - frame1)
    tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp13 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp13 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tmp14 - tmp15
    tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
    tmp18 = tl.where(tmp13, tmp16, tmp17)
    tmp19 = tmp0 >= tmp11  # branch: diff 2 (frame3 - frame2)
    tl.full([1], 3, tl.int64)
    tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp19 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp23 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp19 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp24 = tmp22 - tmp23
    tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
    tmp26 = tl.where(tmp19, tmp24, tmp25)
    # Select the active branch's value for each element.
    tmp27 = tl.where(tmp13, tmp18, tmp26)
    tmp28 = tl.where(tmp4, tmp9, tmp27)
    tl.store(out_ptr0 + x3, tmp28, xmask)
def call(args):
    # Inductor entry point for RGBDiff: one fused kernel turns the (4,4,4,4)
    # input into the (4,3,4,4) tensor of temporal differences.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(192)](arg0_1, buf0, 192, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class RGBDiffNew(nn.Module):
    """Triton-compiled RGBDiff (specialized to 4 frames along dim 1)."""

    def __init__(self, dim=1):
        super().__init__()
        self.dim = dim

    def forward(self, input_0):
        result, = call([input_0])
        return result
|
dqawami/openvino_training_extensions
|
RGBDiff
| false
| 15,230
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
StddevLayer
|
import torch
from torch import nn
class StddevLayer(nn.Module):
    """Minibatch standard-deviation layer (ProGAN/StyleGAN discriminator).

    Appends ``num_new_features`` extra channels to the input, each holding
    the average standard deviation of the features across a minibatch group,
    which helps the discriminator detect mode collapse.

    Args:
        group_size: samples per statistics group (capped at the batch size).
        num_new_features: number of appended stddev channels; the channel
            count must be divisible by this.
    """

    def __init__(self, group_size=4, num_new_features=1):
        super().__init__()
        # Bug fix: the original assigned the literals 4 and 1 here, silently
        # ignoring both constructor arguments.  Defaults are unchanged, so
        # existing callers see identical behavior.
        self.group_size = group_size
        self.num_new_features = num_new_features

    def forward(self, x):
        b, c, h, w = x.shape
        group_size = min(self.group_size, b)
        # Split the batch into groups; compute per-position variance within
        # each group.
        y = x.reshape([group_size, -1, self.num_new_features, c // self.
            num_new_features, h, w])
        y = y - y.mean(0, keepdim=True)
        y = (y ** 2).mean(0, keepdim=True)
        # Stddev with a small epsilon for numerical stability.
        y = (y + 1e-08) ** 0.5
        # Average over features and spatial dims -> one scalar per group
        # (per new feature), then broadcast back over the group and space.
        y = y.mean([3, 4, 5], keepdim=True).squeeze(3)
        y = y.expand(group_size, -1, -1, h, w).clone().reshape(b, self.
            num_new_features, h, w)
        return torch.cat([x, y], dim=1)
def get_inputs():
    """Sample forward() input for benchmarking."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs): defaults suffice."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_cat_mean_pow_sub_0(in_ptr0, out_ptr1, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    # Minibatch-stddev reduction, specialized to group_size=4 and a
    # (4, 4, 4, 4) input: for each of the 64 feature/spatial positions the
    # four loads (offsets 0/64/128/192 = one per sample) compute the
    # across-group mean, variance and sqrt(var + 1e-8); the 64 stddevs are
    # then averaged into a single scalar and written into the extra channel
    # slot (stride-80 output rows) of every sample.
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    r1 = rindex % 16   # spatial position within one 4x4 slice
    r2 = rindex // 16  # sample index for the broadcast store
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr0 + (64 + r0), None)
    tmp3 = tl.load(in_ptr0 + (128 + r0), None)
    tmp5 = tl.load(in_ptr0 + (192 + r0), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7  # across-group mean
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7  # across-group variance
    tmp21 = 1e-08
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.sqrt(tmp22)  # stddev with epsilon
    tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
    tmp26 = tl.sum(tmp24, 1)[:, None]
    tmp27 = 64.0
    tmp28 = tmp26 / tmp27  # mean stddev over all 64 positions
    tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]),
        tmp28, None)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Copies the original (4, 4, 4, 4) input into the first 4 channels of
    # the stride-80 concatenated output (the 5th channel holds the stddev
    # scalar written by the reduction kernel).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64   # offset within one sample's original channels
    x1 = xindex // 64  # sample index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask)
def call(args):
    # Inductor entry point for StddevLayer: allocates the (4, 5, 4, 4)
    # output, fills channel 4 via the stddev reduction kernel and channels
    # 0-3 via a strided copy (both writing into the same buffer, so the
    # torch.cat is free).
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
        buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64)
        get_raw_stream(0)
        triton_per_fused_add_cat_mean_pow_sub_0[grid(1)](arg0_1, buf2, 1,
            64, XBLOCK=1, num_warps=2, num_stages=1)
        buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0)
        triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf3,
class StddevLayerNew(nn.Module):
    """Triton-compiled minibatch-stddev layer.

    NOTE(review): the constructor arguments are ignored — the generated
    kernels are specialized to group_size=4 and num_new_features=1, and the
    hard-coded attributes preserve that specialization.
    """

    def __init__(self, group_size=4, num_new_features=1):
        super().__init__()
        self.group_size = 4
        self.num_new_features = 1

    def forward(self, input_0):
        result, = call([input_0])
        return result
|
dubtor/EditGAN-Robert
|
StddevLayer
| false
| 15,231
|
[
"BSD-2-Clause"
] | 110
|
8e6d80e7647c3536827f11cf0a9abf51c42794b2
|
https://github.com/dubtor/EditGAN-Robert/tree/8e6d80e7647c3536827f11cf0a9abf51c42794b2
|
Actor
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
def hidden_init(layer):
    """Return symmetric uniform init bounds ``(-1/sqrt(fan), 1/sqrt(fan))``.

    NOTE(review): ``fan_in`` here is ``weight.size(0)``, which for nn.Linear
    is actually the output dimension — kept as-is to preserve behavior.
    """
    fan_in = layer.weight.data.size()[0]
    bound = 1.0 / np.sqrt(fan_in)
    return -bound, bound
class Actor(nn.Module):
    """Actor (Policy) Model for a squashed-Gaussian (SAC-style) policy."""

    def __init__(self, state_size, action_size, seed, device, hidden_size=
        32, init_w=0.003, log_std_min=-20, log_std_max=2):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            device: device identifier the caller runs the model on (stored only)
            hidden_size (int): Width of the two hidden layers
            init_w (float): Uniform bound used by reset_parameters for the heads
            log_std_min / log_std_max (float): Clamp range for the log-std head
        """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.device = device
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        # Bug fix: init_w was only an __init__ argument, so reset_parameters()
        # raised NameError when it referenced it.  Store it on the instance.
        self.init_w = init_w
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.mu = nn.Linear(hidden_size, action_size)
        self.log_std_linear = nn.Linear(hidden_size, action_size)

    def reset_parameters(self):
        """Re-initialize weights: fan-based uniform trunk, small uniform heads."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.mu.weight.data.uniform_(-self.init_w, self.init_w)
        self.log_std_linear.weight.data.uniform_(-self.init_w, self.init_w)

    def forward(self, state):
        """Return (mu, log_std); log_std clamped to [log_std_min, log_std_max]."""
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        mu = self.mu(x)
        log_std = self.log_std_linear(x)
        log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
        return mu, log_std

    def evaluate(self, state, epsilon=1e-06):
        """Sample a tanh-squashed action and its log-probability.

        The log-prob includes the tanh change-of-variables correction
        ``-log(1 - tanh(e)^2 + epsilon)``, summed over action dims.
        """
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        log_prob = (dist.log_prob(e) - torch.log(1 - action.pow(2) + epsilon)
            ).sum(1, keepdim=True)
        return action, log_prob

    def get_action(self, state):
        """
        returns the action based on a squashed gaussian policy. That means the samples are obtained according to:
        a(s,e)= tanh(mu(s)+sigma(s)+e)
        """
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        return action.detach().cpu()

    def get_det_action(self, state):
        """Deterministic action: tanh of the policy mean."""
        mu, _log_std = self.forward(state)
        return torch.tanh(mu).detach().cpu()
def get_inputs():
    """Sample forward() input for benchmarking."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for benchmarking."""
    kwargs = {'state_size': 4, 'action_size': 4, 'seed': 4, 'device': 0}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
from torch.distributions import Normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Hidden-layer epilogue: add the per-unit bias (in_ptr0, 32 values),
    # ReLU in place, and emit the (result <= 0) mask for ReLU's backward.
    # No xmask: the launch size is known to be a multiple of XBLOCK.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 32  # hidden-unit index for the bias lookup
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # log_std head epilogue: add the per-action bias, clamp the result to
    # [-20.0, 2.0] (the Actor's log-std range), and record the in-range mask
    # used by clamp's backward.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # action index for the bias lookup
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = -20.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 2.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp2 >= tmp3
    tmp8 = tmp2 <= tmp5
    tmp9 = tmp7 & tmp8  # True where clamp passed the value through
    tl.store(out_ptr0 + x2, tmp6, xmask)
    tl.store(out_ptr1 + x2, tmp9, xmask)
def call(args):
    # Inductor entry point for Actor.forward: fc1 mm + fused bias/ReLU,
    # fc2 mm + fused bias/ReLU, mu via addmm, log_std via mm + fused
    # bias/clamp.  Returns (mu, log_std) plus backward intermediates.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (32, 4), (4, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (32, 32), (32, 1))
    assert_size_stride(primals_5, (32,), (1,))
    assert_size_stride(primals_6, (4, 32), (32, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 32), (32, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
        # fc1: flatten the (4,4,4,4) state to (64, 4)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0)
        del buf0
        buf9 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1,
            primals_2, buf9, 2048, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
        # fc2
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0),
            reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0)
        del buf2
        buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf3,
            primals_5, buf8, 2048, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # mu head: bias folded into addmm
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32),
            (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # log_std head: bias + clamp handled by the fused kernel
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0),
            reinterpret_tensor(primals_8, (32, 4), (1, 32), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_clamp_ge_le_logical_and_1[grid(256)](buf5,
            primals_9, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf5
        del primals_9
    return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(
        buf3, (64, 32), (32, 1), 0
        ), buf7, primals_8, primals_6, buf8, primals_4, buf9
def hidden_init(layer):
    """Return the symmetric uniform-init range (-1/sqrt(fan), +1/sqrt(fan)).

    The fan size is taken from dim 0 of the layer's weight tensor, as in the
    classic DDPG hidden-layer initialization.
    """
    fan = layer.weight.data.size(0)
    bound = 1.0 / np.sqrt(fan)
    return -bound, bound
class ActorNew(nn.Module):
    """Actor (Policy) Model backed by a Triton-compiled forward graph.

    ``forward`` returns the mean and log-std heads of a squashed Gaussian
    policy; the sampling helpers wrap it for training and acting.
    """

    def __init__(self, state_size, action_size, seed, device, hidden_size=
        32, init_w=0.003, log_std_min=-20, log_std_max=2):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            device: Torch device used by callers of this policy
            hidden_size (int): Number of nodes in each hidden layer
            init_w (float): Half-width of the uniform range used by
                ``reset_parameters`` for the output heads
            log_std_min (float): Lower bound kept for the log-std head
            log_std_max (float): Upper bound kept for the log-std head
        """
        super(ActorNew, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.device = device
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        # Bug fix: init_w was previously read as a free (undefined) name in
        # reset_parameters, raising NameError; keep it on the instance.
        self.init_w = init_w
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.mu = nn.Linear(hidden_size, action_size)
        self.log_std_linear = nn.Linear(hidden_size, action_size)

    def reset_parameters(self):
        """Re-initialize weights: fan-in uniform for hidden layers, +/-init_w heads."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.mu.weight.data.uniform_(-self.init_w, self.init_w)
        self.log_std_linear.weight.data.uniform_(-self.init_w, self.init_w)

    def evaluate(self, state, epsilon=1e-06):
        """Sample an action plus its tanh-corrected log-probability.

        Uses rsample() so gradients flow through the sample; epsilon guards
        the log of (1 - tanh^2) against zero.
        """
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        log_prob = (dist.log_prob(e) - torch.log(1 - action.pow(2) + epsilon)
            ).sum(1, keepdim=True)
        return action, log_prob

    def get_action(self, state):
        """
        returns the action based on a squashed gaussian policy. That means the samples are obtained according to:
        a(s,e)= tanh(mu(s)+sigma(s)+e)
        """
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        return action.detach().cpu()

    def get_det_action(self, state):
        """Deterministic action: tanh of the mean head, no sampling."""
        mu, _log_std = self.forward(state)
        return torch.tanh(mu).detach().cpu()

    def forward(self, input_0):
        """Run the compiled graph; returns (mu, log_std) as produced by ``call``."""
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.mu.weight
        primals_7 = self.mu.bias
        primals_8 = self.log_std_linear.weight
        primals_9 = self.log_std_linear.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0], output[1]
|
drib861204/Soft-Actor-Critic-and-Extensions
|
Actor
| false
| 15,232
|
[
"MIT"
] | 143
|
3075df7430c1c49177b3798d753a9e3f6226672e
|
https://github.com/drib861204/Soft-Actor-Critic-and-Extensions/tree/3075df7430c1c49177b3798d753a9e3f6226672e
|
PositionWiseFeedForward
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class GatedLinearUnit(nn.Module):
    """GLU: sigmoid(W4 x) elementwise-scales W5 x, after dropout on the input."""

    def __init__(self, input_size, output_size, dropout=0):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.w4 = nn.Linear(input_size, output_size)
        self.w5 = nn.Linear(input_size, output_size)
        self.act = nn.Sigmoid()

    def forward(self, x):
        dropped = self.dropout(x)
        gate = self.act(self.w4(dropped))
        return gate * self.w5(dropped)
class GateAddNorm(nn.Module):
    """Apply a GLU to ``x``, add the ``skip`` connection, then LayerNorm."""

    def __init__(self, input_size, output_size, dropout):
        super().__init__()
        self.glu = GatedLinearUnit(input_size, output_size, dropout)
        self.norm = nn.LayerNorm(output_size)

    def forward(self, x, skip):
        gated = self.glu(x)
        return self.norm(gated + skip)
class GatedResidualNetwork(nn.Module):
    """Gated residual network: an ELU feed-forward block with an optional
    context projection, passed through a GLU and layer-normalized around a
    residual connection."""

    def __init__(self, input_size, hidden_size, output_size, context_size=
        None, dropout=0):
        super().__init__()
        self.w1 = nn.Linear(hidden_size, hidden_size)
        self.w2 = nn.Linear(input_size, hidden_size)
        # Context projection is optional and bias-free.
        self.w3 = None if context_size is None else nn.Linear(context_size,
            hidden_size, bias=False)
        self.glu = GatedLinearUnit(hidden_size, output_size, dropout)
        self.layer_norm = nn.LayerNorm(output_size)
        # Identity shortcut when shapes already match, else a projection.
        self.residual = nn.Sequential(
            ) if input_size == output_size else nn.Linear(input_size,
            output_size)

    def forward(self, a, c=None):
        pre_act = self.w2(a)
        if c is not None:
            pre_act = pre_act + self.w3(c)
        hidden = self.w1(F.elu(pre_act))
        return self.layer_norm(self.residual(a) + self.glu(hidden))
class PositionWiseFeedForward(nn.Module):
    """Position-wise feed-forward layer: a GRN followed by gate-add-norm
    against a skip connection."""

    def __init__(self, input_size, output_size, dropout):
        super().__init__()
        self.grn = GatedResidualNetwork(input_size=input_size, hidden_size=
            input_size, output_size=output_size, dropout=dropout)
        self.gate_add_norm = GateAddNorm(input_size=input_size, output_size
            =output_size, dropout=dropout)

    def forward(self, x, skip):
        return self.gate_add_norm(self.grn(x), skip)
def get_inputs():
    """Random sample inputs (x, skip) for smoke-testing the module."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    """Constructor (args, kwargs) used to instantiate the module under test."""
    init_kwargs = {'input_size': 4, 'output_size': 4, 'dropout': 0.5}
    return [[], init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise ELU (alpha = 1.0): out = x if x > 0 else expm1(x).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel  # guard the tail block
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3  # positive branch: identity (x * 1.0)
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = tmp5 * tmp3  # negative branch: alpha * (exp(x) - 1)
    tmp7 = tl.where(tmp2, tmp4, tmp6)
    tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Stage 1 of a fused LayerNorm over rows of 4 contiguous elements:
    # per row i builds val_k = in0_k + sigmoid(in1_k) * in2_k (k = 0..3),
    # then writes the row mean to out_ptr0 and the biased variance
    # (sum of squared deviations / 4) to out_ptr1.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp16 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp23 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp2 = tl.sigmoid(tmp1)
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 + tmp4
    tmp8 = tl.sigmoid(tmp7)
    tmp10 = tmp8 * tmp9
    tmp11 = tmp6 + tmp10
    tmp12 = tmp5 + tmp11
    tmp15 = tl.sigmoid(tmp14)
    tmp17 = tmp15 * tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = tmp12 + tmp18
    tmp22 = tl.sigmoid(tmp21)
    tmp24 = tmp22 * tmp23
    tmp25 = tmp20 + tmp24
    tmp26 = tmp19 + tmp25
    tmp27 = 4.0
    tmp28 = tmp26 / tmp27  # row mean
    tmp29 = tmp5 - tmp28
    tmp30 = tmp29 * tmp29
    tmp31 = tmp11 - tmp28
    tmp32 = tmp31 * tmp31
    tmp33 = tmp30 + tmp32
    tmp34 = tmp18 - tmp28
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp25 - tmp28
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp40 = tmp39 / tmp27  # biased variance (divide by N=4)
    tl.store(out_ptr0 + x0, tmp28, xmask)
    tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_2(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Stage 2 of the fused LayerNorm: recomputes val = in0 + sigmoid(in1)*in2,
    # normalizes with the per-row mean (in_ptr3) and variance (in_ptr4) using
    # eps = 1e-05, then applies the elementwise affine gamma (in_ptr5) /
    # beta (in_ptr6).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index (mean/var are per row)
    x0 = xindex % 4  # feature index (gamma/beta are per feature)
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x2, xmask)
    tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.sigmoid(tmp1)
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 + tmp4
    tmp7 = tmp5 - tmp6
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = libdevice.rsqrt(tmp10)  # 1 / sqrt(var + eps)
    tmp12 = tmp7 * tmp11
    tmp14 = tmp12 * tmp13
    tmp16 = tmp14 + tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_3(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Same stats stage as kernel _1, but the fused value here is
    # val_k = sigmoid(in0_k) * in1_k + in2_k (GLU output plus skip).
    # Writes per-row mean to out_ptr0 and biased variance to out_ptr1.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp22 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tl.sigmoid(tmp6)
    tmp9 = tmp7 * tmp8
    tmp11 = tmp9 + tmp10
    tmp12 = tmp5 + tmp11
    tmp14 = tl.sigmoid(tmp13)
    tmp16 = tmp14 * tmp15
    tmp18 = tmp16 + tmp17
    tmp19 = tmp12 + tmp18
    tmp21 = tl.sigmoid(tmp20)
    tmp23 = tmp21 * tmp22
    tmp25 = tmp23 + tmp24
    tmp26 = tmp19 + tmp25
    tmp27 = 4.0
    tmp28 = tmp26 / tmp27  # row mean
    tmp29 = tmp5 - tmp28
    tmp30 = tmp29 * tmp29
    tmp31 = tmp11 - tmp28
    tmp32 = tmp31 * tmp31
    tmp33 = tmp30 + tmp32
    tmp34 = tmp18 - tmp28
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp25 - tmp28
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp40 = tmp39 / tmp27  # biased variance (divide by N=4)
    tl.store(out_ptr0 + x0, tmp28, xmask)
    tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_4(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Stage 2 counterpart of kernel _3: recomputes
    # val = sigmoid(in0)*in1 + in2, normalizes with the precomputed
    # mean (in_ptr3) / variance (in_ptr4) using eps = 1e-05, then applies
    # gamma (in_ptr5) and beta (in_ptr6).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index
    x0 = xindex % 4  # feature index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x2, xmask)
    tmp4 = tl.load(in_ptr2 + x2, xmask)
    tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 - tmp6
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = libdevice.rsqrt(tmp10)  # 1 / sqrt(var + eps)
    tmp12 = tmp7 * tmp11
    tmp14 = tmp12 * tmp13
    tmp16 = tmp14 + tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
    """Compiled forward graph for the position-wise feed-forward block.

    ``args`` holds the linear/GLU/LayerNorm weights and biases
    (primals_1..15, 17, 18) plus the two activations: primals_3 is the block
    input and primals_16 the skip tensor. Returns the final normalized output
    first, followed by intermediates kept for the backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
        ) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4, 4), (4, 1))
    assert_size_stride(primals_13, (4,), (1,))
    assert_size_stride(primals_14, (4, 4), (4, 1))
    assert_size_stride(primals_15, (4,), (1,))
    assert_size_stride(primals_16, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_17, (4,), (1,))
    assert_size_stride(primals_18, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # First linear (as addmm over the flattened 64x4 input) + fused ELU.
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        # Second linear on the ELU output.
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        # GLU gate and value projections (buf3 = gate head, buf4 = value head).
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
        del primals_7
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del primals_9
        # First LayerNorm: stats (buf5 = mean, buf6 = variance), then apply.
        buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(64)](
            primals_3, buf3, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_native_layer_norm_sigmoid_2[grid(256)](
            primals_3, buf3, buf4, buf5, buf6, primals_10, primals_11, buf7,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_11
        # Gate-add-norm GLU: gate head (buf8) and value head (buf9).
        buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_13, reinterpret_tensor(buf7, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf8)
        del primals_13
        buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_15, reinterpret_tensor(buf7, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf9)
        del primals_15
        # Second LayerNorm (over GLU output + skip); reuses buf5/buf6 storage.
        buf10 = buf6
        del buf6
        buf11 = buf5
        del buf5
        triton_poi_fused_add_mul_native_layer_norm_sigmoid_3[grid(64)](buf8,
            buf9, primals_16, buf10, buf11, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_native_layer_norm_sigmoid_4[grid(256)](buf8,
            buf9, primals_16, buf10, buf11, primals_17, primals_18, buf12,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf10
        del buf11
        del primals_18
    # buf12 is the block output; the rest are saved tensors for backward.
    return (buf12, primals_3, primals_10, primals_16, primals_17, buf0,
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, buf3, buf4,
        reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf8, buf9,
        primals_14, primals_12, primals_8, primals_6, primals_4)
class GatedLinearUnit(nn.Module):
    """Gated linear unit: the W5 projection scaled by a sigmoid gate on W4."""

    def __init__(self, input_size, output_size, dropout=0):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.w4 = nn.Linear(input_size, output_size)
        self.w5 = nn.Linear(input_size, output_size)
        self.act = nn.Sigmoid()

    def forward(self, x):
        h = self.dropout(x)
        return self.act(self.w4(h)) * self.w5(h)
class GateAddNorm(nn.Module):
    """LayerNorm of (GLU(x) + skip)."""

    def __init__(self, input_size, output_size, dropout):
        super().__init__()
        self.glu = GatedLinearUnit(input_size, output_size, dropout)
        self.norm = nn.LayerNorm(output_size)

    def forward(self, x, skip):
        return self.norm(self.glu(x) + skip)
class GatedResidualNetwork(nn.Module):
    """ELU feed-forward block with optional context, gated (GLU) and
    layer-normalized around a residual shortcut."""

    def __init__(self, input_size, hidden_size, output_size, context_size=
        None, dropout=0):
        super().__init__()
        self.w1 = nn.Linear(hidden_size, hidden_size)
        self.w2 = nn.Linear(input_size, hidden_size)
        # Optional, bias-free context projection.
        self.w3 = None if context_size is None else nn.Linear(context_size,
            hidden_size, bias=False)
        self.glu = GatedLinearUnit(hidden_size, output_size, dropout)
        self.layer_norm = nn.LayerNorm(output_size)
        # Identity shortcut when in/out sizes match, otherwise project.
        self.residual = nn.Sequential(
            ) if input_size == output_size else nn.Linear(input_size,
            output_size)

    def forward(self, a, c=None):
        pre = self.w2(a)
        if c is not None:
            pre = pre + self.w3(c)
        n1 = self.w1(F.elu(pre))
        return self.layer_norm(self.residual(a) + self.glu(n1))
class PositionWiseFeedForwardNew(nn.Module):
    """Triton-compiled drop-in for PositionWiseFeedForward.

    Holds the same submodules/parameters as the eager module; ``forward``
    hands them to the compiled ``call`` graph instead of executing the
    submodules directly.
    NOTE(review): the compiled graph contains no dropout kernel, so it
    matches eval-mode behavior — confirm before using it for training.
    """

    def __init__(self, input_size, output_size, dropout):
        super().__init__()
        self.grn = GatedResidualNetwork(input_size=input_size, hidden_size=
            input_size, output_size=output_size, dropout=dropout)
        self.gate_add_norm = GateAddNorm(input_size=input_size, output_size
            =output_size, dropout=dropout)

    def forward(self, input_0, input_1):
        # The positional order of these primals must match the unpacking in
        # ``call`` exactly; do not reorder.
        primals_1 = self.grn.w1.weight
        primals_2 = self.grn.w1.bias
        primals_4 = self.grn.w2.weight
        primals_5 = self.grn.w2.bias
        primals_6 = self.grn.glu.w4.weight
        primals_7 = self.grn.glu.w4.bias
        primals_8 = self.grn.glu.w5.weight
        primals_9 = self.grn.glu.w5.bias
        primals_10 = self.grn.layer_norm.weight
        primals_11 = self.grn.layer_norm.bias
        primals_12 = self.gate_add_norm.glu.w4.weight
        primals_13 = self.gate_add_norm.glu.w4.bias
        primals_14 = self.gate_add_norm.glu.w5.weight
        primals_15 = self.gate_add_norm.glu.w5.bias
        primals_17 = self.gate_add_norm.norm.weight
        primals_18 = self.gate_add_norm.norm.bias
        primals_3 = input_0
        primals_16 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18])
        # call() returns (output, *saved_for_backward); expose only the output.
        return output[0]
|
dqawami/openvino_training_extensions
|
PositionWiseFeedForward
| false
| 15,233
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
PositionwiseFeedForward
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class Identity(nn.Module):
    """No-op module: returns its input unchanged (used when LayerNorm is disabled)."""

    def forward(self, input_):
        result = input_
        return result
class LayerNormalization(nn.Module):
    """Layer normalization over the last dimension with learnable gain/bias.

    Uses the Bessel-corrected standard deviation and adds ``eps`` to the
    denominator; inputs with a singleton second dimension pass through
    untouched.
    """

    def __init__(self, d_hid, eps=0.001):
        super(LayerNormalization, self).__init__()
        self.eps = eps
        self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True)
        self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True)

    def forward(self, z):
        if z.size(1) == 1:
            # nothing meaningful to normalize for a single position
            return z
        mu = z.mean(dim=-1, keepdim=True)
        sigma = z.std(dim=-1, keepdim=True)
        normed = (z - mu) / (sigma + self.eps)
        return normed * self.a_2 + self.b_2
class PositionwiseFeedForward(nn.Module):
    """A two-feed-forward-layer module built from 1x1 convolutions.

    Computes layer_norm(dropout(w_2(relu(w_1(x^T)))^T) + x), where the
    transposes move the feature dim into Conv1d's channel position.
    """

    def __init__(self, d_hid, d_inner_hid, dropout=0.1, layer_norm=True):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Conv1d(d_hid, d_inner_hid, 1)
        self.w_2 = nn.Conv1d(d_inner_hid, d_hid, 1)
        self.layer_norm = LayerNormalization(d_hid
            ) if layer_norm else Identity()
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()

    def forward(self, x):
        residual = x
        hidden = self.relu(self.w_1(x.transpose(1, 2)))
        projected = self.w_2(hidden).transpose(2, 1)
        projected = self.dropout(projected)
        return self.layer_norm(projected + residual)
def get_inputs():
    """One random (batch, seq, d_hid) tensor for smoke tests."""
    return [torch.rand(4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) for the module under test."""
    init_kwargs = {'d_hid': 4, 'd_inner_hid': 4}
    return [[], init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Layout copy: per batch element (y1) transposes the last two 4x4 dims,
    # mapping [y1, x2, y0] (strides 16, 4, 1) to [y1, y0, x2] so the feature
    # dim becomes Conv1d's contiguous channel dim.
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place epilogue after a convolution: adds the per-channel bias
    # (in_ptr0, indexed by channel) and applies ReLU.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4  # channel index for the bias
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place bias add after the second convolution (no activation).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4  # channel index for the bias
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # Stats stage for LayerNormalization over 4 elements per row of
    # val_k = in0_k + in1_k (conv output + residual; the two inputs have
    # different strides, handling the transpose without a copy). Writes the
    # mean to out_ptr0 and the variance with divisor 3.0 (n-1, matching
    # torch.std's unbiased estimator) to in_out_ptr0.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
        )
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15  # mean over the 4 elements
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = 3.0
    tmp29 = tmp27 / tmp28  # unbiased variance (divide by n-1 = 3)
    tl.store(in_out_ptr0 + x2, tmp29, xmask)
    tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    # Apply stage for LayerNormalization:
    # out = ((conv_out + residual) - mean) / (sqrt(var) + 0.001) * a_2 + b_2,
    # matching the eager (sigma + eps) denominator with eps = 0.001.
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 4
    y1 = yindex // 4
    tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x2 + 4 * y1), xmask & ymask, eviction_policy=
        'evict_last')
    tmp5 = tl.load(in_ptr3 + (x2 + 4 * y1), xmask & ymask, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # conv output + residual
    tmp4 = tmp2 - tmp3  # subtract precomputed mean
    tmp6 = libdevice.sqrt(tmp5)
    tmp7 = 0.001  # eps from LayerNormalization's default
    tmp8 = tmp6 + tmp7
    tmp9 = tmp4 / tmp8
    tmp11 = tmp9 * tmp10  # gain a_2
    tmp13 = tmp11 + tmp12  # bias b_2
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask)
def call(args):
    """Compiled forward graph for PositionwiseFeedForward (Conv1d variant).

    args = (input x, w_1 weight/bias, w_2 weight/bias, layer-norm a_2/b_2).
    Returns the normalized output first, followed by tensors saved for the
    backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Transpose x so features sit on Conv1d's channel dim.
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        # w_1 convolution, then fused bias + ReLU in place.
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_relu_1[grid(64)](buf2, primals_3, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
        # w_2 convolution, then bias add in place.
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_5
        # LayerNormalization: stats (buf6 = variance, buf7 = mean)...
        buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf6 = buf5
        del buf5
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_mean_std_3[grid(16)](buf6, buf4, primals_1,
            buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
        # ...then normalize + affine, reusing buf0's storage transposed back.
        buf8 = reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0)
        del buf0
        triton_poi_fused_add_div_mul_sub_4[grid(16, 4)](buf4, primals_1,
            buf7, buf6, primals_6, primals_7, buf8, 16, 4, XBLOCK=4, YBLOCK
            =16, num_warps=1, num_stages=1)
        del buf6
        del buf7
        del primals_7
    # buf8 is the block output; the rest are saved tensors for backward.
    return buf8, primals_1, primals_2, primals_4, primals_6, buf2, buf4
class Identity(nn.Module):
    """Pass-through stand-in used when layer normalization is disabled."""

    def forward(self, input_):
        unchanged = input_
        return unchanged
class LayerNormalization(nn.Module):
    """Last-dim layer normalization with learnable gain (a_2) and bias (b_2).

    Normalizes with the Bessel-corrected std plus ``eps``; inputs whose
    second dimension is 1 are returned unchanged.
    """

    def __init__(self, d_hid, eps=0.001):
        super(LayerNormalization, self).__init__()
        self.eps = eps
        self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True)
        self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True)

    def forward(self, z):
        if z.size(1) == 1:
            # single position: skip normalization entirely
            return z
        center = z.mean(dim=-1, keepdim=True)
        spread = z.std(dim=-1, keepdim=True)
        scaled = (z - center) / (spread + self.eps)
        return scaled * self.a_2 + self.b_2
class PositionwiseFeedForwardNew(nn.Module):
    """ A two-feed-forward-layer module """
    # Triton-compiled drop-in for PositionwiseFeedForward: same submodules
    # and parameters, but forward routes them through the compiled ``call``
    # graph. NOTE(review): the compiled graph contains no dropout kernel, so
    # it matches eval-mode behavior — confirm before using it for training.

    def __init__(self, d_hid, d_inner_hid, dropout=0.1, layer_norm=True):
        super(PositionwiseFeedForwardNew, self).__init__()
        self.w_1 = nn.Conv1d(d_hid, d_inner_hid, 1)
        self.w_2 = nn.Conv1d(d_inner_hid, d_hid, 1)
        self.layer_norm = LayerNormalization(d_hid
            ) if layer_norm else Identity()
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()

    def forward(self, input_0):
        # The positional order of these primals must match the unpacking in
        # ``call`` exactly; do not reorder.
        primals_2 = self.w_1.weight
        primals_3 = self.w_1.bias
        primals_4 = self.w_2.weight
        primals_5 = self.w_2.bias
        primals_6 = self.layer_norm.a_2
        primals_7 = self.layer_norm.b_2
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        # call() returns (output, *saved_for_backward); expose only the output.
        return output[0]
|
dqawami/openvino_training_extensions
|
PositionwiseFeedForward
| false
| 15,234
|
[
"Apache-2.0"
] | 256
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
C3D
|
import random
import torch
import torchvision
import torch.nn.parallel
import torch.optim
from torch import nn
class GroupMultiScaleCrop(object):
    """Multi-scale crop applied consistently to a group of PIL images.

    One (crop_w, crop_h, offset) is sampled per call and applied to every
    image in the group, then each crop is resized to ``input_size``.
    NOTE(review): relies on ``Image`` (PIL) being imported at module level.
    """

    def __init__(self, input_size, scales=None, max_distort=1, fix_crop=
        True, more_fix_crop=True):
        # Bug fix: the default scale list previously contained 875, a typo
        # for 0.875 — every entry is a fraction of the short side, and the
        # in-file caller passes [1, 0.875, 0.75, 0.66].
        self.scales = scales if scales is not None else [1, 0.875, 0.75, 0.66]
        self.max_distort = max_distort
        self.fix_crop = fix_crop
        self.more_fix_crop = more_fix_crop
        self.input_size = input_size if not isinstance(input_size, int) else [
            input_size, input_size]
        self.interpolation = Image.BILINEAR

    def __call__(self, img_group):
        im_size = img_group[0].size
        crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size)
        # Same crop window for every image keeps the group aligned.
        crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w,
            offset_h + crop_h)) for img in img_group]
        ret_img_group = [img.resize((self.input_size[0], self.input_size[1]
            ), self.interpolation) for img in crop_img_group]
        return ret_img_group

    def _sample_crop_size(self, im_size):
        """Pick a (crop_w, crop_h, offset_w, offset_h) from the scale grid."""
        image_w, image_h = im_size[0], im_size[1]
        base_size = min(image_w, image_h)
        crop_sizes = [int(base_size * x) for x in self.scales]
        # Snap sizes within 3 px of the target back to the exact target.
        crop_h = [(self.input_size[1] if abs(x - self.input_size[1]) < 3 else
            x) for x in crop_sizes]
        crop_w = [(self.input_size[0] if abs(x - self.input_size[0]) < 3 else
            x) for x in crop_sizes]
        pairs = []
        # Allow aspect distortion only up to max_distort scale steps.
        for i, h in enumerate(crop_h):
            for j, w in enumerate(crop_w):
                if abs(i - j) <= self.max_distort:
                    pairs.append((w, h))
        crop_pair = random.choice(pairs)
        if not self.fix_crop:
            w_offset = random.randint(0, image_w - crop_pair[0])
            h_offset = random.randint(0, image_h - crop_pair[1])
        else:
            w_offset, h_offset = self._sample_fix_offset(image_w, image_h,
                crop_pair[0], crop_pair[1])
        return crop_pair[0], crop_pair[1], w_offset, h_offset

    def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h):
        """Choose one offset from the fixed candidate grid."""
        offsets = self.fill_fix_offset(self.more_fix_crop, image_w, image_h,
            crop_w, crop_h)
        return random.choice(offsets)

    @staticmethod
    def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h):
        """Enumerate candidate (w, h) offsets: 4 corners + center, plus 8
        extra edge/quarter positions when more_fix_crop is set."""
        w_step = (image_w - crop_w) // 4
        h_step = (image_h - crop_h) // 4
        ret = list()
        ret.append((0, 0))
        ret.append((4 * w_step, 0))
        ret.append((0, 4 * h_step))
        ret.append((4 * w_step, 4 * h_step))
        ret.append((2 * w_step, 2 * h_step))
        if more_fix_crop:
            ret.append((0, 2 * h_step))
            ret.append((4 * w_step, 2 * h_step))
            ret.append((2 * w_step, 4 * h_step))
            ret.append((2 * w_step, 0 * h_step))
            ret.append((1 * w_step, 1 * h_step))
            ret.append((3 * w_step, 1 * h_step))
            ret.append((1 * w_step, 3 * h_step))
            ret.append((3 * w_step, 3 * h_step))
        return ret
class GroupRandomHorizontalFlip(object):
    """Randomly horizontally flips the given PIL.Image with a probability of 0.5
    """

    def __init__(self, is_flow=False):
        self.is_flow = is_flow

    def __call__(self, img_group, is_flow=False):
        # One coin flip decides for the whole group so frames stay aligned.
        if random.random() < 0.5:
            flipped = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in
                img_group]
            if self.is_flow:
                # The x-component of optical flow changes sign under a
                # horizontal flip; invert every other image to compensate.
                for i in range(0, len(flipped), 2):
                    flipped[i] = ImageOps.invert(flipped[i])
            return flipped
        return img_group
class C3D(nn.Module):
    """C3D-style 3-D convolutional network with a 174-class classifier head.

    Consumes RGB clips; ``forward`` reshapes any leading batch layout to
    (-1, 3, 16, 112, 112) before the conv stack.
    """

    def __init__(self):
        super(C3D, self).__init__()
        self.modality = 'RGB'
        self.input_size = 112
        self.input_mean = [104, 117, 128]
        self.input_std = [1]
        # Five conv stages of 3x3x3 convolutions, pooled down so the
        # flattened feature entering fc6 is 8192-dimensional.
        self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
        self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2),
            padding=(0, 1, 1))
        self.fc6 = nn.Linear(8192, 4096)
        self.fc7 = nn.Linear(4096, 4096)
        self.fc8_new = nn.Linear(4096, 174)
        self.dropout = nn.Dropout(p=0.5)
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax()

    def forward(self, x):
        """Return raw class logits for a batch of clips."""
        # Removed a dead ``x.size(0)`` expression statement (its result was
        # never used). Normalize the input layout first:
        x = x.view(-1, 3, 16, 112, 112)
        h = self.relu(self.conv1(x))
        h = self.pool1(h)
        h = self.relu(self.conv2(h))
        h = self.pool2(h)
        h = self.relu(self.conv3a(h))
        h = self.relu(self.conv3b(h))
        h = self.pool3(h)
        h = self.relu(self.conv4a(h))
        h = self.relu(self.conv4b(h))
        h = self.pool4(h)
        h = self.relu(self.conv5a(h))
        h = self.relu(self.conv5b(h))
        h = self.pool5(h)
        h = h.view(-1, 8192)
        h = self.relu(self.fc6(h))
        h = self.dropout(h)
        h = self.relu(self.fc7(h))
        h = self.dropout(h)
        logits = self.fc8_new(h)
        return logits

    def partialBN(self, enable):
        """Intentionally a no-op: kept for API compatibility with callers
        that toggle partial batch-norm on other backbones."""
        pass

    @property
    def crop_size(self):
        """Spatial crop size expected by the network."""
        return self.input_size

    @property
    def scale_size(self):
        """Pre-crop resize target (short side), scaled 128/112 from input_size."""
        return self.input_size * 128 // 112

    def get_augmentation(self):
        """Training augmentation for RGB input; returns None for other modalities."""
        if self.modality == 'RGB':
            return torchvision.transforms.Compose([GroupMultiScaleCrop(self
                .input_size, [1, 0.875, 0.75, 0.66]),
                GroupRandomHorizontalFlip(is_flow=False)])
def get_inputs():
    """Return a sample argument list for C3D.forward: one random clip batch."""
    clip = torch.rand([4, 3, 16, 112, 112])
    return [clip]
def get_init_inputs():
    """C3D takes no constructor arguments: empty positionals and keywords."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import random
import torchvision
import torch.nn.parallel
import torch.optim
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused epilogue for conv1: add per-channel bias (in_ptr0) to
    # the conv output (in_out_ptr0), then apply ReLU. No bounds mask: the
    # launch grid covers xnumel exactly (see the always-true mask below).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    # 200704 = 16*112*112 elements per channel; x1 picks one of 64 channels.
    x1 = xindex // 200704 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU after conv2.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    # 50176 = 16*56*56 elements per channel; 128 output channels.
    x1 = xindex // 50176 % 128
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU shared by conv3a and conv3b.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    # 6272 = 8*28*28 elements per channel; 256 output channels.
    x1 = xindex // 6272 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU shared by conv4a and conv4b.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    # 784 = 4*14*14 elements per channel; 512 output channels.
    x1 = xindex // 784 % 512
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU shared by conv5a and conv5b.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    # 98 = 2*7*7 elements per channel; 512 output channels.
    x1 = xindex // 98 % 512
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place fused bias-add + ReLU for the fc6/fc7 matmul outputs.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 4096  # per-feature bias index (4096 FC units)
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
    """Compiled forward graph for C3D (TorchInductor output).

    args: the input clip (primals_1) followed by every conv/fc weight and
    bias in layer order. Convolutions and max-pooling run through ATen
    extern kernels; the bias-add+ReLU epilogues run through the fused
    Triton kernels above. Returns the logits (buf35) plus every
    intermediate tensor autograd needs for the backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17,
        primals_18, primals_19, primals_20, primals_21, primals_22, primals_23
        ) = args
    args.clear()
    # Shape/stride guards: the kernels below hard-code these layouts.
    assert_size_stride(primals_1, (4, 3, 16, 112, 112), (602112, 200704, 
        12544, 112, 1))
    assert_size_stride(primals_2, (64, 3, 3, 3, 3), (81, 27, 9, 3, 1))
    assert_size_stride(primals_3, (64,), (1,))
    assert_size_stride(primals_4, (128, 64, 3, 3, 3), (1728, 27, 9, 3, 1))
    assert_size_stride(primals_5, (128,), (1,))
    assert_size_stride(primals_6, (256, 128, 3, 3, 3), (3456, 27, 9, 3, 1))
    assert_size_stride(primals_7, (256,), (1,))
    assert_size_stride(primals_8, (256, 256, 3, 3, 3), (6912, 27, 9, 3, 1))
    assert_size_stride(primals_9, (256,), (1,))
    assert_size_stride(primals_10, (512, 256, 3, 3, 3), (6912, 27, 9, 3, 1))
    assert_size_stride(primals_11, (512,), (1,))
    assert_size_stride(primals_12, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
    assert_size_stride(primals_13, (512,), (1,))
    assert_size_stride(primals_14, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
    assert_size_stride(primals_15, (512,), (1,))
    assert_size_stride(primals_16, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1))
    assert_size_stride(primals_17, (512,), (1,))
    assert_size_stride(primals_18, (4096, 8192), (8192, 1))
    assert_size_stride(primals_19, (4096,), (1,))
    assert_size_stride(primals_20, (4096, 4096), (4096, 1))
    assert_size_stride(primals_21, (4096,), (1,))
    assert_size_stride(primals_22, (174, 4096), (4096, 1))
    assert_size_stride(primals_23, (174,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Stage 1: conv1 -> fused bias+ReLU -> spatial-only pool1.
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 64, 16, 112, 112), (12845056, 200704,
            12544, 112, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(51380224)](buf1, primals_3,
            51380224, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_3
        buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [1, 2,
            2], [1, 2, 2])
        buf3 = buf2[0]
        buf4 = buf2[1]
        del buf2
        # Stage 2: conv2 -> fused bias+ReLU -> pool2.
        buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1),
            padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 128, 16, 56, 56), (6422528, 50176, 
            3136, 56, 1))
        buf6 = buf5
        del buf5
        triton_poi_fused_convolution_relu_1[grid(25690112)](buf6, primals_5,
            25690112, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_5
        buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [2, 2,
            2], [2, 2, 2])
        buf8 = buf7[0]
        buf9 = buf7[1]
        del buf7
        # Stage 3: conv3a/conv3b -> fused bias+ReLU each -> pool3.
        buf10 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1, 1
            ), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 256, 8, 28, 28), (1605632, 6272, 784,
            28, 1))
        buf11 = buf10
        del buf10
        triton_poi_fused_convolution_relu_2[grid(6422528)](buf11, primals_7,
            6422528, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1,
            1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 256, 8, 28, 28), (1605632, 6272, 784,
            28, 1))
        buf13 = buf12
        del buf12
        triton_poi_fused_convolution_relu_2[grid(6422528)](buf13, primals_9,
            6422528, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_9
        buf14 = torch.ops.aten.max_pool3d_with_indices.default(buf13, [2, 2,
            2], [2, 2, 2])
        buf15 = buf14[0]
        buf16 = buf14[1]
        del buf14
        # Stage 4: conv4a/conv4b -> fused bias+ReLU each -> pool4.
        buf17 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1,
            1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf17, (4, 512, 4, 14, 14), (401408, 784, 196, 
            14, 1))
        buf18 = buf17
        del buf17
        triton_poi_fused_convolution_relu_3[grid(1605632)](buf18,
            primals_11, 1605632, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_11
        buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1,
            1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf19, (4, 512, 4, 14, 14), (401408, 784, 196, 
            14, 1))
        buf20 = buf19
        del buf19
        triton_poi_fused_convolution_relu_3[grid(1605632)](buf20,
            primals_13, 1605632, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_13
        buf21 = torch.ops.aten.max_pool3d_with_indices.default(buf20, [2, 2,
            2], [2, 2, 2])
        buf22 = buf21[0]
        buf23 = buf21[1]
        del buf21
        # Stage 5: conv5a/conv5b -> fused bias+ReLU each -> padded pool5.
        buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1,
            1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf24, (4, 512, 2, 7, 7), (50176, 98, 49, 7, 1))
        buf25 = buf24
        del buf24
        triton_poi_fused_convolution_relu_4[grid(200704)](buf25, primals_15,
            200704, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_15
        buf26 = extern_kernels.convolution(buf25, primals_16, stride=(1, 1,
            1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf26, (4, 512, 2, 7, 7), (50176, 98, 49, 7, 1))
        buf27 = buf26
        del buf26
        triton_poi_fused_convolution_relu_4[grid(200704)](buf27, primals_17,
            200704, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_17
        buf28 = torch.ops.aten.max_pool3d_with_indices.default(buf27, [2, 2,
            2], [2, 2, 2], [0, 1, 1])
        buf29 = buf28[0]
        buf30 = buf28[1]
        del buf28
        # FC head: fc6 and fc7 as matmul + fused bias+ReLU, fc8 via addmm.
        buf31 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf29, (4, 8192), (8192, 1), 0
            ), reinterpret_tensor(primals_18, (8192, 4096), (1, 8192), 0),
            out=buf31)
        buf32 = buf31
        del buf31
        triton_poi_fused_relu_5[grid(16384)](buf32, primals_19, 16384,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_19
        buf33 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
        extern_kernels.mm(buf32, reinterpret_tensor(primals_20, (4096, 4096
            ), (1, 4096), 0), out=buf33)
        buf34 = buf33
        del buf33
        triton_poi_fused_relu_5[grid(16384)](buf34, primals_21, 16384,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_21
        buf35 = empty_strided_cuda((4, 174), (174, 1), torch.float32)
        extern_kernels.addmm(primals_23, buf34, reinterpret_tensor(
            primals_22, (4096, 174), (1, 4096), 0), alpha=1, beta=1, out=buf35)
        del primals_23
    # Logits first; remaining entries are saved-for-backward tensors.
    return (buf35, primals_2, primals_4, primals_6, primals_8, primals_10,
        primals_12, primals_14, primals_16, primals_1, buf1, buf3, buf4,
        buf6, buf8, buf9, buf11, buf13, buf15, buf16, buf18, buf20, buf22,
        buf23, buf25, buf27, buf30, reinterpret_tensor(buf29, (4, 8192), (
        8192, 1), 0), buf32, buf34, primals_22, primals_20, primals_18)
class GroupMultiScaleCrop(object):
    """Crop a group of PIL images with one jointly sampled multi-scale crop.

    A crop size is drawn from ``scales`` (fractions of the short side),
    applied identically to every image in the group, then each crop is
    resized to ``input_size``.
    """

    def __init__(self, input_size, scales=None, max_distort=1, fix_crop=
        True, more_fix_crop=True):
        # BUG FIX: default scale was 875 instead of 0.875 — a scale is a
        # fraction of the short side (cf. the explicit [1, 0.875, 0.75, 0.66]
        # passed by C3D.get_augmentation), so 875 requested an 875x crop.
        self.scales = scales if scales is not None else [1, 0.875, 0.75, 0.66]
        self.max_distort = max_distort
        self.fix_crop = fix_crop
        self.more_fix_crop = more_fix_crop
        # Normalize to a [width, height] pair.
        self.input_size = input_size if not isinstance(input_size, int) else [
            input_size, input_size]
        self.interpolation = Image.BILINEAR

    def __call__(self, img_group):
        """Crop and resize every image in the group with one shared sample."""
        im_size = img_group[0].size
        crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size)
        crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w,
            offset_h + crop_h)) for img in img_group]
        ret_img_group = [img.resize((self.input_size[0], self.input_size[1]
            ), self.interpolation) for img in crop_img_group]
        return ret_img_group

    def _sample_crop_size(self, im_size):
        """Pick (crop_w, crop_h, offset_w, offset_h) for the whole group."""
        image_w, image_h = im_size[0], im_size[1]
        base_size = min(image_w, image_h)
        crop_sizes = [int(base_size * x) for x in self.scales]
        # Snap sizes within 2px of the target to the exact target size.
        crop_h = [(self.input_size[1] if abs(x - self.input_size[1]) < 3 else
            x) for x in crop_sizes]
        crop_w = [(self.input_size[0] if abs(x - self.input_size[0]) < 3 else
            x) for x in crop_sizes]
        # Only allow aspect-ratio pairs whose scale indices are close.
        pairs = []
        for i, h in enumerate(crop_h):
            for j, w in enumerate(crop_w):
                if abs(i - j) <= self.max_distort:
                    pairs.append((w, h))
        crop_pair = random.choice(pairs)
        if not self.fix_crop:
            w_offset = random.randint(0, image_w - crop_pair[0])
            h_offset = random.randint(0, image_h - crop_pair[1])
        else:
            w_offset, h_offset = self._sample_fix_offset(image_w, image_h,
                crop_pair[0], crop_pair[1])
        return crop_pair[0], crop_pair[1], w_offset, h_offset

    def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h):
        """Choose one of the fixed grid offsets uniformly at random."""
        offsets = self.fill_fix_offset(self.more_fix_crop, image_w, image_h,
            crop_w, crop_h)
        return random.choice(offsets)

    @staticmethod
    def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h):
        """Enumerate the fixed (w_offset, h_offset) candidates.

        Base set: four corners + center; ``more_fix_crop`` adds edge
        centers and quarter positions (13 candidates total).
        """
        w_step = (image_w - crop_w) // 4
        h_step = (image_h - crop_h) // 4
        ret = list()
        ret.append((0, 0))
        ret.append((4 * w_step, 0))
        ret.append((0, 4 * h_step))
        ret.append((4 * w_step, 4 * h_step))
        ret.append((2 * w_step, 2 * h_step))
        if more_fix_crop:
            ret.append((0, 2 * h_step))
            ret.append((4 * w_step, 2 * h_step))
            ret.append((2 * w_step, 4 * h_step))
            ret.append((2 * w_step, 0 * h_step))
            ret.append((1 * w_step, 1 * h_step))
            ret.append((3 * w_step, 1 * h_step))
            ret.append((1 * w_step, 3 * h_step))
            ret.append((3 * w_step, 3 * h_step))
        return ret
class GroupRandomHorizontalFlip(object):
    """Apply a left-right flip to a whole image group half of the time.

    With ``is_flow=True`` at construction, each even-indexed frame (the
    horizontal-flow channel) is inverted after the flip so the flow stays
    consistent with the mirrored frames.
    """

    def __init__(self, is_flow=False):
        self.is_flow = is_flow

    def __call__(self, img_group, is_flow=False):
        coin = random.random()
        if coin < 0.5:
            out = [im.transpose(Image.FLIP_LEFT_RIGHT) for im in img_group]
            if self.is_flow:
                for pos in range(0, len(out), 2):
                    out[pos] = ImageOps.invert(out[pos])
            return out
        return img_group
class C3DNew(nn.Module):
    """C3D variant whose forward() runs the compiled Triton graph in call().

    The layer modules exist only to own the parameters; forward() gathers
    every weight/bias and hands them to call() in the exact positional
    order that graph expects. NOTE(review): do not reorder the layer
    definitions — both checkpoints and call() depend on this ordering.
    """
    def __init__(self):
        super(C3DNew, self).__init__()
        # Preprocessing metadata consumed by the surrounding training pipeline.
        self.modality = 'RGB'
        self.input_size = 112
        self.input_mean = [104, 117, 128]
        self.input_std = [1]
        self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
        self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1,
            1, 1))
        self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2),
            padding=(0, 1, 1))
        self.fc6 = nn.Linear(8192, 4096)
        self.fc7 = nn.Linear(4096, 4096)
        self.fc8_new = nn.Linear(4096, 174)
        self.dropout = nn.Dropout(p=0.5)
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax()  # not used by forward()
    def partialBN(self, enable):
        # Intentional no-op: this backbone defines no BatchNorm layers.
        pass
    @property
    def crop_size(self):
        """Side length of the square crop fed to the network."""
        return self.input_size
    @property
    def scale_size(self):
        """Pre-crop resize target (128 for the default 112 input)."""
        return self.input_size * 128 // 112
    def get_augmentation(self):
        # Implicitly returns None for non-RGB modalities.
        if self.modality == 'RGB':
            return torchvision.transforms.Compose([GroupMultiScaleCrop(self
                .input_size, [1, 0.875, 0.75, 0.66]),
                GroupRandomHorizontalFlip(is_flow=False)])
    def forward(self, input_0):
        # Gather parameters in the positional order call() expects.
        primals_2 = self.conv1.weight
        primals_3 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3a.weight
        primals_7 = self.conv3a.bias
        primals_8 = self.conv3b.weight
        primals_9 = self.conv3b.bias
        primals_10 = self.conv4a.weight
        primals_11 = self.conv4a.bias
        primals_12 = self.conv4b.weight
        primals_13 = self.conv4b.bias
        primals_14 = self.conv5a.weight
        primals_15 = self.conv5a.bias
        primals_16 = self.conv5b.weight
        primals_17 = self.conv5b.bias
        primals_18 = self.fc6.weight
        primals_19 = self.fc6.bias
        primals_20 = self.fc7.weight
        primals_21 = self.fc7.bias
        primals_22 = self.fc8_new.weight
        primals_23 = self.fc8_new.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23])
        # call() returns the logits first, then saved-for-backward tensors.
        return output[0]
|
coderSkyChen/Action_Recognition_Zoo
|
C3D
| false
| 15,235
|
[
"MIT"
] | 240
|
92ec5ec3efeee852aec5c057798298cd3a8e58ae
|
https://github.com/coderSkyChen/Action_Recognition_Zoo/tree/92ec5ec3efeee852aec5c057798298cd3a8e58ae
|
DeepCritic
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
    """Return the symmetric uniform-init range (-1/sqrt(fan_in), 1/sqrt(fan_in)).

    ``fan_in`` is taken as the first dimension of the layer's weight
    tensor, following the DDPG initialization scheme.
    """
    fan_in = layer.weight.data.size()[0]
    bound = 1.0 / np.sqrt(fan_in)
    return -bound, bound
class DeepCritic(nn.Module):
    """Critic (Value) Model with dense skip connections.

    Every hidden layer after fc1 re-reads the raw (state, action) pair:
    the input to fc2..fc4 is the previous activation concatenated with
    the original concatenated input ``xu``.
    """

    def __init__(self, state_size, action_size, seed, device, hidden_size=32):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            hidden_size (int): Number of nodes in the network layers
        """
        super(DeepCritic, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.device = device
        # Hidden layers consume the previous activation plus the raw pair.
        in_dim = hidden_size + action_size + state_size
        self.fc1 = nn.Linear(state_size + action_size, hidden_size)
        self.fc2 = nn.Linear(in_dim, hidden_size)
        self.fc3 = nn.Linear(in_dim, hidden_size)
        self.fc4 = nn.Linear(in_dim, hidden_size)
        self.fc5 = nn.Linear(hidden_size, 1)
        self.reset_parameters()

    def reset_parameters(self):
        """Fan-in uniform init for hidden layers; small uniform for the head."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        # FIX: fc3/fc4 previously reused hidden_init(self.fc2) (copy-paste).
        # Numerically identical today because the shapes match, but fragile
        # if the layer shapes ever diverge.
        self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
        self.fc4.weight.data.uniform_(*hidden_init(self.fc4))
        self.fc5.weight.data.uniform_(-0.003, 0.003)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        xu = torch.cat((state, action), dim=1)
        x = F.relu(self.fc1(xu))
        # Dense skip: each hidden layer sees the raw input pair again.
        for layer in (self.fc2, self.fc3, self.fc4):
            x = F.relu(layer(torch.cat([x, xu], dim=1)))
        return self.fc5(x)
def get_inputs():
    """Sample forward() arguments: a random (state, action) batch pair."""
    return [torch.rand([4, 4]) for _ in range(2)]
def get_init_inputs():
    """Constructor arguments: no positionals, keyword config for a 4-dim env."""
    kwargs = {'state_size': 4, 'action_size': 4, 'seed': 4, 'device': 0}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # torch.cat((state, action), dim=1): writes a (4, 8) row-major output
    # where columns 0-3 come from in_ptr0 and columns 4-7 from in_ptr1.
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8  # output column
    x1 = xindex // 8  # output row (batch index)
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # column belongs to the first input
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Fuses the hidden-layer epilogue with the dense-skip concatenation:
    # out = cat(relu(matmul_out + bias), xu) -> shape (4, 40).
    # in_ptr0: (4, 32) matmul output, in_ptr1: (32,) bias, in_ptr2: (4, 8) xu.
    xnumel = 160
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 40  # output column
    x1 = xindex // 40  # output row (batch index)
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 32, tl.int64)
    tmp4 = tmp0 < tmp3  # columns 0-31: bias-added, ReLU'd activation
    tmp5 = tl.load(in_ptr0 + (32 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full([1], 0, tl.int32)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)  # ReLU
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp0 >= tmp3  # columns 32-39: raw (state, action) pair
    tl.full([1], 40, tl.int64)
    tmp15 = tl.load(in_ptr2 + (8 * x1 + (-32 + x0)), tmp12 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tl.where(tmp4, tmp11, tmp15)
    tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place fused bias-add + ReLU for a (4, 32) linear-layer output.
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 32  # per-feature bias index
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # Recomputes (matmul_out + bias) and stores the boolean mask
    # relu(x) <= 0, which the ReLU backward pass uses to zero gradients.
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 32  # per-feature bias index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # True where the unit was inactive
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    """Compiled forward graph for DeepCritic (TorchInductor output).

    args: (state, action) then fc1..fc5 weights and biases. Matmuls run
    via extern kernels; concatenation and bias+ReLU epilogues run via the
    fused Triton kernels above. Returns the Q-value (buf10) first, then
    every tensor the backward pass needs (including the ReLU masks).
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
        ) = args
    args.clear()
    # Shape/stride guards: the kernels below hard-code these layouts.
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (32, 8), (8, 1))
    assert_size_stride(primals_4, (32,), (1,))
    assert_size_stride(primals_5, (32, 40), (40, 1))
    assert_size_stride(primals_6, (32,), (1,))
    assert_size_stride(primals_7, (32, 40), (40, 1))
    assert_size_stride(primals_8, (32,), (1,))
    assert_size_stride(primals_9, (32, 40), (40, 1))
    assert_size_stride(primals_10, (32,), (1,))
    assert_size_stride(primals_11, (1, 32), (32, 1))
    assert_size_stride(primals_12, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0 = xu = cat(state, action).
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        # fc1 matmul; bias+ReLU+skip-concat fused into the next kernel.
        buf1 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 32), (1,
            8), 0), out=buf1)
        del primals_3
        buf2 = empty_strided_cuda((4, 40), (40, 1), torch.float32)
        triton_poi_fused_cat_1[grid(160)](buf1, primals_4, buf0, buf2, 160,
            XBLOCK=256, num_warps=4, num_stages=1)
        # fc2 and fc3 follow the same matmul + fused epilogue pattern.
        buf3 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (40, 32), (1,
            40), 0), out=buf3)
        buf4 = empty_strided_cuda((4, 40), (40, 1), torch.float32)
        triton_poi_fused_cat_1[grid(160)](buf3, primals_6, buf0, buf4, 160,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf5 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
        extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (40, 32), (1,
            40), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 40), (40, 1), torch.float32)
        triton_poi_fused_cat_1[grid(160)](buf5, primals_8, buf0, buf6, 160,
            XBLOCK=256, num_warps=4, num_stages=1)
        # fc4: matmul then in-place bias+ReLU (no further concatenation).
        buf7 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
        extern_kernels.mm(buf6, reinterpret_tensor(primals_9, (40, 32), (1,
            40), 0), out=buf7)
        buf8 = buf7
        del buf7
        triton_poi_fused_relu_2[grid(128)](buf8, primals_10, 128, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_10
        # fc5 head: fused bias + matmul -> the scalar Q-value.
        buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_12, buf8, reinterpret_tensor(
            primals_11, (32, 1), (1, 32), 0), alpha=1, beta=1, out=buf10)
        del primals_12
        # ReLU inactivity masks for the backward pass of fc4/fc3/fc2.
        buf11 = empty_strided_cuda((4, 32), (32, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_3[grid(128)](buf5,
            primals_8, buf11, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf5
        del primals_8
        buf12 = empty_strided_cuda((4, 32), (32, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_3[grid(128)](buf3,
            primals_6, buf12, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf3
        del primals_6
        buf13 = empty_strided_cuda((4, 32), (32, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_3[grid(128)](buf1,
            primals_4, buf13, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
        del primals_4
    return (buf10, buf0, buf2, buf4, buf6, buf8, primals_11, primals_9,
        buf11, primals_7, buf12, primals_5, buf13)
def hidden_init(layer):
    """Compute the (low, high) uniform-init interval from the layer fan-in.

    fan_in is the leading dimension of the weight tensor; the interval is
    symmetric with half-width 1/sqrt(fan_in) (DDPG-style initialization).
    """
    fan_in = layer.weight.data.size()[0]
    limit = 1.0 / np.sqrt(fan_in)
    return -limit, limit
class DeepCriticNew(nn.Module):
    """Critic (Value) Model whose forward() runs the compiled Triton graph.

    The Linear modules exist to own the parameters; forward() hands every
    weight/bias plus the (state, action) pair to call() in the positional
    order that graph expects and returns only the Q-value tensor.
    """

    def __init__(self, state_size, action_size, seed, device, hidden_size=32):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            hidden_size (int): Number of nodes in the network layers
        """
        super(DeepCriticNew, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.device = device
        # Hidden layers consume the previous activation plus the raw pair.
        in_dim = hidden_size + action_size + state_size
        self.fc1 = nn.Linear(state_size + action_size, hidden_size)
        self.fc2 = nn.Linear(in_dim, hidden_size)
        self.fc3 = nn.Linear(in_dim, hidden_size)
        self.fc4 = nn.Linear(in_dim, hidden_size)
        self.fc5 = nn.Linear(hidden_size, 1)
        self.reset_parameters()

    def reset_parameters(self):
        """Fan-in uniform init for hidden layers; small uniform for the head."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        # FIX: fc3/fc4 previously reused hidden_init(self.fc2) (copy-paste).
        # Numerically identical today because the shapes match, but fragile
        # if the layer shapes ever diverge.
        self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
        self.fc4.weight.data.uniform_(*hidden_init(self.fc4))
        self.fc5.weight.data.uniform_(-0.003, 0.003)

    def forward(self, input_0, input_1):
        """Run the compiled graph on (state, action); return the Q-value."""
        primals_3 = self.fc1.weight
        primals_4 = self.fc1.bias
        primals_5 = self.fc2.weight
        primals_6 = self.fc2.bias
        primals_7 = self.fc3.weight
        primals_8 = self.fc3.bias
        primals_9 = self.fc4.weight
        primals_10 = self.fc4.bias
        primals_11 = self.fc5.weight
        primals_12 = self.fc5.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12])
        # call() returns the Q-value first, then saved-for-backward tensors.
        return output[0]
|
drib861204/Soft-Actor-Critic-and-Extensions
|
DeepCritic
| false
| 15,236
|
[
"MIT"
] | 143
|
3075df7430c1c49177b3798d753a9e3f6226672e
|
https://github.com/drib861204/Soft-Actor-Critic-and-Extensions/tree/3075df7430c1c49177b3798d753a9e3f6226672e
|
Critic
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
    """Symmetric uniform bound derived from the layer's fan-in.

    Returns (-1/sqrt(fan_in), 1/sqrt(fan_in)), where fan_in is the first
    dimension of the weight tensor.
    """
    fan_in = layer.weight.data.size()[0]
    half_width = 1.0 / np.sqrt(fan_in)
    return -half_width, half_width
class Critic(nn.Module):
    """Critic (Value) Model: a plain 2-hidden-layer MLP over (state, action)."""

    def __init__(self, state_size, action_size, seed, device, hidden_size=32):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            hidden_size (int): Number of nodes in the network layers
        """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.device = device
        self.fc1 = nn.Linear(state_size + action_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, 1)
        self.reset_parameters()

    def reset_parameters(self):
        """Fan-in uniform init for hidden layers; small uniform for the head."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-0.003, 0.003)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        features = torch.cat((state, action), dim=1)
        features = F.relu(self.fc1(features))
        features = F.relu(self.fc2(features))
        return self.fc3(features)
def get_inputs():
    """Sample forward() arguments: a random (state, action) batch pair."""
    state, action = torch.rand([4, 4]), torch.rand([4, 4])
    return [state, action]
def get_init_inputs():
    """Constructor arguments: no positionals, keyword config for a 4-dim env."""
    return [[], dict(state_size=4, action_size=4, seed=4, device=0)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # torch.cat((state, action), dim=1): writes a (4, 8) row-major output
    # where columns 0-3 come from in_ptr0 and columns 4-7 from in_ptr1.
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8  # output column
    x1 = xindex // 8  # output row (batch index)
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # column belongs to the first input
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place fused bias-add + ReLU for a (4, 32) linear-layer output.
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 32  # per-feature bias index
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """Compiled forward graph for Critic (TorchInductor output).

    args: (state, action) then fc1..fc3 weights and biases. Matmuls run
    via extern kernels; the concatenation and bias+ReLU epilogues run via
    the fused Triton kernels above. Returns the Q-value (buf6) first,
    then the tensors saved for the backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    # Shape/stride guards: the kernels below hard-code these layouts.
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (32, 8), (8, 1))
    assert_size_stride(primals_4, (32,), (1,))
    assert_size_stride(primals_5, (32, 32), (32, 1))
    assert_size_stride(primals_6, (32,), (1,))
    assert_size_stride(primals_7, (1, 32), (32, 1))
    assert_size_stride(primals_8, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0 = cat(state, action).
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        # fc1: matmul then in-place bias+ReLU.
        buf1 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 32), (1,
            8), 0), out=buf1)
        del primals_3
        buf2 = buf1
        del buf1
        triton_poi_fused_relu_1[grid(128)](buf2, primals_4, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_4
        # fc2: same matmul + fused epilogue pattern.
        buf3 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (32, 32), (1,
            32), 0), out=buf3)
        buf4 = buf3
        del buf3
        triton_poi_fused_relu_1[grid(128)](buf4, primals_6, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_6
        # fc3 head: fused bias + matmul -> the scalar Q-value.
        buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
            (32, 1), (1, 32), 0), alpha=1, beta=1, out=buf6)
        del primals_8
    return buf6, buf0, buf2, buf4, primals_7, primals_5
def hidden_init(layer):
    """Return the symmetric fan-in-based uniform-init range for a layer.

    The half-width is 1/sqrt(fan_in) with fan_in taken from the leading
    weight dimension (DDPG-style initialization).
    """
    fan_in = layer.weight.data.size()[0]
    radius = 1.0 / np.sqrt(fan_in)
    return -radius, radius
class CriticNew(nn.Module):
    """Critic (Value) Model whose forward() runs the compiled Triton graph.

    The Linear modules exist to own the parameters; forward() hands every
    weight/bias plus the (state, action) pair to call() in the positional
    order that graph expects and returns only the Q-value tensor.
    """

    def __init__(self, state_size, action_size, seed, device, hidden_size=32):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            hidden_size (int): Number of nodes in the network layers
        """
        super(CriticNew, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.device = device
        self.fc1 = nn.Linear(state_size + action_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, 1)
        self.reset_parameters()

    def reset_parameters(self):
        """Fan-in uniform init for hidden layers; small uniform for the head."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-0.003, 0.003)

    def forward(self, input_0, input_1):
        """Run the compiled graph on (state, action); return the Q-value."""
        graph_args = [
            input_0,
            input_1,
            self.fc1.weight,
            self.fc1.bias,
            self.fc2.weight,
            self.fc2.bias,
            self.fc3.weight,
            self.fc3.bias,
        ]
        # call() returns the Q-value first, then saved-for-backward tensors.
        return call(graph_args)[0]
|
drib861204/Soft-Actor-Critic-and-Extensions
|
Critic
| false
| 15,237
|
[
"MIT"
] | 143
|
3075df7430c1c49177b3798d753a9e3f6226672e
|
https://github.com/drib861204/Soft-Actor-Critic-and-Extensions/tree/3075df7430c1c49177b3798d753a9e3f6226672e
|
SubNet
|
import torch
import torch.nn as nn
class SubNet(nn.Module):
    """
    The subnetwork that is used in TFN for video and audio in the pre-fusion stage.

    Architecture: dropout -> linear_1 -> tanh -> linear_2 -> tanh -> linear_3.
    """

    def __init__(self, in_size, hidden_size, n_class, dropout, modal_name=
        'text'):
        """
        Args:
            in_size: input dimension
            hidden_size: hidden layer dimension
            n_class: output dimension of the final projection
            dropout: dropout probability
            modal_name: unused; kept for interface compatibility
        """
        super(SubNet, self).__init__()
        self.drop = nn.Dropout(p=dropout)
        self.linear_1 = nn.Linear(in_size, hidden_size)
        self.linear_2 = nn.Linear(hidden_size, hidden_size)
        self.linear_3 = nn.Linear(hidden_size, n_class)

    def forward(self, x):
        """
        Args:
            x: tensor of shape (batch_size, in_size)
        Returns:
            (y_2, y_3): hidden representation of shape (batch_size, hidden_size)
            and logits of shape (batch_size, n_class).
        """
        dropped = self.drop(x)
        y_1 = torch.tanh(self.linear_1(dropped))
        # Bug fix: the original also called `self.linear_2(y_1)` on its own
        # line and discarded the result — a pure waste of a forward pass.
        y_2 = torch.tanh(self.linear_2(y_1))
        y_3 = self.linear_3(y_2)
        return y_2, y_3
def get_inputs():
    # Single random 4-D activation tensor for smoke-testing SubNet.
    sample = torch.rand(4, 4, 4, 4)
    return [sample]
def get_init_inputs():
    # Positional args (none) plus keyword args for SubNet.__init__.
    kwargs = {'in_size': 4, 'hidden_size': 4, 'n_class': 4, 'dropout': 0.5}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused bias-add + tanh, applied in place over 256 elements:
    # out[i] = tanh(out[i] + bias[i % 4]).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex       # flat element index
    x0 = xindex % 4   # index into the length-4 bias vector
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
    # Compiled SubNet forward: x -> linear_1 -> tanh -> linear_2 -> tanh
    # -> linear_3. The 4-D input is viewed as a (64, 4) matrix for matmuls.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        # In-place bias + tanh for layer 1.
        triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        # In-place bias + tanh for layer 2 (same kernel reused).
        triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Final projection with its bias folded into addmm (beta=1).
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
    # First two entries are (y_2, y_3); the rest are saved for backward.
    return buf3, reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
        ), buf1, buf3, primals_6, primals_4
class SubNetNew(nn.Module):
    """
    The subnetwork that is used in TFN for video and audio in the pre-fusion stage

    Compiled variant: ``forward`` dispatches to the Inductor ``call`` above.
    NOTE(review): the compiled graph starts directly with the first matmul,
    so ``self.drop`` is never applied on this path (inference-mode trace).
    """

    def __init__(self, in_size, hidden_size, n_class, dropout, modal_name=
        'text'):
        """
        Args:
            in_size: input dimension
            hidden_size: hidden layer dimension
            dropout: dropout probability
        Output:
            (return value in forward) a tensor of shape (batch_size, hidden_size)
        """
        super(SubNetNew, self).__init__()
        self.drop = nn.Dropout(p=dropout)
        self.linear_1 = nn.Linear(in_size, hidden_size)
        self.linear_2 = nn.Linear(hidden_size, hidden_size)
        self.linear_3 = nn.Linear(hidden_size, n_class)

    def forward(self, input_0):
        # Weights/biases of the three linear layers feed the compiled graph.
        primals_2 = self.linear_1.weight
        primals_3 = self.linear_1.bias
        primals_4 = self.linear_2.weight
        primals_5 = self.linear_2.bias
        primals_6 = self.linear_3.weight
        primals_7 = self.linear_3.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        # output[0] = y_2 (tanh features), output[1] = y_3 (logits).
        return output[0], output[1]
|
dumpmemory/Multimodal-Infomax
|
SubNet
| false
| 15,238
|
[
"MIT"
] | 57
|
9a6dc8f2bfa861cd447ba65c6a037cd7dd24f473
|
https://github.com/dumpmemory/Multimodal-Infomax/tree/9a6dc8f2bfa861cd447ba65c6a037cd7dd24f473
|
CondInjection
|
import torch
from torch import nn
class CondInjection(nn.Module):
    """Adds learnable-scaled noise to an image batch: out = image + weight * noise.

    ``labels`` is accepted for interface compatibility but does not affect the
    output. (The original implementation reshaped ``labels`` and computed a
    label-dependent tensor that it immediately discarded; that dead code is
    removed here.)
    """

    def __init__(self):
        super().__init__()
        # Scalar noise gain, initialised to 0 so the module starts as identity.
        self.weight = nn.Parameter(torch.zeros(1))

    def forward(self, image, labels, noise=None):
        if noise is None:
            # Sample one N(0, 1) noise channel per image, broadcast over channels.
            batch, _, height, width = image.shape
            noise = image.new_empty(batch, 1, height, width).normal_()
        return image + self.weight * noise
def get_inputs():
    # (image, labels) tensors sized for CondInjection's compiled graph.
    image = torch.rand(256, 4, 4, 4)
    labels = torch.rand(4, 4, 4, 4)
    return [image, labels]
def get_init_inputs():
    # CondInjection takes no constructor arguments.
    positional, keyword = [], {}
    return [positional, keyword]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # out = image + weight * noise, with the single-channel noise broadcast
    # over the image's channel dimension.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 16    # offset within one (4, 4) spatial plane
    x2 = xindex // 64   # batch index
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + 0)   # scalar gain (self.weight)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), None, eviction_policy='evict_last')
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 + tmp4
    tl.store(out_ptr0 + x3, tmp5, None)
def call(args):
    # Compiled CondInjection forward: sample Gaussian noise (labels are
    # checked but never read by the kernel) and fuse image + weight * noise.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (256, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1,), (1,))
    buf0 = empty_strided_cuda((256, 1, 4, 4), (16, 16, 4, 1), torch.float32)
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf2: per-image single-channel N(0, 1) noise.
        buf1 = torch.ops.aten.normal_functional.default(buf0)
        del buf0
        buf2 = buf1
        del buf1
        buf3 = empty_strided_cuda((256, 4, 4, 4), (64, 16, 4, 1), torch.float32
            )
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(16384)](primals_1, primals_3, buf2,
            buf3, 16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_3
    # Return the result and the sampled noise (saved for backward).
    return buf3, buf2
class CondInjectionNew(nn.Module):
    """Compiled CondInjection: out = image + weight * noise (noise sampled inside).

    NOTE(review): input_1 (labels) is passed into ``call`` but never read by
    the kernels, mirroring the original module's dead label code.
    """

    def __init__(self):
        super().__init__()
        # Scalar noise gain, initialised to zero.
        self.weight = nn.Parameter(torch.zeros(1))

    def forward(self, input_0, input_1):
        primals_3 = self.weight
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
dubtor/EditGAN-Robert
|
CondInjection
| false
| 15,239
|
[
"BSD-2-Clause"
] | 110
|
8e6d80e7647c3536827f11cf0a9abf51c42794b2
|
https://github.com/dubtor/EditGAN-Robert/tree/8e6d80e7647c3536827f11cf0a9abf51c42794b2
|
DiceLoss
|
import torch
from torch import nn
class DiceLoss(nn.Module):
    """Dice loss for imbalanced data (can be hard to converge / unstable).

    paper: Dice Loss for Data-imbalanced NLP Tasks
    url: https://arxiv.org/pdf/1911.02855.pdf

    loss = 1 - 2 * sum(sigmoid(logits) * labels + eps)
               / sum(sigmoid(logits) + labels + eps)

    examples:
        >>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
        >>> label, logits = torch.tensor(label).long(), torch.tensor(logits).float()
        >>> loss = DiceLoss()(logits, label)
    """

    def __init__(self, epsilon=1e-09):
        super(DiceLoss, self).__init__()
        self.epsilon = epsilon

    def forward(self, logits, labels):
        probs = torch.sigmoid(logits)
        numerator = (probs * labels + self.epsilon).sum()
        denominator = (probs + labels + self.epsilon).sum()
        return 1 - 2 * numerator / denominator
def get_inputs():
    # (logits, labels) shaped alike for the Dice loss.
    shape = (4, 4, 4, 4)
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    # DiceLoss is constructed with all-default arguments.
    positional, keyword = [], {}
    return [positional, keyword]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel):
    # Single-program reduction computing the entire Dice loss over 256 elements:
    # 1 - 2*sum(sigmoid(x)*y + 1e-9) / sum(sigmoid(x) + y + 1e-9).
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)   # logits
    tmp2 = tl.load(in_ptr1 + r0, None)   # labels
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2
    tmp4 = 1e-09
    tmp5 = tmp3 + tmp4
    tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
    tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))   # intersect.sum()
    tmp9 = tmp1 + tmp2
    tmp10 = tmp9 + tmp4
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))  # unionset.sum()
    tmp14 = 2.0
    tmp15 = tmp8 * tmp14
    tmp16 = tmp15 / tmp13
    tmp17 = 1.0
    tmp18 = tmp17 - tmp16
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
    # Compiled DiceLoss forward: one single-block reduction kernel produces
    # the scalar loss from (logits, labels), 256 elements each.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # 0-d output buffer; the kernel writes the scalar loss in place.
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_mul_rsub_sigmoid_sum_0[grid(1)](buf2,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,
class DiceLossNew(nn.Module):
    def __init__(self, epsilon=1e-09):
        """Dice loss for imbalanced data (can be hard to converge / unstable).
        paper: Dice Loss for Data-imbalanced NLP Tasks
        url: https://arxiv.org/pdf/1911.02855.pdf
        args:
            epsilon: float, smoothing term added to numerator and denominator. eg. 1e-9
        returns:
            Tensor of loss.
        examples:
            >>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
            >>> label, logits = torch.tensor(label).long(), torch.tensor(logits).float()
            >>> loss = DiceLoss()(logits, label)
        """
        super(DiceLossNew, self).__init__()
        # NOTE(review): the compiled kernel hard-codes 1e-09; passing a
        # different epsilon has no effect on the Triton path.
        self.epsilon = epsilon

    def forward(self, input_0, input_1):
        # input_0: logits, input_1: labels; both (4, 4, 4, 4) float tensors.
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
|
dumpmemory/Pytorch-NLU
|
DiceLoss
| false
| 15,240
|
[
"Apache-2.0"
] | 115
|
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
https://github.com/dumpmemory/Pytorch-NLU/tree/864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
LayerNormLSTMCell
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
class LayerNormLSTMCell(nn.LSTMCell):
    """An LSTM cell with layer normalisation applied to the input/hidden gate
    projections and to the cell state feeding the output gate."""

    def __init__(self, input_size, hidden_size, bias=True):
        super().__init__(input_size, hidden_size, bias)
        self.ln_ih = nn.LayerNorm(4 * hidden_size)
        self.ln_hh = nn.LayerNorm(4 * hidden_size)
        self.ln_ho = nn.LayerNorm(hidden_size)

    def forward(self, input, hidden=None):
        if hidden is not None:
            hx, cx = hidden
        else:
            # Default to zero initial states matching the batch size.
            hx = input.new_zeros(input.size(0), self.hidden_size,
                requires_grad=False)
            cx = input.new_zeros(input.size(0), self.hidden_size,
                requires_grad=False)
        gates = (self.ln_ih(F.linear(input, self.weight_ih, self.bias_ih))
                 + self.ln_hh(F.linear(hx, self.weight_hh, self.bias_hh)))
        # First three chunks are the input/forget/output gates; last is the candidate.
        sig_gates = gates[:, :3 * self.hidden_size].sigmoid()
        i, f, o = sig_gates.chunk(3, 1)
        g = gates[:, 3 * self.hidden_size:].tanh()
        cy = f * cx + i * g
        hy = o * self.ln_ho(cy).tanh()
        return hy, cy
def get_inputs():
    # One random (batch, input_size) tensor for the LSTM cell.
    batch = torch.rand(4, 4)
    return [batch]
def get_init_inputs():
    # Constructor args for LayerNormLSTMCell.
    kwargs = {'input_size': 4, 'hidden_size': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_per_fused_add_native_layer_norm_1(in_out_ptr0, in_out_ptr1,
    in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0,
    out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Fused double layer-norm + add over 4 rows of 16 gate values each:
    # layer-normalises both gate projections (in_ptr0/in_ptr1), applies the
    # per-feature affine params (in_ptr2..in_ptr5), and sums the two results
    # into out_ptr2. Means (out_ptr0/1) and rstds (in_out_ptr0/1) are also
    # written back for the backward pass.
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp22 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
    tmp42 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
    tmp44 = tl.load(in_ptr3 + r1, None, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr4 + r1, None, eviction_policy='evict_last')
    tmp50 = tl.load(in_ptr5 + r1, None, eviction_policy='evict_last')
    # Row mean and rstd of the first projection.
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = 16.0
    tmp18 = tmp16 / tmp17
    tmp19 = 1e-05
    tmp20 = tmp18 + tmp19
    tmp21 = libdevice.rsqrt(tmp20)
    # Row mean and rstd of the second projection.
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp23, 0)
    tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
    tmp28 = tl.where(xmask, tmp26, 0)
    tmp29 = tl.sum(tmp28, 1)[:, None]
    tmp30 = tmp29 / tmp9
    tmp31 = tmp23 - tmp30
    tmp32 = tmp31 * tmp31
    tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
    tmp35 = tl.where(xmask, tmp33, 0)
    tmp36 = tl.sum(tmp35, 1)[:, None]
    tmp37 = tmp36 / tmp17
    tmp38 = tmp37 + tmp19
    tmp39 = libdevice.rsqrt(tmp38)
    # Normalise, apply affine (scale, shift) to each branch, then add.
    tmp40 = tmp0 - tmp10
    tmp41 = tmp40 * tmp21
    tmp43 = tmp41 * tmp42
    tmp45 = tmp43 + tmp44
    tmp46 = tmp22 - tmp30
    tmp47 = tmp46 * tmp39
    tmp49 = tmp47 * tmp48
    tmp51 = tmp49 + tmp50
    tmp52 = tmp45 + tmp51
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp21, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + x0, tmp39, xmask)
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp52, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)
    tl.store(out_ptr1 + x0, tmp30, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Sigmoid over the first 12 of each row's 16 gate activations
    # (the i/f/o gates), packed into a (4, 12) output.
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 12   # position within the 12 sigmoid gates
    x1 = xindex // 12  # batch row
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.sigmoid(tmp0)
    tl.store(out_ptr0 + x2, tmp1, xmask)
@triton.jit
def triton_poi_fused_add_mul_tanh_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # cy = f * cx + i * tanh(g). cx is the zero initial state, so the f-term
    # is a literal multiply by 0.0 (constant-folded by the tracer).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)   # f gate
    tmp3 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)       # i gate
    tmp4 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)  # g pre-activation
    tmp1 = 0.0
    tmp2 = tmp0 * tmp1
    tmp5 = libdevice.tanh(tmp4)
    tmp6 = tmp3 * tmp5
    tmp7 = tmp2 + tmp6
    tl.store(out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # Per-row layer-norm statistics of the 4-wide cell state cy:
    # out_ptr0 = mean, out_ptr1 = rsqrt(var + 1e-5). The norm itself is
    # applied in the next kernel.
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    # Mean of the four elements.
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    # Variance accumulated term by term.
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_mul_native_layer_norm_tanh_5(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # hy = o * tanh(ln_ho(cy)): normalise cy with the precomputed mean/rstd,
    # apply ln_ho's affine (weight, bias), tanh, then gate with o.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)  # o gate
    tmp1 = tl.load(in_ptr1 + x2, xmask)                  # cy
    tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')  # mean
    tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')  # rstd
    tmp6 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')  # ln_ho weight
    tmp8 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')  # ln_ho bias
    tmp3 = tmp1 - tmp2
    tmp5 = tmp3 * tmp4
    tmp7 = tmp5 * tmp6
    tmp9 = tmp7 + tmp8
    tmp10 = libdevice.tanh(tmp9)
    tmp11 = tmp0 * tmp10
    tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
    # Compiled LayerNormLSTMCell forward for the hidden=None case (hx = cx = 0):
    # two addmm gate projections, fused double layer-norm + add, sigmoid on the
    # i/f/o gates, cy = f*0 + i*tanh(g), then hy = o * tanh(ln_ho(cy)).
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (16, 4), (4, 1))
    assert_size_stride(primals_3, (16,), (1,))
    assert_size_stride(primals_4, (16,), (1,))
    assert_size_stride(primals_5, (16,), (1,))
    assert_size_stride(primals_6, (16, 4), (4, 1))
    assert_size_stride(primals_7, (16,), (1,))
    assert_size_stride(primals_8, (16,), (1,))
    assert_size_stride(primals_9, (16,), (1,))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0: zero (4, 4) buffer standing in for the initial hidden state.
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        # Two gate projections (input-side and hidden-side) via biased addmm.
        buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
            primals_2, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_2
        del primals_3
        buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_7, buf0, reinterpret_tensor(primals_6,
            (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf6)
        del primals_6
        del primals_7
        # buf11 = LN(buf1)*a1+b1 + LN(buf6)*a2+b2; buf2/buf5 and buf7/buf10
        # receive the two layer norms' means and rstds.
        buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0)
        del buf3
        buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        buf8 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf10 = reinterpret_tensor(buf8, (4, 1), (1, 1), 0)
        del buf8
        buf11 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        triton_per_fused_add_native_layer_norm_1[grid(4)](buf5, buf10, buf1,
            buf6, primals_4, primals_5, primals_8, primals_9, buf2, buf7,
            buf11, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
        # buf12: sigmoid of the first 12 of each 16 gate values (i/f/o).
        buf12 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
        triton_poi_fused_sigmoid_2[grid(48)](buf11, buf12, 48, XBLOCK=64,
            num_warps=1, num_stages=1)
        # buf13 = cy; the f*cx term is zero here.
        buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_mul_tanh_3[grid(16)](buf12, buf11, buf13, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del buf11
        # ln_ho statistics over cy, then hy = o * tanh(ln_ho(cy)).
        buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf15 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_poi_fused_native_layer_norm_4[grid(4)](buf13, buf14, buf15,
            4, XBLOCK=4, num_warps=1, num_stages=1)
        buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_mul_native_layer_norm_tanh_5[grid(16)](buf12,
            buf13, buf14, buf15, primals_10, primals_11, buf16, 16, XBLOCK=
            16, num_warps=1, num_stages=1)
        del buf14
        del buf15
    # (hy, cy, then tensors saved for the backward pass.)
    return (buf16, buf13, primals_1, primals_4, primals_5, primals_8,
        primals_9, primals_10, primals_11, buf0, buf1, buf2, buf5, buf6,
        buf7, buf10, reinterpret_tensor(buf12, (4, 4), (12, 1), 0),
        reinterpret_tensor(buf12, (4, 4), (12, 1), 8), buf13)
class LayerNormLSTMCellNew(nn.LSTMCell):
    """Compiled LayerNormLSTMCell.

    NOTE(review): this variant only supports the ``hidden=None`` case — the
    compiled graph always starts from a zero state (see
    ``triton_poi_fused_new_zeros_0`` in ``call``).
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super().__init__(input_size, hidden_size, bias)
        self.ln_ih = nn.LayerNorm(4 * hidden_size)
        self.ln_hh = nn.LayerNorm(4 * hidden_size)
        self.ln_ho = nn.LayerNorm(hidden_size)

    def forward(self, input_0):
        # Gather LSTM weights/biases and the three layer-norm affines
        # in the primal numbering the compiled graph expects.
        primals_2 = self.weight_ih
        primals_6 = self.weight_hh
        primals_3 = self.bias_ih
        primals_4 = self.bias_hh
        primals_5 = self.ln_ih.weight
        primals_7 = self.ln_ih.bias
        primals_8 = self.ln_hh.weight
        primals_9 = self.ln_hh.bias
        primals_10 = self.ln_ho.weight
        primals_11 = self.ln_ho.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        # (hy, cy)
        return output[0], output[1]
|
drgripa1/deepvecfont
|
LayerNormLSTMCell
| false
| 15,241
|
[
"MIT"
] | 68
|
a44d81ba19a22e43b4e576cd8ebc5c2fd961a621
|
https://github.com/drgripa1/deepvecfont/tree/a44d81ba19a22e43b4e576cd8ebc5c2fd961a621
|
FocalLoss
|
import torch
from torch import nn
class FocalLoss(nn.Module):
    """Binary focal loss on logits, using a numerically stable
    softplus-based log-probability formulation.

    When uncertain, alpha == 0.5 may work slightly better.
    url: https://github.com/CoinCheung/pytorch-loss
    Usage is same as nn.BCEWithLogits:
        >>> loss = criteria(logits, lbs)
    """

    def __init__(self, alpha=0.5, gamma=2, reduction='mean'):
        super(FocalLoss, self).__init__()
        self.reduction = reduction
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, logits, labels):
        probs = torch.sigmoid(logits)
        # Modulating factor -(|labels - p|)^gamma down-weights easy examples.
        coeff = torch.abs(labels - probs).pow(self.gamma).neg()
        # Stable log-probs, branching on the sign of the logits.
        nonneg = logits >= 0
        sp_neg = nn.functional.softplus(logits, -1, 50)
        sp_pos = nn.functional.softplus(logits, 1, 50)
        log_0_probs = torch.where(nonneg, -logits + sp_neg, -sp_pos)
        log_1_probs = torch.where(nonneg, sp_neg, logits - sp_pos)
        loss = (labels * self.alpha * log_1_probs
                + (1.0 - labels) * (1.0 - self.alpha) * log_0_probs)
        loss = loss * coeff
        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss
def get_inputs():
    # (logits, labels) shaped alike for the focal loss.
    shape = (4, 4, 4, 4)
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    # FocalLoss is constructed with all-default arguments.
    positional, keyword = [], {}
    return [positional, keyword]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_ge_mean_mul_neg_pow_rsub_sigmoid_softplus_sub_where_0(
    in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    # Single-program reduction computing the mean focal loss over 256 elements
    # with constants folded in: alpha = 0.5, gamma = 2, softplus threshold 50.
    # in_ptr0 = labels, in_ptr1 = logits.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)   # labels
    tmp3 = tl.load(in_ptr1 + r0, None)   # logits
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1                   # labels * alpha
    tmp4 = 0.0
    tmp5 = tmp3 >= tmp4                  # branch on logits sign
    # softplus(logits, beta=-1, threshold=50) with its linear fallback.
    tmp6 = -1.0
    tmp7 = tmp3 * tmp6
    tmp8 = 50.0
    tmp9 = tmp7 > tmp8
    tmp10 = tl_math.exp(tmp7)
    tmp11 = libdevice.log1p(tmp10)
    tmp12 = tmp11 * tmp6
    tmp13 = tl.where(tmp9, tmp3, tmp12)
    # softplus(logits, beta=1, threshold=50).
    tmp14 = 1.0
    tmp15 = tmp3 * tmp14
    tmp16 = tmp15 > tmp8
    tmp17 = tl_math.exp(tmp15)
    tmp18 = libdevice.log1p(tmp17)
    tmp19 = tmp18 * tmp14
    tmp20 = tl.where(tmp16, tmp3, tmp19)
    tmp21 = tmp3 - tmp20
    tmp22 = tl.where(tmp5, tmp13, tmp21)     # log_1_probs
    tmp23 = tmp2 * tmp22
    tmp24 = tmp14 - tmp0
    tmp25 = tmp24 * tmp1                     # (1 - labels) * (1 - alpha)
    tmp26 = -tmp3
    tmp27 = tmp26 + tmp13
    tmp28 = -tmp20
    tmp29 = tl.where(tmp5, tmp27, tmp28)     # log_0_probs
    tmp30 = tmp25 * tmp29
    tmp31 = tmp23 + tmp30
    # Modulating factor: -(|labels - sigmoid(logits)|)^2.
    tmp32 = tl.sigmoid(tmp3)
    tmp33 = tmp0 - tmp32
    tmp34 = tl_math.abs(tmp33)
    tmp35 = tmp34 * tmp34
    tmp36 = -tmp35
    tmp37 = tmp31 * tmp36
    tmp38 = tl.broadcast_to(tmp37, [RBLOCK])
    tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0))
    tmp41 = 256.0
    tmp42 = tmp40 / tmp41                    # mean reduction
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp42, None)
def call(args):
    # Compiled FocalLoss forward: a single fused reduction kernel yields the
    # mean focal loss. Note the kernel receives (labels, logits) in that order.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # 0-d output buffer; the kernel writes the scalar loss in place.
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_per_fused_abs_add_ge_mean_mul_neg_pow_rsub_sigmoid_softplus_sub_where_0[
            grid(1)](buf2, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,
class FocalLossNew(nn.Module):
    def __init__(self, alpha=0.5, gamma=2, reduction='mean'):
        """FocalLoss
        Focal loss; when uncertain, alpha == 0.5 may work slightly better.
        url: https://github.com/CoinCheung/pytorch-loss
        Usage is same as nn.BCEWithLogits:
        >>> loss = criteria(logits, lbs)
        """
        super(FocalLossNew, self).__init__()
        # NOTE(review): the compiled kernel hard-codes alpha=0.5, gamma=2 and
        # mean reduction; these stored attributes do not affect the Triton path.
        self.reduction = reduction
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, input_0, input_1):
        # input_0: logits, input_1: labels; both (4, 4, 4, 4) float tensors.
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
|
dumpmemory/Pytorch-NLU
|
FocalLoss
| false
| 15,242
|
[
"Apache-2.0"
] | 115
|
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
https://github.com/dumpmemory/Pytorch-NLU/tree/864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
CecaModule
|
import math
import torch
import torch.utils.data
import torchvision.transforms.functional as F
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
from torch import optim as optim
class CecaModule(nn.Module):
    """Circular ECA: channel attention whose 1-D conv uses circular padding
    rather than zero padding.

    Channels have no inherent ordering or locality, so circular padding lets
    the channels at either "edge" adapt to each other, increasing connectivity
    at no extra parameter cost.

    Args:
        channels: if given, the kernel size is derived adaptively from the
            channel count via the gamma/beta mapping of the ECA paper
            (https://arxiv.org/pdf/1910.03151.pdf); otherwise ``kernel_size``
            is used directly.
        kernel_size: fallback kernel size (default 3, must be odd).
        gamma, beta: parameters of the adaptive kernel-size mapping.
    """

    def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1):
        super(CecaModule, self).__init__()
        assert kernel_size % 2 == 1
        if channels is not None:
            t = int(abs(math.log(channels, 2) + beta) / gamma)
            kernel_size = max(t if t % 2 else t + 1, 3)
        # Padding is applied manually (circular) in forward, hence padding=0.
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0,
            bias=False)
        self.padding = (kernel_size - 1) // 2

    def forward(self, x):
        # Global average pool to one value per channel, as a length-C sequence.
        pooled = x.mean((2, 3)).view(x.shape[0], 1, -1)
        pooled = F.pad(pooled, (self.padding, self.padding), mode='circular')
        attn = self.conv(pooled)
        attn = attn.view(x.shape[0], -1, 1, 1).sigmoid()
        return x * attn.expand_as(x)
def get_inputs():
    # Single random 4-D activation tensor for smoke-testing CecaModule.
    sample = torch.rand(4, 4, 4, 4)
    return [sample]
def get_init_inputs():
    # CecaModule is constructed with all-default arguments.
    positional, keyword = [], {}
    return [positional, keyword]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.utils.data
import torch.nn as nn
import torch.nn.parallel
from torch import optim as optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
    constexpr):
    # Sums each (batch, channel) 16-element spatial plane into one value.
    # The division by 16 that completes the mean is deferred to the next kernel.
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Builds the circularly padded (4, 1, 6) sequence from the (4, 4) spatial
    # sums: dividing by 16.0 finishes the channel means, and positions 0 and 5
    # wrap around to the opposite end (F.pad mode='circular', pad=1 each side).
    xnumel = 24
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6   # position within the padded length-6 row
    x1 = xindex // 6  # batch index
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 5, tl.int64)
    tmp2 = tmp0 >= tmp1
    # Right pad (x0 == 5): wraps to source position 0.
    tmp3 = -4 + x0
    tmp4 = tl.full([1], 1, tl.int64)
    tmp5 = tmp3 < tmp4
    tmp6 = tmp5 & tmp2
    tmp7 = tmp0 >= tmp4
    tmp8 = tmp0 < tmp1
    tmp9 = tmp7 & tmp8
    tmp10 = tmp9 & tmp6
    tmp11 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp10 & xmask, other=0.0)
    tmp12 = 16.0
    tmp13 = tmp11 / tmp12
    tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
    tmp15 = tl.where(tmp10, tmp13, tmp14)
    tmp16 = float('nan')
    tmp17 = tl.where(tmp9, tmp15, tmp16)
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp6, tmp17, tmp18)
    tmp20 = tmp3 >= tmp4
    tmp21 = tmp3 < tmp1
    tmp22 = tmp20 & tmp21
    tmp23 = tmp22 & tmp2
    tmp24 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1), tmp23 & xmask, other=0.0)
    tmp25 = tmp24 / tmp12
    tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
    tmp27 = tl.where(tmp23, tmp25, tmp26)
    tmp28 = tl.where(tmp22, tmp27, tmp16)
    tmp29 = tl.where(tmp5, tmp19, tmp28)
    tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
    tmp31 = tl.where(tmp2, tmp29, tmp30)
    # Left pad (x0 == 0): wraps to source position 3.
    tmp32 = tmp0 < tmp4
    tmp33 = 4 + x0
    tmp34 = tmp33 >= tmp4
    tmp35 = tmp33 < tmp1
    tmp36 = tmp34 & tmp35
    tmp37 = tmp36 & tmp32
    tmp38 = tl.load(in_ptr0 + (3 + x0 + 4 * x1), tmp37 & xmask, other=0.0)
    tmp39 = tmp38 / tmp12
    tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
    tmp41 = tl.where(tmp37, tmp39, tmp40)
    tmp42 = tl.where(tmp36, tmp41, tmp16)
    tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
    tmp44 = tl.where(tmp32, tmp42, tmp43)
    # Interior (1 <= x0 <= 4): copies source position x0 - 1.
    tmp45 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp9 & xmask, other=0.0)
    tmp46 = tmp45 / tmp12
    tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
    tmp48 = tl.where(tmp9, tmp46, tmp47)
    tmp49 = tl.where(tmp9, tmp48, tmp16)
    tmp50 = tl.where(tmp32, tmp44, tmp49)
    tmp51 = tl.where(tmp2, tmp31, tmp50)
    tl.store(out_ptr0 + x2, tmp51, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    """Elementwise gate: out = x * sigmoid(g), with g broadcast over
    groups of 16 consecutive elements (one gate value per channel plane).
    """
    xnumel = 256
    offs = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    mask = offs < xnumel
    plane = offs // 16
    x = tl.load(in_ptr0 + offs, mask)
    gate = tl.load(in_ptr1 + plane, mask, eviction_policy='evict_last')
    tl.store(out_ptr0 + offs, x * tl.sigmoid(gate), mask)
def call(args):
    """Inductor-compiled forward for the circular-ECA module.

    args: [input (4,4,4,4) float32 CUDA tensor, conv weight (1,1,3)].
    Pipeline: per-channel reduction -> circularly padded (4,1,6) buffer ->
    1D conv (padding=0, so the explicit pad provides the halo) ->
    sigmoid-gated multiply against the original input.
    Returns (output, plus saved tensors for backward).
    """
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Per-(batch, channel) reduction over the 16 spatial elements.
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        # Circular padding: width 4 -> 6 with one wrapped slot each side.
        buf2 = empty_strided_cuda((4, 1, 6), (6, 6, 1), torch.float32)
        triton_poi_fused_copy_1[grid(24)](buf0, buf2, 24, XBLOCK=32,
            num_warps=1, num_stages=1)
        del buf0
        buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf3, (4, 1, 4), (4, 4, 1))
        # Gate the input with sigmoid(conv output), one gate per channel.
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_2[grid(256)](primals_1, buf3, buf4, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
    return buf4, primals_1, primals_2, buf2, buf3
class CecaModuleNew(nn.Module):
    """Constructs a circular ECA module.
    ECA module where the conv uses circular padding rather than zero padding.
    Unlike the spatial dimension, the channels do not have inherent ordering nor
    locality. Although this module in essence, applies such an assumption, it is unnecessary
    to limit the channels on either "edge" from being circularly adapted to each other.
    This will fundamentally increase connectivity and possibly increase performance metrics
    (accuracy, robustness), without significantly impacting resource metrics
    (parameter size, throughput,latency, etc)
    Args:
        channels: Number of channels of the input feature map for use in adaptive kernel sizes
            for actual calculations according to channel.
            gamma, beta: when channel is given parameters of mapping function
            refer to original paper https://arxiv.org/pdf/1910.03151.pdf
            (default=None. if channel size not given, use k_size given for kernel size.)
        kernel_size: Adaptive selection of kernel size (default=3)

    Implementation note: forward dispatches to the Inductor-generated
    ``call`` above; the circular padding itself is done by the fused copy
    kernel, which is why the Conv1d here uses ``padding=0``.
    """

    def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1):
        super(CecaModuleNew, self).__init__()
        # Circular conv needs an odd kernel so the pad is symmetric.
        assert kernel_size % 2 == 1
        if channels is not None:
            # Adaptive kernel size from channel count (ECA paper, eq. 3).
            # NOTE(review): relies on ``math`` being imported at module
            # level; the import is not visible in this chunk - confirm.
            t = int(abs(math.log(channels, 2) + beta) / gamma)
            kernel_size = max(t if t % 2 else t + 1, 3)
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0,
            bias=False)
        # Halo width the fused pad kernel must supply on each side.
        self.padding = (kernel_size - 1) // 2

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        # call() also returns saved tensors for backward; [0] is the result.
        return output[0]
|
dumpmemory/NonDeepNetworks
|
CecaModule
| false
| 15,243
|
[
"BSD-3-Clause"
] | 307
|
5513bf588f4e64c99583440507232675c2e21e34
|
https://github.com/dumpmemory/NonDeepNetworks/tree/5513bf588f4e64c99583440507232675c2e21e34
|
AE
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Linear
class AE(nn.Module):
    """Symmetric fully-connected autoencoder.

    Three ReLU encoder layers map the input to a latent code ``z``; three
    ReLU decoder layers map it back to a reconstruction ``x_bar``.
    ``forward`` returns the reconstruction together with every encoder
    activation, so downstream losses can supervise intermediate layers.
    """

    def __init__(self, n_enc_1, n_enc_2, n_enc_3, n_dec_1, n_dec_2, n_dec_3,
        n_input, n_z):
        super(AE, self).__init__()
        # Encoder: input -> h1 -> h2 -> h3 -> z
        self.enc_1 = Linear(n_input, n_enc_1)
        self.enc_2 = Linear(n_enc_1, n_enc_2)
        self.enc_3 = Linear(n_enc_2, n_enc_3)
        self.z_layer = Linear(n_enc_3, n_z)
        # Decoder: z -> d1 -> d2 -> d3 -> reconstruction
        self.dec_1 = Linear(n_z, n_dec_1)
        self.dec_2 = Linear(n_dec_1, n_dec_2)
        self.dec_3 = Linear(n_dec_2, n_dec_3)
        self.x_bar_layer = Linear(n_dec_3, n_input)

    def forward(self, x):
        """Return (x_bar, enc_h1, enc_h2, enc_h3, z)."""
        h1 = torch.relu(self.enc_1(x))
        h2 = torch.relu(self.enc_2(h1))
        h3 = torch.relu(self.enc_3(h2))
        z = self.z_layer(h3)
        d1 = torch.relu(self.dec_1(z))
        d2 = torch.relu(self.dec_2(d1))
        d3 = torch.relu(self.dec_3(d2))
        return self.x_bar_layer(d3), h1, h2, h3, z
def get_inputs():
    """Return one random 4x4x4x4 tensor as the forward-pass smoke input."""
    sample = torch.rand(4, 4, 4, 4)
    return [sample]
def get_init_inputs():
    """Constructor args for AE: no positionals, every width set to 4."""
    widths = dict.fromkeys(['n_enc_1', 'n_enc_2', 'n_enc_3', 'n_dec_1',
        'n_dec_2', 'n_dec_3', 'n_input', 'n_z'], 4)
    return [[], widths]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place fused bias-add + ReLU over a flat 256-element buffer.

    The bias repeats every 4 elements (one value per output feature).
    """
    xnumel = 256
    offs = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    mask = offs < xnumel
    acc = tl.load(in_out_ptr0 + offs, mask)
    bias = tl.load(in_ptr0 + offs % 4, mask, eviction_policy='evict_last')
    zero = tl.full([1], 0, tl.int32)
    tl.store(in_out_ptr0 + offs, triton_helpers.maximum(zero, acc + bias),
        mask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Fused bias-add + ReLU that also records the backward mask.

    Writes relu(x + bias) back into in_out_ptr0 and stores a boolean mask
    (activation <= 0) into out_ptr0 for use by the ReLU backward pass.
    The bias repeats every 4 elements.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    # Mask of clamped positions; gradient is zeroed where True.
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    """Inductor-compiled forward for the AE autoencoder.

    args: 8 (weight, bias) pairs for the Linear layers plus the (4,4,4,4)
    input (primals_3).  Each layer is a cuBLAS mm/addmm followed by a fused
    bias+ReLU kernel; decoder layers additionally save boolean ReLU masks
    (buf14-buf16) for the backward pass.  Returns the reconstruction, the
    encoder activations, z, and the tensors needed for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4, 4), (4, 1))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4, 4), (4, 1))
    assert_size_stride(primals_13, (4,), (1,))
    assert_size_stride(primals_14, (4, 4), (4, 1))
    assert_size_stride(primals_15, (4,), (1,))
    assert_size_stride(primals_16, (4, 4), (4, 1))
    assert_size_stride(primals_17, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # enc_1: (64,4) @ W^T, then fused bias+ReLU in place.
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_2
        # enc_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        triton_poi_fused_relu_0[grid(256)](buf3, primals_5, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_5
        # enc_3
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf4
        triton_poi_fused_relu_0[grid(256)](buf5, primals_7, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_7
        # z_layer: plain affine, no activation.
        buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf6)
        del primals_9
        # dec_1 .. dec_3: ReLU layers that also emit backward masks.
        buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf6, reinterpret_tensor(primals_10, (4, 4), (1,
            4), 0), out=buf7)
        buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf7
        buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(256)](buf8,
            primals_11, buf16, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_11
        buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf8, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf9)
        buf10 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf9
        buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(256)](buf10,
            primals_13, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_13
        buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf10, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf11)
        buf12 = reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf11
        buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(256)](buf12,
            primals_15, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_15
        # x_bar_layer: final affine reconstruction.
        buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_17, reinterpret_tensor(buf12, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf13)
        del primals_17
    return (reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        buf1, buf3, buf5, reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4,
        1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1,
        buf3, buf5, buf6, reinterpret_tensor(buf8, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(
        buf12, (64, 4), (4, 1), 0), primals_16, buf14, primals_14, buf15,
        primals_12, buf16, primals_10, primals_8, primals_6, primals_4)
class AENew(nn.Module):
    """Inductor-compiled variant of AE.

    Holds the same Linear layers as AE but routes forward through the
    generated ``call``; the primals_* numbering must match call()'s
    positional expectations exactly, which is why the wiring below is
    order-sensitive.
    """

    def __init__(self, n_enc_1, n_enc_2, n_enc_3, n_dec_1, n_dec_2, n_dec_3,
        n_input, n_z):
        super(AENew, self).__init__()
        # Encoder stack.
        self.enc_1 = Linear(n_input, n_enc_1)
        self.enc_2 = Linear(n_enc_1, n_enc_2)
        self.enc_3 = Linear(n_enc_2, n_enc_3)
        self.z_layer = Linear(n_enc_3, n_z)
        # Decoder stack.
        self.dec_1 = Linear(n_z, n_dec_1)
        self.dec_2 = Linear(n_dec_1, n_dec_2)
        self.dec_3 = Linear(n_dec_2, n_dec_3)
        self.x_bar_layer = Linear(n_dec_3, n_input)

    def forward(self, input_0):
        """Return (x_bar, enc_h1, enc_h2, enc_h3, z) via the compiled graph."""
        primals_1 = self.enc_1.weight
        primals_2 = self.enc_1.bias
        primals_4 = self.enc_2.weight
        primals_5 = self.enc_2.bias
        primals_6 = self.enc_3.weight
        primals_7 = self.enc_3.bias
        primals_8 = self.z_layer.weight
        primals_9 = self.z_layer.bias
        primals_10 = self.dec_1.weight
        primals_11 = self.dec_1.bias
        primals_12 = self.dec_2.weight
        primals_13 = self.dec_2.bias
        primals_14 = self.dec_3.weight
        primals_15 = self.dec_3.bias
        primals_16 = self.x_bar_layer.weight
        primals_17 = self.x_bar_layer.bias
        # The input slots in at position 3 of call()'s argument list.
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17])
        return output[0], output[1], output[2], output[3], output[4]
|
drzhang3/SDCN
|
AE
| false
| 15,244
|
[
"Apache-2.0"
] | 146
|
3d11365bcb4af2cbe9625362737f1224aeea3b72
|
https://github.com/drzhang3/SDCN/tree/3d11365bcb4af2cbe9625362737f1224aeea3b72
|
ConvSqu
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.parallel
from torch import optim as optim
def autopad(k, p=None):
    """Return the padding p, defaulting to 'same'-style padding (k // 2).

    Works for a scalar kernel size or a per-dimension list of sizes.
    """
    if p is not None:
        return p
    return k // 2 if isinstance(k, int) else [x // 2 for x in k]
class ConvSqu(nn.Module):
    """Bias-free Conv2d followed by SiLU (identity when act=False)."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        super(ConvSqu, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
            )
        self.act = nn.SiLU() if act else nn.Identity()

    def forward(self, x):
        y = self.conv(x)
        return self.act(y)

    def fuseforward(self, x):
        # Same path as forward; kept for fuse-time API compatibility.
        y = self.conv(x)
        return self.act(y)
def get_inputs():
    """One random NCHW tensor of shape 4x4x4x4 for the smoke test."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor args for ConvSqu: 4 input and 4 output channels."""
    return [[], dict(c1=4, c2=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
import torch.nn.parallel
from torch import optim as optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_silu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Elementwise SiLU: out = x * sigmoid(x) over a flat 256-element buffer."""
    xnumel = 256
    offs = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    mask = offs < xnumel
    x = tl.load(in_ptr0 + offs, mask)
    tl.store(out_ptr0 + offs, x * tl.sigmoid(x), mask)
def call(args):
    """Inductor-compiled ConvSqu forward: conv2d then fused SiLU.

    args: [conv weight (4,4,1,1), input (4,4,4,4)].  Returns the activated
    output plus the tensors saved for backward.
    """
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # 1x1 convolution via cuDNN/extern kernel.
        buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        # SiLU applied out-of-place; buf0 is kept for backward.
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_silu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
    return buf1, primals_1, primals_2, buf0
def autopad(k, p=None):
    """Pick padding: explicit p if given, otherwise k // 2 ('same' style).

    Accepts a scalar kernel size or a list of per-dimension sizes.
    """
    if p is None:
        p = k // 2 if isinstance(k, int) else [d // 2 for d in k]
    return p
class ConvSquNew(nn.Module):
    """Inductor-compiled ConvSqu: forward dispatches to the generated call().

    Note: the compiled path always applies the fused SiLU kernel, i.e. it
    corresponds to act=True; self.act is kept only for interface parity.
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        super(ConvSquNew, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
            )
        self.act = nn.SiLU() if act else nn.Identity()

    def fuseforward(self, x):
        # Eager fallback path (not the compiled graph).
        return self.act(self.conv(x))

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_2 = input_0
        output = call([primals_1, primals_2])
        # call() also returns saved tensors for backward; [0] is the result.
        return output[0]
|
dumpmemory/NonDeepNetworks
|
ConvSqu
| false
| 15,245
|
[
"BSD-3-Clause"
] | 307
|
5513bf588f4e64c99583440507232675c2e21e34
|
https://github.com/dumpmemory/NonDeepNetworks/tree/5513bf588f4e64c99583440507232675c2e21e34
|
DeepActor
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
def hidden_init(layer):
    """Return the symmetric uniform-init range (-1/sqrt(f), 1/sqrt(f)).

    f is the first dimension of the layer's weight tensor (for nn.Linear
    that is out_features - the original names it fan_in; confirm intent).
    """
    bound = 1.0 / np.sqrt(layer.weight.data.size()[0])
    return -bound, bound
class DeepActor(nn.Module):
    """Actor (Policy) Model.

    Gaussian policy network with skip connections: the state is
    re-concatenated onto each hidden layer's output before the next layer
    (DenseNet-style), and the head emits a mean and a clamped log-std.
    """

    def __init__(self, state_size, action_size, seed, device, hidden_size=
        32, init_w=0.003, log_std_min=-20, log_std_max=2):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            device: Torch device the caller intends to run on (stored only)
            hidden_size (int): Width of each hidden layer
            init_w (float): Uniform init bound for the output heads
            log_std_min/log_std_max (float): Clamp range for log-std
        """
        super(DeepActor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.device = device
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        # Layers 2-4 consume [hidden, state] concatenations.
        in_dim = hidden_size + state_size
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(in_dim, hidden_size)
        self.fc3 = nn.Linear(in_dim, hidden_size)
        self.fc4 = nn.Linear(in_dim, hidden_size)
        self.mu = nn.Linear(hidden_size, action_size)
        self.log_std_linear = nn.Linear(hidden_size, action_size)

    def reset_parameters(self, init_w=0.003):
        """Re-initialize all weights (uniform fan-based for the trunk)."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
        self.fc4.weight.data.uniform_(*hidden_init(self.fc4))
        self.mu.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)

    def forward(self, state: 'torch.tensor') ->'tuple':
        """Return (mu, log_std) tensors; log_std is clamped to the range."""
        x = F.relu(self.fc1(state))
        x = torch.cat([x, state], dim=1)
        x = F.relu(self.fc2(x))
        x = torch.cat([x, state], dim=1)
        x = F.relu(self.fc3(x))
        x = torch.cat([x, state], dim=1)
        x = F.relu(self.fc4(x))
        mu = self.mu(x)
        log_std = self.log_std_linear(x)
        log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
        return mu, log_std

    def evaluate(self, state, epsilon=1e-06):
        """Sample an action via reparameterization; return it with its
        tanh-squashed log-probability (summed over action dims)."""
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        # Tanh change-of-variables correction; epsilon guards log(0).
        log_prob = (dist.log_prob(e) - torch.log(1 - action.pow(2) + epsilon)
            ).sum(1, keepdim=True)
        return action, log_prob

    def get_action(self, state):
        """
        returns the action based on a squashed gaussian policy. That means the samples are obtained according to:
        a(s,e)= tanh(mu(s)+sigma(s)+e)
        """
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        return action.detach().cpu()

    def get_det_action(self, state):
        """Deterministic action: tanh of the mean, no sampling."""
        mu, _log_std = self.forward(state)
        return torch.tanh(mu).detach().cpu()
def get_inputs():
    """A single random batch of 4 four-dimensional states."""
    return [torch.rand(4, 4)]
def get_init_inputs():
    """Constructor args for DeepActor: 4-dim state/action, seed 4, device 0."""
    kwargs = dict(state_size=4, action_size=4, seed=4, device=0)
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
from torch.distributions import Normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Fused bias+ReLU followed by concatenation with the raw state.

    Builds a (4, 36) row layout: columns 0-31 are relu(hidden + bias) from
    in_ptr0/in_ptr1, columns 32-35 are copied from the 4-wide state in
    in_ptr2 - i.e. torch.cat([relu(h + b), state], dim=1).
    """
    xnumel = 144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x0: column within the 36-wide concat row; x1: batch row.
    x0 = xindex % 36
    x1 = xindex // 36
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 32, tl.int64)
    # First segment: hidden activations (columns < 32).
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (32 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full([1], 0, tl.int32)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    # Second segment: the raw state (columns 32..35).
    tmp12 = tmp0 >= tmp3
    tl.full([1], 36, tl.int64)
    tmp15 = tl.load(in_ptr2 + (4 * x1 + (-32 + x0)), tmp12 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tl.where(tmp4, tmp11, tmp15)
    tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place fused bias-add + ReLU; bias repeats every 32 elements."""
    xnumel = 128
    offs = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)[:]
    mask = offs < xnumel
    acc = tl.load(in_out_ptr0 + offs, mask)
    bias = tl.load(in_ptr0 + offs % 32, mask, eviction_policy='evict_last')
    zero = tl.full([1], 0, tl.int32)
    tl.store(in_out_ptr0 + offs, triton_helpers.maximum(zero, acc + bias),
        mask)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_2(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    """Fused bias-add + clamp to [-20, 2] for the log-std head.

    Stores the clamped value in out_ptr0 and, for the backward pass, a
    boolean mask in out_ptr1 that is True where the pre-clamp value lay
    inside the range (gradient flows only there).
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    # Clamp bounds match DeepActor's log_std_min / log_std_max defaults.
    tmp3 = -20.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 2.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp2 >= tmp3
    tmp8 = tmp2 <= tmp5
    tmp9 = tmp7 & tmp8
    tl.store(out_ptr0 + x2, tmp6, xmask)
    tl.store(out_ptr1 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    """Recompute relu(x + bias) and store only the backward mask.

    out_ptr0 receives (activation <= 0); the activation itself is not
    written - it was already produced by the forward cat/relu kernels.
    The bias repeats every 32 elements.
    """
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 32
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    """Inductor-compiled DeepActor forward.

    args: fc1..fc4 weights/biases, the (4,4) state (primals_3), and the
    mu / log_std_linear head parameters.  Each trunk stage is an mm whose
    bias+ReLU is fused into the cat kernel that re-appends the state;
    the log-std head is clamped to [-20, 2].  Returns (mu, log_std) plus
    the tensors/masks saved for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (32, 4), (4, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (32, 36), (36, 1))
    assert_size_stride(primals_5, (32,), (1,))
    assert_size_stride(primals_6, (32, 36), (36, 1))
    assert_size_stride(primals_7, (32,), (1,))
    assert_size_stride(primals_8, (32, 36), (36, 1))
    assert_size_stride(primals_9, (32,), (1,))
    assert_size_stride(primals_10, (4, 32), (32, 1))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4, 32), (32, 1))
    assert_size_stride(primals_13, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # fc1 matmul; bias+ReLU deferred into the cat kernel below.
        buf0 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
        extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 32),
            (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 36), (36, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(144)](buf0, primals_2, primals_3, buf1,
            144, XBLOCK=128, num_warps=4, num_stages=1)
        # fc2 / fc3 follow the same mm -> fused relu+cat pattern.
        buf2 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (36, 32), (1,
            36), 0), out=buf2)
        buf3 = empty_strided_cuda((4, 36), (36, 1), torch.float32)
        triton_poi_fused_cat_0[grid(144)](buf2, primals_5, primals_3, buf3,
            144, XBLOCK=128, num_warps=4, num_stages=1)
        buf4 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
        extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (36, 32), (1,
            36), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 36), (36, 1), torch.float32)
        triton_poi_fused_cat_0[grid(144)](buf4, primals_7, primals_3, buf5,
            144, XBLOCK=128, num_warps=4, num_stages=1)
        # fc4: last trunk layer, plain in-place bias+ReLU (no cat).
        buf6 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
        extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (36, 32), (1,
            36), 0), out=buf6)
        buf7 = buf6
        del buf6
        triton_poi_fused_relu_1[grid(128)](buf7, primals_9, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_9
        # mu head.
        buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_11, buf7, reinterpret_tensor(
            primals_10, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf8)
        del primals_11
        # log-std head: bias-add fused with the clamp kernel.
        buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf7, reinterpret_tensor(primals_12, (32, 4), (1,
            32), 0), out=buf9)
        buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf11 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        triton_poi_fused_clamp_ge_le_logical_and_2[grid(16)](buf9,
            primals_13, buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf9
        del primals_13
        # ReLU backward masks recomputed from the pre-activation buffers.
        buf12 = empty_strided_cuda((4, 32), (32, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_3[grid(128)](buf4,
            primals_7, buf12, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf4
        del primals_7
        buf13 = empty_strided_cuda((4, 32), (32, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_3[grid(128)](buf2,
            primals_5, buf13, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf2
        del primals_5
        buf14 = empty_strided_cuda((4, 32), (32, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_3[grid(128)](buf0,
            primals_2, buf14, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del primals_2
    return (buf8, buf10, primals_3, buf1, buf3, buf5, buf7, buf11,
        primals_12, primals_10, primals_8, buf12, primals_6, buf13,
        primals_4, buf14)
def hidden_init(layer):
    """Symmetric uniform-init bounds from the weight's leading dimension.

    For nn.Linear weight (out, in) the leading dim is out_features; the
    original calls it fan_in - verify which fan the author intended.
    """
    first_dim = layer.weight.data.size()[0]
    limit = 1.0 / np.sqrt(first_dim)
    return -limit, limit
class DeepActorNew(nn.Module):
    """Actor (Policy) Model.

    Inductor-compiled variant of DeepActor: the layer definitions and
    sampling helpers are identical, but forward routes through the
    generated ``call`` with positionally-numbered primals.
    """

    def __init__(self, state_size, action_size, seed, device, hidden_size=
        32, init_w=0.003, log_std_min=-20, log_std_max=2):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
        """
        super(DeepActorNew, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.device = device
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        # Layers 2-4 consume [hidden, state] concatenations.
        in_dim = hidden_size + state_size
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(in_dim, hidden_size)
        self.fc3 = nn.Linear(in_dim, hidden_size)
        self.fc4 = nn.Linear(in_dim, hidden_size)
        self.mu = nn.Linear(hidden_size, action_size)
        self.log_std_linear = nn.Linear(hidden_size, action_size)

    def reset_parameters(self, init_w=0.003):
        # Trunk uses fan-based uniform init; heads use a small fixed bound.
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
        self.fc4.weight.data.uniform_(*hidden_init(self.fc4))
        self.mu.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)

    def evaluate(self, state, epsilon=1e-06):
        """Reparameterized sample plus tanh-corrected log-probability."""
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        # Tanh change-of-variables correction; epsilon guards log(0).
        log_prob = (dist.log_prob(e) - torch.log(1 - action.pow(2) + epsilon)
            ).sum(1, keepdim=True)
        return action, log_prob

    def get_action(self, state):
        """
        returns the action based on a squashed gaussian policy. That means the samples are obtained according to:
        a(s,e)= tanh(mu(s)+sigma(s)+e)
        """
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        return action.detach().cpu()

    def get_det_action(self, state):
        """Deterministic action: tanh of the mean, no sampling."""
        mu, _log_std = self.forward(state)
        return torch.tanh(mu).detach().cpu()

    def forward(self, input_0):
        """Return (mu, log_std) via the compiled graph; ordering of the
        primals list must match call()'s positional expectations."""
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_8 = self.fc4.weight
        primals_9 = self.fc4.bias
        primals_10 = self.mu.weight
        primals_11 = self.mu.bias
        primals_12 = self.log_std_linear.weight
        primals_13 = self.log_std_linear.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0], output[1]
|
drib861204/Soft-Actor-Critic-and-Extensions
|
DeepActor
| false
| 15,246
|
[
"MIT"
] | 143
|
3075df7430c1c49177b3798d753a9e3f6226672e
|
https://github.com/drib861204/Soft-Actor-Critic-and-Extensions/tree/3075df7430c1c49177b3798d753a9e3f6226672e
|
AdaILN
|
import torch
import torch.nn as nn
import torch.utils.cpp_extension
class AdaILN(nn.Module):
    """Adaptive Instance-Layer Normalization.

    Mixes instance norm and layer norm per channel with a learned ratio
    ``rho`` (initialized to 1.0, i.e. pure instance norm), then applies an
    externally supplied affine transform (gamma, beta) per sample.
    """

    def __init__(self, channels, resl, eps=1e-08):
        super().__init__()
        # rho in [0, 1]-ish mixing weight, one scalar per channel.
        self.rho = nn.Parameter(torch.Tensor(1, channels, 1, 1))
        self.rho.data.fill_(1.0)
        self.instance_norm = nn.InstanceNorm2d(channels, eps=eps, affine=False)
        self.layer_norm = nn.LayerNorm((channels, resl, resl), eps=eps,
            elementwise_affine=False)

    def forward(self, x, gamma, beta):
        rho = self.rho.expand(x.size(0), -1, -1, -1)
        mixed = self.instance_norm(x) * rho + self.layer_norm(x) * (1 - rho)
        n = mixed.size(0)
        return mixed * gamma.view(n, -1, 1, 1) + beta.view(n, -1, 1, 1)
def get_inputs():
    """Activation plus per-sample gamma and beta for the smoke test."""
    x = torch.rand(4, 4, 4, 4)
    gamma = torch.rand(4, 4)
    beta = torch.rand(4, 4)
    return [x, gamma, beta]
def get_init_inputs():
    """Constructor args for AdaILN: 4 channels at 4x4 resolution."""
    return [[], dict(channels=4, resl=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.cpp_extension
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_layer_norm_0(in_out_ptr0, in_ptr0, out_ptr0,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    """Layer-norm statistics: per-sample mean and rsqrt(var + eps).

    Reduces each of the 4 samples over its 64 (C*H*W) elements, writing
    the mean to out_ptr0 and the inverse standard deviation into
    in_out_ptr0.  Normalization itself happens in the next kernel.
    """
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    # Mean over the 64-element reduction axis.
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    # Biased variance, then rsqrt(var + 1e-8).
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = 64.0
    tmp18 = tmp16 / tmp17
    tmp19 = 1e-08
    tmp20 = tmp18 + tmp19
    tmp21 = libdevice.rsqrt(tmp20)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp21, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_native_layer_norm_rsub_1(
    in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
    out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Fused AdaILN forward for one (sample, channel) slice of 16 pixels.

    Computes instance-norm stats inline (mean, rsqrt over the 16 spatial
    elements), normalizes, mixes with the precomputed layer-norm
    (stats from in_ptr2/in_ptr3) via rho (in_ptr1):
        out = i_norm * rho + l_norm * (1 - rho)
    then applies the per-sample affine gamma (in_ptr4) / beta (in_ptr5).
    Saves the instance-norm mean/invstd for backward.
    """
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    # x2: channel index (for rho); x3: sample index (for LN stats).
    x2 = xindex % 4
    x3 = xindex // 4
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    # Instance-norm mean over the 16 spatial elements.
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = 16.0
    tmp18 = tmp16 / tmp17
    tmp19 = 1e-08
    tmp20 = tmp18 + tmp19
    tmp21 = libdevice.rsqrt(tmp20)
    # Instance-normalized value weighted by rho.
    tmp22 = tmp0 - tmp10
    tmp23 = tmp22 * tmp21
    tmp25 = tmp23 * tmp24
    # Layer-normalized value weighted by (1 - rho).
    tmp27 = tmp0 - tmp26
    tmp29 = tmp27 * tmp28
    tmp30 = 1.0
    tmp31 = tmp30 - tmp24
    tmp32 = tmp29 * tmp31
    tmp33 = tmp25 + tmp32
    # External affine: * gamma + beta.
    tmp35 = tmp33 * tmp34
    tmp37 = tmp35 + tmp36
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp21, xmask)
    tl.store(out_ptr1 + (r1 + 16 * x0), tmp37, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
    """Inductor-compiled AdaILN forward.

    args: [input (4,4,4,4), rho (1,4,1,1), gamma (4,4), beta (4,4)].
    First kernel computes layer-norm statistics per sample; the second
    fuses instance-norm, the rho mix, and the gamma/beta affine.
    Returns the output plus saved statistics for backward.
    """
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Layer-norm stats: buf4 = per-sample mean, buf7 = invstd.
        buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf7 = reinterpret_tensor(buf5, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf5
        get_raw_stream(0)
        triton_per_fused_native_layer_norm_0[grid(4)](buf7, primals_1, buf4,
            4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        # Instance-norm stats (buf0 mean, buf3 invstd) and fused output buf8.
        buf0 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
            )
        buf3 = reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0)
        del buf1
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_per_fused__native_batch_norm_legit_add_mul_native_layer_norm_rsub_1[
            grid(16)](buf3, primals_1, primals_2, buf4, buf7, primals_3,
            primals_4, buf0, buf8, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del primals_2
        del primals_4
    return buf8, primals_1, primals_3, buf0, buf3, buf4, buf7
class AdaILNNew(nn.Module):
    """Adaptive Instance-Layer Normalization with a Triton-compiled forward.

    ``rho`` is a learnable per-channel mixing ratio between instance and
    layer statistics; the norm submodules keep interface parity with the
    eager implementation while the compiled ``call`` graph does the work.
    """

    def __init__(self, channels, resl, eps=1e-08):
        super().__init__()
        # Start fully on the instance-norm side (rho == 1).
        self.rho = nn.Parameter(torch.Tensor(1, channels, 1, 1))
        self.rho.data.fill_(1.0)
        self.instance_norm = nn.InstanceNorm2d(channels, eps=eps, affine=False)
        self.layer_norm = nn.LayerNorm((channels, resl, resl), eps=eps,
            elementwise_affine=False)

    def forward(self, input_0, input_1, input_2):
        # Argument order must match the generated call() signature.
        packed = [input_0, self.rho, input_1, input_2]
        return call(packed)[0]
|
STomoya/animeface
|
AdaILN
| false
| 15,247
|
[
"MIT"
] | 61
|
37b3cd26097d7874559d4c152e41e5712b7a1a42
|
https://github.com/STomoya/animeface/tree/37b3cd26097d7874559d4c152e41e5712b7a1a42
|
DiceLossV1
|
import torch
from torch import nn
class DiceLossV1(nn.Module):
    def __init__(self, reduction='mean', epsilon=1e-09):
        """Dice loss for class-imbalanced data (the original author notes
        convergence problems with this variant).

        paper: Dice Loss for Data-imbalanced NLP Tasks
        url: https://arxiv.org/pdf/1911.02855.pdf

        args:
            reduction: str, how to reduce the per-element loss:
                ``'none'`` | ``'mean'`` | ``'sum'`` (anything but 'mean' sums).
            epsilon: float, small smoothing constant, e.g. 1e-9.
        returns:
            Tensor of loss.
        examples:
            >>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
            >>> label, logits = torch.tensor(label).float(), torch.tensor(logits).float()
            >>> loss = DiceLoss()(logits, label)
        """
        super().__init__()
        self.reduction = reduction
        self.epsilon = epsilon

    def forward(self, logits, labels):
        # Per-class probabilities, then pick the probability at each label index.
        probs = torch.sigmoid(logits)
        gather_idx = labels.unsqueeze(1).view(probs.size(0), -1)
        picked = torch.gather(probs, dim=1, index=gather_idx)
        # dsc = 1 - ((1-p)p + eps) / ((1-p)p + 1 + eps)
        overlap = (1 - picked) * picked
        dsc = 1 - (overlap + self.epsilon) / (overlap + 1 + self.epsilon)
        return dsc.mean() if self.reduction == 'mean' else dsc.sum()
def get_inputs():
    """Sample (logits, labels) inputs for the harness — both int64 ones."""
    first = torch.ones([4, 4], dtype=torch.int64)
    second = torch.ones([4], dtype=torch.int64)
    return [first, second]
def get_init_inputs():
    """Constructor arguments: no positionals, no keywords."""
    return [list(), dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_gather_mean_mul_rsub_sigmoid_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Fused dice loss: gather label-indexed sigmoid probabilities and
    # reduce 1 - ((1-p)p + eps) / ((1-p)p + 1 + eps) to its mean over 4
    # rows, storing the scalar result in in_out_ptr0.
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    # The next two lines are no-op artifacts kept from the generated code.
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)  # label indices
    # Wrap possibly-negative indices into [0, 4) before gathering.
    tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tl.device_assert((0 <= tmp4) & (tmp4 < 4),
        'index out of bounds: 0 <= tmp4 < 4')
    tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
        'evict_last')
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tl.sigmoid(tmp7)          # p = sigmoid(logit at label index)
    tmp9 = 1.0
    tmp10 = tmp9 - tmp8
    tmp11 = tmp10 * tmp8             # (1-p) * p
    tmp12 = 1e-09
    tmp13 = tmp11 + tmp12            # numerator: (1-p)p + eps
    tmp14 = tmp11 + tmp9
    tmp15 = tmp14 + tmp12            # denominator: (1-p)p + 1 + eps
    tmp16 = tmp13 / tmp15
    tmp17 = tmp9 - tmp16             # dice term per element
    tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
    tmp20 = tl.sum(tmp18, 1)[:, None]
    tmp21 = 4.0
    tmp22 = tmp20 / tmp21            # mean over the 4 gathered elements
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)
def call(args):
    # TorchInductor entry point for DiceLossV1: unpacks (logits, labels),
    # launches the fused dice kernel once, returns the scalar loss buffer.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)  # scalar output
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_gather_mean_mul_rsub_sigmoid_0[grid(1)](buf1,
            arg1_1, arg0_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class DiceLossV1New(nn.Module):
    """Dice loss with a Triton-compiled forward pass.

    paper: Dice Loss for Data-imbalanced NLP Tasks
    url: https://arxiv.org/pdf/1911.02855.pdf
    (The original author notes convergence problems with this loss.)
    The compiled graph always reduces by mean over the gathered elements.
    """

    def __init__(self, reduction='mean', epsilon=1e-09):
        super().__init__()
        # Kept for interface parity with the eager implementation.
        self.reduction = reduction
        self.epsilon = epsilon

    def forward(self, input_0, input_1):
        return call([input_0, input_1])[0]
|
dumpmemory/Pytorch-NLU
|
DiceLossV1
| false
| 15,248
|
[
"Apache-2.0"
] | 115
|
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
https://github.com/dumpmemory/Pytorch-NLU/tree/864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
HighwayLayer
|
import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.utils.checkpoint
import torch.utils.tensorboard
def my_xavier_init(m, gain=1):
    """Xavier-initialize every matrix parameter of *m*; zero the vectors.

    Parameters with more than one dimension (weights) get
    ``xavier_uniform_`` with the given gain; 1-D parameters (biases)
    are set to zero.
    """
    for param in m.parameters():
        if param.dim() <= 1:
            nn.init.constant_(param, 0)
        else:
            nn.init.xavier_uniform_(param, gain)
class HighwayLayer(torch.nn.Module):
    """Highway transformation used in span prediction."""

    def __init__(self, dim):
        super().__init__()
        self.gate_proj = nn.Linear(dim, dim, bias=True)
        self.nlin_proj = nn.Linear(dim, dim, bias=True)
        my_xavier_init(self.nlin_proj)
        my_xavier_init(self.gate_proj)
        # Bias the gate negative so the layer initially favors the identity path.
        nn.init.constant_(self.gate_proj.bias, -1)

    def forward(self, x):
        # Per-dimension convex mix of a tanh transform and the input itself,
        # weighted by a learned sigmoid gate.
        gate = torch.sigmoid(self.gate_proj(x))
        transformed = torch.tanh(self.nlin_proj(x))
        return gate * transformed + (1 - gate) * x
def get_inputs():
    """One random 4x4x4x4 float tensor for the harness."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments: no positionals, dim=4."""
    return [[], dict(dim=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.utils.checkpoint
import torch.utils.tensorboard
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise highway combine over 256 elements:
    # out = sigmoid(gate) * tanh(nlin) + (1 - sigmoid(gate)) * x
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)  # gate pre-activation
    tmp2 = tl.load(in_ptr1 + x0, xmask)  # nlin pre-activation
    tmp7 = tl.load(in_ptr2 + x0, xmask)  # residual input x
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = libdevice.tanh(tmp2)
    tmp4 = tmp1 * tmp3
    tmp5 = 1.0
    tmp6 = tmp5 - tmp1
    tmp8 = tmp6 * tmp7
    tmp9 = tmp4 + tmp8
    tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
    # TorchInductor entry point for HighwayLayer: two addmm projections
    # (gate and nlin) followed by the fused sigmoid/tanh combine kernel.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    # primals_1/2: gate weight/bias; primals_4/5: nlin weight/bias;
    # primals_3: the input tensor.
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # gate pre-activation: x @ W_gate^T + b_gate (input flattened to 64x4)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # nlin pre-activation: x @ W_nlin^T + b_nlin
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf0, buf1,
            primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
    # buf2 is the output; buf0/buf1 are saved for the backward pass.
    return buf2, primals_3, buf0, buf1
def my_xavier_init(m, gain=1):
    """Apply Xavier-uniform init to matrix parameters and zero out vectors.

    Any parameter with dim > 1 (a weight matrix) receives
    ``nn.init.xavier_uniform_`` scaled by *gain*; everything else
    (biases) is filled with zero.
    """
    for param in m.parameters():
        matrix_like = param.dim() > 1
        if matrix_like:
            nn.init.xavier_uniform_(param, gain)
        else:
            nn.init.constant_(param, 0)
class HighwayLayerNew(torch.nn.Module):
    """Highway transformation (span prediction) with a compiled forward."""

    def __init__(self, dim):
        super().__init__()
        self.gate_proj = nn.Linear(dim, dim, bias=True)
        self.nlin_proj = nn.Linear(dim, dim, bias=True)
        my_xavier_init(self.nlin_proj)
        my_xavier_init(self.gate_proj)
        # Negative gate bias keeps the identity path dominant at init.
        nn.init.constant_(self.gate_proj.bias, -1)

    def forward(self, input_0):
        # Argument order must match the generated call() signature.
        packed = [self.gate_proj.weight, self.gate_proj.bias, input_0,
            self.nlin_proj.weight, self.nlin_proj.bias]
        return call(packed)[0]
|
ali-senguel/fairo
|
HighwayLayer
| false
| 15,249
|
[
"MIT"
] | 669
|
1ec5d8ecbdfc782de63a92aad9bf8534110ce762
|
https://github.com/ali-senguel/fairo/tree/1ec5d8ecbdfc782de63a92aad9bf8534110ce762
|
EcaModule
|
import math
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.parallel
from torch import optim as optim
class EcaModule(nn.Module):
    """Efficient Channel Attention (ECA) module.

    Args:
        channels: number of input channels; when given, the conv kernel
            size is derived adaptively from it per the original paper
            (https://arxiv.org/pdf/1910.03151.pdf) using gamma and beta.
        kernel_size: explicit 1-D kernel size used when ``channels`` is
            None (must be odd, default 3).
        gamma, beta: mapping-function parameters for the adaptive size.
    """

    def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1):
        super().__init__()
        assert kernel_size % 2 == 1
        if channels is not None:
            t = int(abs(math.log(channels, 2) + beta) / gamma)
            kernel_size = max(t if t % 2 else t + 1, 3)
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size,
            padding=(kernel_size - 1) // 2, bias=False)

    def forward(self, x):
        # Squeeze: global average over H and W -> (B, 1, C).
        squeezed = x.mean((2, 3)).view(x.shape[0], 1, -1)
        # Excite: local cross-channel interaction via 1-D conv, then sigmoid.
        attn = self.conv(squeezed)
        attn = attn.view(x.shape[0], -1, 1, 1).sigmoid()
        return x * attn.expand_as(x)
def get_inputs():
    """One random 4x4x4x4 float tensor for the harness."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments: no positionals, no keywords."""
    return [list(), dict()]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.utils.data
import torch.nn as nn
import torch.nn.parallel
from torch import optim as optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    # Spatial mean per (batch, channel) pair: 16 groups of 16 elements.
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # no-op kept from codegen
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5  # average of the 16 spatial positions
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Elementwise x * sigmoid(attn); the attention value is shared by all
    # 16 spatial positions of a (batch, channel) pair (index x1).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
    # TorchInductor entry point for EcaModule: triton global-average-pool,
    # extern 1-D convolution across channels, then fused sigmoid-scale.
    primals_1, primals_2 = args
    args.clear()
    # primals_1: input (B, C, H, W); primals_2: Conv1d weight (1, 1, 3).
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        # Conv1d over the channel axis of the pooled (4, 1, 4) tensor.
        buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 1, 4
            ), (4, 0, 1), 0), primals_2, stride=(1,), padding=(1,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf2, (4, 1, 4), (4, 4, 1))
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_1[grid(256)](primals_1, buf2, buf3, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
    return buf3, primals_1, primals_2, reinterpret_tensor(buf1, (4, 1, 4),
        (4, 4, 1), 0), buf2
class EcaModuleNew(nn.Module):
    """ECA channel attention whose forward runs the compiled Inductor graph.

    Args:
        channels: if given, derives the 1-D conv kernel size adaptively
            (see https://arxiv.org/pdf/1910.03151.pdf) using gamma/beta.
        kernel_size: explicit odd kernel size used when channels is None.
        gamma, beta: parameters of the adaptive-size mapping function.
    """

    def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1):
        super().__init__()
        assert kernel_size % 2 == 1
        if channels is not None:
            t = int(abs(math.log(channels, 2) + beta) / gamma)
            kernel_size = max(t if t % 2 else t + 1, 3)
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size,
            padding=(kernel_size - 1) // 2, bias=False)

    def forward(self, input_0):
        # call() expects [input, conv weight].
        return call([input_0, self.conv.weight])[0]
|
dumpmemory/NonDeepNetworks
|
EcaModule
| false
| 15,250
|
[
"BSD-3-Clause"
] | 307
|
5513bf588f4e64c99583440507232675c2e21e34
|
https://github.com/dumpmemory/NonDeepNetworks/tree/5513bf588f4e64c99583440507232675c2e21e34
|
DownConv
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True, groups=1
    ):
    """Build a 3x3 Conv2d with configurable stride, padding, bias and groups."""
    return nn.Conv2d(in_channels, out_channels, kernel_size=3,
        stride=stride, padding=padding, bias=bias, groups=groups)
class DownConv(nn.Module):
    """
    A helper Module that performs 2 convolutions and 1 MaxPool.
    A ReLU activation follows each convolution.
    """

    def __init__(self, in_channels, out_channels, pooling=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pooling = pooling
        self.conv1 = conv3x3(self.in_channels, self.out_channels)
        self.conv2 = conv3x3(self.out_channels, self.out_channels)
        if self.pooling:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        # Two conv+ReLU stages; keep the pre-pool activation for skip links.
        hidden = F.relu(self.conv1(x))
        before_pool = F.relu(self.conv2(hidden))
        pooled = self.pool(before_pool) if self.pooling else before_pool
        return pooled, before_pool
def get_inputs():
    """One random 4x4x4x4 float tensor for the harness."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments: no positionals, 4-in / 4-out channels."""
    return [[], dict(in_channels=4, out_channels=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU applied after an extern convolution;
    # x1 indexes the channel so each channel gets its own bias value.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 stride-2 max pooling that also records which of the four window
    # positions won (int8 argmax index 0..3) for the backward pass.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = xindex // 2
    x2 = xindex
    # The four elements of the 2x2 window.
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
        'evict_last')
    tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
        'evict_last')
    # Running maximum over the window.
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Argmax bookkeeping: later winners overwrite earlier indices.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, xmask)   # pooled values
    tl.store(out_ptr1 + x2, tmp16, xmask)  # winning indices
def call(args):
    # TorchInductor entry point for DownConv: conv -> bias+ReLU (fused) ->
    # conv -> bias+ReLU (fused) -> 2x2 max-pool with indices.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    # primals_1/2: conv1 weight/bias; primals_4/5: conv2 weight/bias;
    # primals_3: the input tensor.
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # Fused bias-add + ReLU runs in place on the conv output.
        triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_relu_0[grid(256)](buf3, primals_5, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(64)](buf3, buf4,
            buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
    # buf4: pooled output; buf3: pre-pool activation; the rest for autograd.
    return buf4, buf3, primals_1, primals_3, primals_4, buf1, buf3, buf5
def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True, groups=1
    ):
    """Return a 3x3 Conv2d; stride/padding/bias/groups are pass-through."""
    conv = nn.Conv2d(in_channels, out_channels, kernel_size=3,
        stride=stride, padding=padding, bias=bias, groups=groups)
    return conv
class DownConvNew(nn.Module):
    """
    DownConv (two conv+ReLU stages plus optional MaxPool) whose forward
    runs the compiled Inductor graph.
    """

    def __init__(self, in_channels, out_channels, pooling=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pooling = pooling
        self.conv1 = conv3x3(self.in_channels, self.out_channels)
        self.conv2 = conv3x3(self.out_channels, self.out_channels)
        if self.pooling:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, input_0):
        # call() expects [w1, b1, input, w2, b2] and returns
        # (pooled, before_pool, ...autograd buffers).
        result = call([self.conv1.weight, self.conv1.bias, input_0,
            self.conv2.weight, self.conv2.bias])
        return result[0], result[1]
|
duchn92/transfer-object
|
DownConv
| false
| 15,251
|
[
"MIT"
] | 80
|
4db96931545ac0d28891375fbca3c0a5a382fb32
|
https://github.com/duchn92/transfer-object/tree/4db96931545ac0d28891375fbca3c0a5a382fb32
|
LabelSmoothingCrossEntropy
|
import torch
from torch import nn
class LabelSmoothingCrossEntropy(nn.Module):
    def __init__(self, eps=0.1, reduction='mean', ignore_index=-100):
        """Label-smoothed sigmoid cross-entropy, no-softmax-input.

        Smooths the logits (operates on the log-sigmoid scale).

        args:
            ignore_index: (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. Default: -100
            reduction: str, Specifies the reduction to apply to the output:
                ``'none'`` | ``'mean'`` | ``'sum'``.
            eps: float, smoothing mass, e.g. 0.1.
        returns:
            Tensor of loss.
        examples:
            >>> loss = LabelSmoothingCrossEntropy()(logits, label)
        """
        super(LabelSmoothingCrossEntropy, self).__init__()
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.eps = eps

    def forward(self, logits, labels):
        # Spread eps over the non-target classes (at least 1 to avoid /0).
        V = max(logits.size()[-1] - 1, 1)
        # Binary cross-entropy via log-sigmoid, down-weighted by (1-eps)
        # with eps/V of uniform smoothing mass added.
        loss = (1 - self.eps) * -(labels * torch.nn.functional.logsigmoid(
            logits) + (1 - labels) * torch.nn.functional.logsigmoid(-logits)
            ) + self.eps / V
        loss = loss.sum(dim=1) / logits.size(1)
        if 'mean' == self.reduction:
            loss = loss.mean()
        elif 'sum' == self.reduction:
            loss = loss.sum()
        # BUG FIX: the original had a bare `_` in the fallthrough branch,
        # raising NameError for reduction='none'. Any other reduction now
        # returns the unreduced per-row loss unchanged.
        return loss
def get_inputs():
    """Two random 4x4x4x4 float tensors (logits surrogate, labels surrogate)."""
    return [torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments: no positionals, no keywords."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_log_sigmoid_forward_mean_mul_neg_rsub_sum_0(
    in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Fused label-smoothed sigmoid cross-entropy, fully reduced to a scalar.
    # Per element: 0.9 * -(y*logsigmoid(x) + (1-y)*logsigmoid(-x)) + 0.1/3,
    # where logsigmoid(x) is computed stably as min(0, x) - log1p(exp(-|x|)).
    # The four channel slices (offsets 0/16/32/48) are unrolled inline,
    # averaged (x0.25), then mean-reduced over the remaining 64 positions.
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    # The next two lines are no-op artifacts kept from the generated code.
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    # in_ptr0 holds labels y, in_ptr1 holds logits x, one pair per channel.
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
    tmp26 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp27 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
    tmp49 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp50 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
    tmp72 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp73 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
    # ---- channel 0 ----
    tmp2 = 0.0
    tmp3 = triton_helpers.minimum(tmp2, tmp1)
    tmp4 = tl_math.abs(tmp1)
    tmp5 = -tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = libdevice.log1p(tmp6)
    tmp8 = tmp3 - tmp7               # logsigmoid(x)
    tmp9 = tmp0 * tmp8
    tmp10 = 1.0
    tmp11 = tmp10 - tmp0
    tmp12 = -tmp1
    tmp13 = triton_helpers.minimum(tmp2, tmp12)
    tmp14 = tl_math.abs(tmp12)
    tmp15 = -tmp14
    tmp16 = tl_math.exp(tmp15)
    tmp17 = libdevice.log1p(tmp16)
    tmp18 = tmp13 - tmp17            # logsigmoid(-x)
    tmp19 = tmp11 * tmp18
    tmp20 = tmp9 + tmp19
    tmp21 = -tmp20
    tmp22 = 0.9                      # 1 - eps
    tmp23 = tmp21 * tmp22
    tmp24 = 0.03333333333333333      # eps / V = 0.1 / 3
    tmp25 = tmp23 + tmp24
    # ---- channel 1 ----
    tmp28 = triton_helpers.minimum(tmp2, tmp27)
    tmp29 = tl_math.abs(tmp27)
    tmp30 = -tmp29
    tmp31 = tl_math.exp(tmp30)
    tmp32 = libdevice.log1p(tmp31)
    tmp33 = tmp28 - tmp32
    tmp34 = tmp26 * tmp33
    tmp35 = tmp10 - tmp26
    tmp36 = -tmp27
    tmp37 = triton_helpers.minimum(tmp2, tmp36)
    tmp38 = tl_math.abs(tmp36)
    tmp39 = -tmp38
    tmp40 = tl_math.exp(tmp39)
    tmp41 = libdevice.log1p(tmp40)
    tmp42 = tmp37 - tmp41
    tmp43 = tmp35 * tmp42
    tmp44 = tmp34 + tmp43
    tmp45 = -tmp44
    tmp46 = tmp45 * tmp22
    tmp47 = tmp46 + tmp24
    tmp48 = tmp25 + tmp47
    # ---- channel 2 ----
    tmp51 = triton_helpers.minimum(tmp2, tmp50)
    tmp52 = tl_math.abs(tmp50)
    tmp53 = -tmp52
    tmp54 = tl_math.exp(tmp53)
    tmp55 = libdevice.log1p(tmp54)
    tmp56 = tmp51 - tmp55
    tmp57 = tmp49 * tmp56
    tmp58 = tmp10 - tmp49
    tmp59 = -tmp50
    tmp60 = triton_helpers.minimum(tmp2, tmp59)
    tmp61 = tl_math.abs(tmp59)
    tmp62 = -tmp61
    tmp63 = tl_math.exp(tmp62)
    tmp64 = libdevice.log1p(tmp63)
    tmp65 = tmp60 - tmp64
    tmp66 = tmp58 * tmp65
    tmp67 = tmp57 + tmp66
    tmp68 = -tmp67
    tmp69 = tmp68 * tmp22
    tmp70 = tmp69 + tmp24
    tmp71 = tmp48 + tmp70
    # ---- channel 3 ----
    tmp74 = triton_helpers.minimum(tmp2, tmp73)
    tmp75 = tl_math.abs(tmp73)
    tmp76 = -tmp75
    tmp77 = tl_math.exp(tmp76)
    tmp78 = libdevice.log1p(tmp77)
    tmp79 = tmp74 - tmp78
    tmp80 = tmp72 * tmp79
    tmp81 = tmp10 - tmp72
    tmp82 = -tmp73
    tmp83 = triton_helpers.minimum(tmp2, tmp82)
    tmp84 = tl_math.abs(tmp82)
    tmp85 = -tmp84
    tmp86 = tl_math.exp(tmp85)
    tmp87 = libdevice.log1p(tmp86)
    tmp88 = tmp83 - tmp87
    tmp89 = tmp81 * tmp88
    tmp90 = tmp80 + tmp89
    tmp91 = -tmp90
    tmp92 = tmp91 * tmp22
    tmp93 = tmp92 + tmp24
    tmp94 = tmp71 + tmp93
    # Average the channel sum (sum(dim=1) / size(1)), then mean over 64.
    tmp95 = 0.25
    tmp96 = tmp94 * tmp95
    tmp97 = tl.broadcast_to(tmp96, [XBLOCK, RBLOCK])
    tmp99 = tl.sum(tmp97, 1)[:, None]
    tmp100 = 64.0
    tmp101 = tmp99 / tmp100
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp101, None)
def call(args):
    # TorchInductor entry point for LabelSmoothingCrossEntropy: a single
    # fused kernel reduces (labels, logits) to the scalar mean loss.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((), (), torch.float32)  # scalar output
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        # Note the swapped order: arg1_1 supplies labels, arg0_1 logits.
        triton_per_fused_add_div_log_sigmoid_forward_mean_mul_neg_rsub_sum_0[
            grid(1)](buf2, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2,
            num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,
class LabelSmoothingCrossEntropyNew(nn.Module):
    """Label-smoothed sigmoid cross-entropy with a Triton-compiled forward.

    args:
        ignore_index: target value that is ignored and does not contribute
            to the input gradient. Default: -100.
        reduction: reduction mode kept for interface parity; the compiled
            kernel computes the mean.
        eps: smoothing mass, e.g. 0.1.
    returns:
        Tensor of loss.
    """

    def __init__(self, eps=0.1, reduction='mean', ignore_index=-100):
        super().__init__()
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.eps = eps

    def forward(self, input_0, input_1):
        return call([input_0, input_1])[0]
|
dumpmemory/Pytorch-NLU
|
LabelSmoothingCrossEntropy
| false
| 15,252
|
[
"Apache-2.0"
] | 115
|
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
https://github.com/dumpmemory/Pytorch-NLU/tree/864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
lstm_cell
|
import torch
import torch.nn as nn
class lstm_cell(nn.Module):
    """Single LSTM step with explicit per-gate linear projections.

    Input projections (Wx*) carry the bias; hidden projections (Wh*)
    are bias-free, so each gate has exactly one bias vector.
    """

    def __init__(self, input_num, hidden_num):
        super().__init__()
        self.input_num = input_num
        self.hidden_num = hidden_num
        self.Wxi = nn.Linear(self.input_num, self.hidden_num, bias=True)
        self.Whi = nn.Linear(self.hidden_num, self.hidden_num, bias=False)
        self.Wxf = nn.Linear(self.input_num, self.hidden_num, bias=True)
        self.Whf = nn.Linear(self.hidden_num, self.hidden_num, bias=False)
        self.Wxc = nn.Linear(self.input_num, self.hidden_num, bias=True)
        self.Whc = nn.Linear(self.hidden_num, self.hidden_num, bias=False)
        self.Wxo = nn.Linear(self.input_num, self.hidden_num, bias=True)
        self.Who = nn.Linear(self.hidden_num, self.hidden_num, bias=False)

    def forward(self, xt, ht_1, ct_1):
        # Standard LSTM gate equations.
        input_gate = torch.sigmoid(self.Wxi(xt) + self.Whi(ht_1))
        forget_gate = torch.sigmoid(self.Wxf(xt) + self.Whf(ht_1))
        output_gate = torch.sigmoid(self.Wxo(xt) + self.Who(ht_1))
        candidate = torch.tanh(self.Wxc(xt) + self.Whc(ht_1))
        ct = forget_gate * ct_1 + input_gate * candidate
        ht = output_gate * torch.tanh(ct)
        return ht, ct
def get_inputs():
    """Three random 4x4x4x4 float tensors: x_t, h_{t-1}, c_{t-1}."""
    shape = (4, 4, 4, 4)
    return [torch.rand(*shape), torch.rand(*shape), torch.rand(*shape)]
def get_init_inputs():
    """Constructor arguments: no positionals, 4-dim input and hidden."""
    return [[], dict(input_num=4, hidden_num=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_out_ptr0,
    in_out_ptr1, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1,
    out_ptr2, xnumel, XBLOCK: tl.constexpr):
    # Fused LSTM gate math. Each in_out buffer holds a gate pre-activation
    # (xW product) that gets bias + hidden product added, then activated:
    #   it = sigmoid(.), ft = sigmoid(.), candidate = tanh(.), ot = sigmoid(.)
    #   ct = ft*ct_1 + it*candidate;  ht = ot*tanh(ct)
    # Also emits ot*(1-ot) (sigmoid backward term) for the autograd graph.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # bias index (per hidden unit)
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)                      # xt@Wxi
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')  # Wxi bias
    tmp3 = tl.load(in_ptr1 + x2, xmask)                          # ht_1@Whi
    tmp6 = tl.load(in_out_ptr1 + x2, xmask)                      # xt@Wxc
    tmp7 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')  # Wxc bias
    tmp9 = tl.load(in_ptr3 + x2, xmask)                          # ht_1@Whc
    tmp12 = tl.load(in_out_ptr2 + x2, xmask)                     # xt@Wxo
    tmp13 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')  # Wxo bias
    tmp15 = tl.load(in_ptr5 + x2, xmask)                         # ht_1@Who
    tmp18 = tl.load(in_ptr6 + x2, xmask)                         # xt@Wxf
    tmp19 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last')  # Wxf bias
    tmp21 = tl.load(in_ptr8 + x2, xmask)                         # ht_1@Whf
    tmp24 = tl.load(in_ptr9 + x2, xmask)                         # ct_1
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.sigmoid(tmp4)          # input gate it
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp11 = tl.sigmoid(tmp10)        # candidate gate (sigmoid slot)
    tmp14 = tmp12 + tmp13
    tmp16 = tmp14 + tmp15
    tmp17 = libdevice.tanh(tmp16)    # tanh slot
    tmp20 = tmp18 + tmp19
    tmp22 = tmp20 + tmp21
    tmp23 = tl.sigmoid(tmp22)        # forget gate ft
    tmp25 = tmp23 * tmp24            # ft * ct_1
    tmp26 = tmp5 * tmp17             # it * candidate
    tmp27 = tmp25 + tmp26            # new cell state ct
    tmp28 = 1.0
    tmp29 = tmp28 - tmp23
    tmp30 = tmp23 * tmp29            # sigmoid-backward term ft*(1-ft)
    tmp31 = libdevice.tanh(tmp27)
    tmp32 = tmp11 * tmp31            # new hidden state ht
    tl.store(in_out_ptr0 + x2, tmp5, xmask)
    tl.store(in_out_ptr1 + x2, tmp11, xmask)
    tl.store(in_out_ptr2 + x2, tmp17, xmask)
    tl.store(out_ptr0 + x2, tmp27, xmask)   # ct
    tl.store(out_ptr1 + x2, tmp30, xmask)   # backward term
    tl.store(out_ptr2 + x2, tmp32, xmask)   # ht
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3)
del primals_6
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf4)
del primals_8
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5)
del primals_9
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf6)
del primals_11
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf8)
del primals_13
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf9)
del primals_15
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0[grid(256)](
buf2, buf7, buf10, primals_2, buf1, primals_10, buf6,
primals_14, buf9, buf3, primals_7, buf4, primals_12, buf11,
buf13, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del buf3
del buf4
del buf6
del buf9
del primals_10
del primals_14
del primals_2
del primals_7
return buf12, buf11, primals_12, reinterpret_tensor(primals_3, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0
), buf2, buf7, buf10, buf11, buf13
class lstm_cellNew(nn.Module):
    """LSTM cell whose forward pass is routed through the fused inductor
    `call` graph defined above.

    Holds eight linear layers: input->hidden (Wx*, with bias) and
    hidden->hidden (Wh*, no bias) projections for the input (i),
    forget (f), cell (c) and output (o) gates.
    """

    def __init__(self, input_num, hidden_num):
        super(lstm_cellNew, self).__init__()
        self.input_num = input_num
        self.hidden_num = hidden_num
        self.Wxi = nn.Linear(self.input_num, self.hidden_num, bias=True)
        self.Whi = nn.Linear(self.hidden_num, self.hidden_num, bias=False)
        self.Wxf = nn.Linear(self.input_num, self.hidden_num, bias=True)
        self.Whf = nn.Linear(self.hidden_num, self.hidden_num, bias=False)
        self.Wxc = nn.Linear(self.input_num, self.hidden_num, bias=True)
        self.Whc = nn.Linear(self.hidden_num, self.hidden_num, bias=False)
        self.Wxo = nn.Linear(self.input_num, self.hidden_num, bias=True)
        self.Who = nn.Linear(self.hidden_num, self.hidden_num, bias=False)

    def forward(self, input_0, input_1, input_2):
        # Unpack weights/biases into the exact primals_* slots expected by
        # `call`; the numbering is fixed by the traced graph, do not reorder.
        primals_1 = self.Wxi.weight
        primals_2 = self.Wxi.bias
        primals_4 = self.Whi.weight
        primals_6 = self.Wxf.weight
        primals_7 = self.Wxf.bias
        primals_8 = self.Whf.weight
        primals_9 = self.Wxc.weight
        primals_10 = self.Wxc.bias
        primals_11 = self.Whc.weight
        primals_13 = self.Wxo.weight
        primals_14 = self.Wxo.bias
        primals_15 = self.Who.weight
        # inputs: presumably x, hidden state and cell state — TODO confirm
        # against the original lstm_cell.forward signature.
        primals_3 = input_0
        primals_5 = input_1
        primals_12 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15])
        # First two outputs of the fused graph are returned to the caller.
        return output[0], output[1]
|
dreamer121121/action-recognition-models-pytorch
|
lstm_cell
| false
| 15,253
|
[
"MIT"
] | 200
|
6a8a5e9678c359f795079d1f9f3cbdb9502b363d
|
https://github.com/dreamer121121/action-recognition-models-pytorch/tree/6a8a5e9678c359f795079d1f9f3cbdb9502b363d
|
ConvSig
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.parallel
from torch import optim as optim
def autopad(k, p=None):
    """Return 'same' padding for kernel size *k* unless *p* is given."""
    if p is not None:
        return p
    # Halve each kernel dimension (integer division) for SAME-style padding.
    return k // 2 if isinstance(k, int) else [dim // 2 for dim in k]
class ConvSig(nn.Module):
    """Bias-free Conv2d followed by an optional sigmoid activation."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        super(ConvSig, self).__init__()
        # Padding defaults to "same" via autopad when p is None.
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g,
            bias=False)
        self.act = nn.Sigmoid() if act else nn.Identity()

    def forward(self, x):
        out = self.conv(x)
        return self.act(out)

    def fuseforward(self, x):
        # Identical to forward here (no BN to fuse in this variant).
        out = self.conv(x)
        return self.act(out)
def get_inputs():
    """Sample forward inputs: one random NCHW tensor."""
    shape = (4, 4, 4, 4)
    return [torch.rand(*shape)]
def get_init_inputs():
    """Constructor args/kwargs for the module under test."""
    kwargs = {'c1': 4, 'c2': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
import torch.nn.parallel
from torch import optim as optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Element-wise in-place sigmoid over a flat float buffer.
    # NOTE: xnumel is hard-coded to 256 below, shadowing the launch argument.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel  # guard the ragged final block
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
    """Fused forward for ConvSig: cuDNN convolution then in-place sigmoid.

    args: [conv_weight (4,4,1,1), input (4,4,4,4)]; the list is cleared so
    the caller drops its references. Returns (activated, weight, input,
    activated) — the trailing entries are saved for backward.
    """
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # 1x1 convolution via the extern (cuDNN) kernel.
        buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # Sigmoid is applied in place on the convolution output.
        triton_poi_fused_sigmoid_0[grid(256)](buf1, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
    return buf1, primals_1, primals_2, buf1
def autopad(k, p=None):
    """Return 'same' padding for kernel size *k* unless *p* is supplied."""
    if p is not None:
        return p
    if isinstance(k, int):
        return k // 2
    return [dim // 2 for dim in k]
class ConvSigNew(nn.Module):
    """ConvSig variant whose forward runs through the fused `call` graph
    (extern convolution + in-place Triton sigmoid) instead of eager ops.
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        super(ConvSigNew, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
            )
        # NOTE: `act` only selects the eager module; the fused forward below
        # always applies sigmoid regardless of this flag.
        self.act = nn.Sigmoid() if act else nn.Identity()

    def fuseforward(self, x):
        # Eager fallback path (unfused).
        return self.act(self.conv(x))

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
|
dumpmemory/NonDeepNetworks
|
ConvSig
| false
| 15,254
|
[
"BSD-3-Clause"
] | 307
|
5513bf588f4e64c99583440507232675c2e21e34
|
https://github.com/dumpmemory/NonDeepNetworks/tree/5513bf588f4e64c99583440507232675c2e21e34
|
CPC
|
import torch
import torch.nn as nn
class CPC(nn.Module):
    """
    Contrastive Predictive Coding (InfoNCE) score computation.
    See https://arxiv.org/pdf/1807.03748.pdf.

    Args:
        x_size (int): embedding size of input modality representation x
        y_size (int): embedding size of input modality representation y
    """

    def __init__(self, x_size, y_size, n_layers=1, activation='Tanh'):
        super().__init__()
        self.x_size = x_size
        self.y_size = y_size
        self.layers = n_layers
        self.activation = getattr(nn, activation)
        if n_layers == 1:
            self.net = nn.Linear(in_features=y_size, out_features=x_size)
        else:
            modules = []
            for idx in range(n_layers):
                if idx == 0:
                    # First layer projects y -> x and gets the activation.
                    modules.append(nn.Linear(self.y_size, self.x_size))
                    modules.append(self.activation())
                else:
                    modules.append(nn.Linear(self.x_size, self.x_size))
            self.net = nn.Sequential(*modules)

    def forward(self, x, y):
        """Compute the NCE score between x and the prediction net(y)."""
        prediction = self.net(y)
        # L2-normalize both representations row-wise.
        prediction = prediction / prediction.norm(dim=1, keepdim=True)
        x = x / x.norm(dim=1, keepdim=True)
        positive = (x * prediction).sum(dim=-1)
        negative = torch.logsumexp(x @ prediction.t(), dim=-1)
        return -(positive - negative).mean()
def get_inputs():
    """Sample forward inputs: two random (4, 4) tensors (x and y)."""
    return [torch.rand(4, 4), torch.rand(4, 4)]
def get_init_inputs():
    """Constructor args/kwargs for the module under test."""
    return [[], dict(x_size=4, y_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Row-wise L2 normalization of a (4, 4) tensor: out = in / ||row(in)||.
    # NOTE: xnumel is hard-coded to 16 below, shadowing the launch argument.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex       # flat element index
    x1 = xindex // 4  # row index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # Load the four elements of the row to accumulate the squared norm.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = tmp0 / tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_per_fused_logsumexp_mean_mul_neg_sub_sum_1(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Single-program reduction computing the scalar NCE loss:
    #   -mean_r( dot(in0[r], in1[r]) - logsumexp(in2[r, :]) )
    # in_ptr0/in_ptr1: normalized x and x_pred; in_ptr2: their Gram matrix.
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
    # Positive term: per-row dot product of in0 and in1 (4 elements unrolled).
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    # Negative term: numerically stable logsumexp over the row of in2
    # (max-subtraction trick, with +/-inf max mapped to 0).
    tmp17 = triton_helpers.maximum(tmp15, tmp16)
    tmp19 = triton_helpers.maximum(tmp17, tmp18)
    tmp21 = triton_helpers.maximum(tmp19, tmp20)
    tmp22 = tl_math.abs(tmp21)
    tmp23 = float('inf')
    tmp24 = tmp22 == tmp23
    tmp25 = 0.0
    tmp26 = tl.where(tmp24, tmp25, tmp21)
    tmp27 = tmp15 - tmp26
    tmp28 = tl_math.exp(tmp27)
    tmp29 = tmp16 - tmp26
    tmp30 = tl_math.exp(tmp29)
    tmp31 = tmp28 + tmp30
    tmp32 = tmp18 - tmp26
    tmp33 = tl_math.exp(tmp32)
    tmp34 = tmp31 + tmp33
    tmp35 = tmp20 - tmp26
    tmp36 = tl_math.exp(tmp35)
    tmp37 = tmp34 + tmp36
    tmp38 = tl_math.log(tmp37)
    tmp39 = tmp38 + tmp26
    # Reduce: mean over the 4 rows, then negate.
    tmp40 = tmp14 - tmp39
    tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
    tmp43 = tl.sum(tmp41, 1)[:, None]
    tmp44 = 4.0
    tmp45 = tmp43 / tmp44
    tmp46 = -tmp45
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp46, None)
def call(args):
    """Fused CPC forward: linear projection, row-wise L2 normalization of
    both inputs, Gram matrix, then the scalar NCE reduction kernel.

    args: [net.weight, net.bias, input_0, input_1].
    """
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # buf0 = bias + primals_3 @ weight^T (the linear layer).
        # NOTE(review): the layer is applied to primals_3 (= input_0); confirm
        # this matches CPC.forward(x, y) where the net is applied to y.
        extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
            primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        # buf1 = row-normalized prediction; buf2 = row-normalized other input.
        triton_poi_fused_div_linalg_vector_norm_0[grid(16)](buf0, buf1, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_div_linalg_vector_norm_0[grid(16)](primals_4, buf2,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # buf3 = buf2 @ buf1^T (all pairwise similarities).
        extern_kernels.mm(buf2, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
            out=buf3)
        buf5 = empty_strided_cuda((), (), torch.float32)
        buf6 = buf5
        del buf5
        # Scalar reduction into buf6: -(pos - logsumexp(neg)).mean().
        triton_per_fused_logsumexp_mean_mul_neg_sub_sum_1[grid(1)](buf6,
            buf2, buf1, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf1
    return buf6, primals_3, buf0, buf2, buf3
class CPCNew(nn.Module):
    """
    Contrastive Predictive Coding: score computation. See https://arxiv.org/pdf/1807.03748.pdf.

    This variant routes forward through the fused inductor `call` graph;
    the fused path only supports the single-layer net (n_layers == 1).

    Args:
        x_size (int): embedding size of input modality representation x
        y_size (int): embedding size of input modality representation y
    """

    def __init__(self, x_size, y_size, n_layers=1, activation='Tanh'):
        super().__init__()
        self.x_size = x_size
        self.y_size = y_size
        self.layers = n_layers
        self.activation = getattr(nn, activation)
        if n_layers == 1:
            self.net = nn.Linear(in_features=y_size, out_features=x_size)
        else:
            net = []
            for i in range(n_layers):
                if i == 0:
                    net.append(nn.Linear(self.y_size, self.x_size))
                    net.append(self.activation())
                else:
                    net.append(nn.Linear(self.x_size, self.x_size))
            self.net = nn.Sequential(*net)

    def forward(self, input_0, input_1):
        # `call` expects the single Linear's weight/bias plus the two inputs.
        primals_1 = self.net.weight
        primals_2 = self.net.bias
        primals_3 = input_0
        primals_4 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
|
dumpmemory/Multimodal-Infomax
|
CPC
| false
| 15,255
|
[
"MIT"
] | 57
|
9a6dc8f2bfa861cd447ba65c6a037cd7dd24f473
|
https://github.com/dumpmemory/Multimodal-Infomax/tree/9a6dc8f2bfa861cd447ba65c6a037cd7dd24f473
|
GroupLinear
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class GroupLinear(nn.Module):
    """Grouped linear projection over the last dim of a (T, B, D) tensor.

    The feature dimension is split into `groups` chunks; each chunk gets
    its own (group_in_dim, group_out_dim) weight. Both the weight and the
    bias are zero-initialized.
    """

    def __init__(self, in_planes, out_channels, groups=1, bias=True):
        super(GroupLinear, self).__init__()
        assert in_planes % groups == 0
        assert out_channels % groups == 0
        self.in_dim = in_planes
        self.out_dim = out_channels
        self.groups = groups
        self.bias = bias
        self.group_in_dim = self.in_dim // self.groups
        self.group_out_dim = self.out_dim // self.groups
        self.group_weight = nn.Parameter(torch.zeros(self.groups,
            self.group_in_dim, self.group_out_dim))
        self.group_bias = nn.Parameter(torch.zeros(self.out_dim))

    def forward(self, x):
        t, b, d = x.size()
        # Split features into groups, project each group, then flatten back.
        grouped = x.view(t, b, self.groups, d // self.groups)
        projected = torch.einsum('tbgd,gdf->tbgf', (grouped, self.group_weight))
        return projected.reshape(t, b, self.out_dim) + self.group_bias

    def extra_repr(self):
        spec = '{in_dim}, {out_dim}'
        if self.groups != 1:
            spec += ', groups={groups}'
        if self.bias is None:
            spec += ', bias=False'
        return spec.format(**self.__dict__)
def get_inputs():
    """Sample forward input: one random (T, B, D) tensor."""
    return [torch.rand(4, 4, 4)]
def get_init_inputs():
    """Constructor args/kwargs for the module under test."""
    return [[], dict(in_planes=4, out_channels=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place broadcast add of a length-4 bias over the last dimension.
    # NOTE: xnumel is hard-coded to 64 below, shadowing the launch argument.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex      # flat element index
    x0 = xindex % 4  # position within the last dim -> bias index
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
    """Fused GroupLinear forward (groups == 1 case): the einsum collapses to
    one bmm, followed by an in-place bias add.

    args: [input (4,4,4), group_weight (1,4,4), group_bias (4,)].
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 16, 4), (64, 4, 1), torch.float32)
        # (1, 16, 4) x (1, 4, 4): batch the T*B rows through the group weight.
        extern_kernels.bmm(reinterpret_tensor(primals_1, (1, 16, 4), (64, 4,
            1), 0), primals_2, out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        # Add the bias in place over the last dimension.
        triton_poi_fused_add_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_3
    return buf1, reinterpret_tensor(primals_1, (1, 4, 16), (64, 1, 4), 0)
class GroupLinearNew(nn.Module):
    """
    Group Linear operator

    Variant whose forward runs through the fused inductor `call` graph
    (bmm + bias-add) instead of the eager einsum.
    """

    def __init__(self, in_planes, out_channels, groups=1, bias=True):
        super(GroupLinearNew, self).__init__()
        assert in_planes % groups == 0
        assert out_channels % groups == 0
        self.in_dim = in_planes
        self.out_dim = out_channels
        self.groups = groups
        self.bias = bias
        self.group_in_dim = int(self.in_dim / self.groups)
        self.group_out_dim = int(self.out_dim / self.groups)
        self.group_weight = nn.Parameter(torch.zeros(self.groups, self.
            group_in_dim, self.group_out_dim))
        self.group_bias = nn.Parameter(torch.zeros(self.out_dim))

    def extra_repr(self):
        s = '{in_dim}, {out_dim}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is None:
            s += ', bias=False'
        return s.format(**self.__dict__)

    def forward(self, input_0):
        primals_2 = self.group_weight
        primals_3 = self.group_bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
dumpmemory/TokenLabeling
|
GroupLinear
| false
| 15,256
|
[
"Apache-2.0"
] | 367
|
9dbfd59aedecfe83f6f3253db4e99b82359d48ac
|
https://github.com/dumpmemory/TokenLabeling/tree/9dbfd59aedecfe83f6f3253db4e99b82359d48ac
|
Biaffine
|
import torch
import torch.autograd
import torch.nn as nn
class Biaffine(nn.Module):
    """Biaffine attention scorer: s[b, x, y, o] = x_b^T W_o y_b, with an
    optional constant 1 appended to x and/or y to provide bias terms."""

    def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
        super(Biaffine, self).__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.bias_x = bias_x
        self.bias_y = bias_y
        # Weight has one extra row/column per enabled bias flag.
        weight = torch.zeros((n_out, n_in + int(bias_x), n_in + int(bias_y)))
        nn.init.xavier_normal_(weight)
        self.weight = nn.Parameter(weight, requires_grad=True)

    def extra_repr(self):
        s = f'n_in={self.n_in}, n_out={self.n_out}'
        if self.bias_x:
            s += f', bias_x={self.bias_x}'
        if self.bias_y:
            s += f', bias_y={self.bias_y}'
        return s

    def forward(self, x, y):
        if self.bias_x:
            ones = torch.ones_like(x[..., :1])
            x = torch.cat((x, ones), -1)
        if self.bias_y:
            ones = torch.ones_like(y[..., :1])
            y = torch.cat((y, ones), -1)
        scores = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
        # Move the n_out channel to the last axis: (b, x, y, o).
        return scores.permute(0, 2, 3, 1)
def get_inputs():
    """Sample forward inputs: two random (B, L, D) tensors."""
    return [torch.rand(4, 4, 4), torch.rand(4, 4, 4)]
def get_init_inputs():
    """Constructor args/kwargs for the module under test."""
    return [[], dict(n_in=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.autograd
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused torch.cat((x, ones), -1): widens the last dim from 4 to 5,
    # copying the input into columns 0..3 and writing 1.0 into column 4.
    # NOTE: xnumel is hard-coded to 80 below, shadowing the launch argument.
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5   # output column
    x1 = xindex // 5  # flattened row
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # columns 0..3 come from the input
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3  # column 4 is the bias column of ones
    tl.full([1], 5, tl.int64)
    tmp9 = 1.0
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp6, tmp9, tmp10)
    tmp12 = tl.where(tmp4, tmp5, tmp11)
    tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
    """Fused Biaffine forward (n_out == 1): append bias columns to both
    inputs via the cat kernel, then realize the einsum as two bmms.

    args: [x (4,4,4), y (4,4,4), weight (1,5,5)].
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
        get_raw_stream(0)
        # buf0 = cat(x, ones) -> (4, 4, 5)
        triton_poi_fused_cat_0[grid(80)](primals_1, buf0, 80, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
        # buf1 = (x⊕1) @ W, with x flattened to a single batch of 16 rows.
        extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1),
            0), primals_3, out=buf1)
        del primals_3
        buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
        # buf2 = cat(y, ones) -> (4, 4, 5)
        triton_poi_fused_cat_0[grid(80)](primals_2, buf2, 80, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_2
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # buf3 = (y⊕1) @ ((x⊕1) W)^T per batch -> the (b, y, x) scores.
        extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1,
            5), 0), out=buf3)
        del buf1
    return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 1, 4, 4), 0
        ), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0
        ), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0)
class BiaffineNew(nn.Module):
    """Biaffine scorer whose forward runs through the fused inductor `call`
    graph (cat + two bmms) instead of the eager einsum."""

    def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
        super(BiaffineNew, self).__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.bias_x = bias_x
        self.bias_y = bias_y
        weight = torch.zeros((n_out, n_in + int(bias_x), n_in + int(bias_y)))
        nn.init.xavier_normal_(weight)
        self.weight = nn.Parameter(weight, requires_grad=True)

    def extra_repr(self):
        s = f'n_in={self.n_in}, n_out={self.n_out}'
        if self.bias_x:
            s += f', bias_x={self.bias_x}'
        if self.bias_y:
            s += f', bias_y={self.bias_y}'
        return s

    def forward(self, input_0, input_1):
        primals_3 = self.weight
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
dumpmemory/W2NER
|
Biaffine
| false
| 15,257
|
[
"MIT"
] | 128
|
fb1b6eb1111eb001b1c965097d995244b840bdda
|
https://github.com/dumpmemory/W2NER/tree/fb1b6eb1111eb001b1c965097d995244b840bdda
|
LabelSmoothingCrossEntropyV1
|
import torch
from torch import nn
class LabelSmoothingCrossEntropyV1(nn.Module):

    def __init__(self, eps=0.1, reduction='mean', ignore_index=-100):
        """Label-smoothing cross entropy on raw (pre-sigmoid) logits.
        Smoothing is applied by scaling the logits by (1 - eps) and adding
        a constant eps / V term before log-sigmoid.
        url: [pytorch | labelSmooth](https://zhuanlan.zhihu.com/p/265704145)
        args:
            ignore_index: (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. Default: -100
            reduction: str, Specifies the reduction to apply to the output,
                eg.``'none'`` | ``'mean'`` | ``'sum'``.
            eps: float, smoothing factor in (0, 1). eg. 0.1
        returns:
            Tensor of loss.
        examples:
            >>> loss = LabelSmoothingCrossEntropyV1()(logits, label)
        """
        super(LabelSmoothingCrossEntropyV1, self).__init__()
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.eps = eps

    def forward(self, logits, labels):
        # V: smoothing denominator (num classes - 1, floored at 1).
        V = max(logits.size()[-1] - 1, 1)
        logits_smooth = (1 - self.eps) * logits + self.eps / V
        logits_smooth_logsigmoid = torch.nn.functional.logsigmoid(logits_smooth
            )
        # NOTE(review): labels*ls + (1-labels)*ls algebraically reduces to ls,
        # making the loss label-independent; kept as-is to preserve existing
        # behavior, but the negative term likely should use
        # logsigmoid(-logits_smooth) — confirm against training results.
        loss = -(labels * logits_smooth_logsigmoid + (1 - labels) *
            logits_smooth_logsigmoid)
        loss = loss.sum(dim=1)
        if 'mean' == self.reduction:
            loss = loss.mean()
        elif 'sum' == self.reduction:
            loss = loss.sum()
        else:
            # Bug fix: the original bare `_` here raised NameError for
            # reduction='none'; now the per-sample loss is returned unchanged.
            pass
        return loss
def get_inputs():
    """Sample forward inputs: random logits and labels of the same shape."""
    shape = (4, 4, 4, 4)
    return [torch.rand(*shape), torch.rand(*shape)]
def get_init_inputs():
    """Constructor args/kwargs (defaults only) for the module under test."""
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_sigmoid_forward_mean_mul_neg_rsub_sum_0(
    in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Single-program reduction for the smoothed loss:
    # per element, ls = logsigmoid(0.9 * logit + 0.1/3), then
    # -(label*ls + (1-label)*ls) is summed over dim 1 (4 slices of 16,
    # unrolled below) and averaged over the remaining 64 positions.
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
    tmp19 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
    tmp35 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp36 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
    tmp51 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp52 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
    # Slice 0: smooth, stable logsigmoid = min(0,x) - log1p(exp(-|x|)).
    tmp2 = 0.9
    tmp3 = tmp1 * tmp2
    tmp4 = 0.03333333333333333
    tmp5 = tmp3 + tmp4
    tmp6 = 0.0
    tmp7 = triton_helpers.minimum(tmp6, tmp5)
    tmp8 = tl_math.abs(tmp5)
    tmp9 = -tmp8
    tmp10 = tl_math.exp(tmp9)
    tmp11 = libdevice.log1p(tmp10)
    tmp12 = tmp7 - tmp11
    tmp13 = tmp0 * tmp12
    tmp14 = 1.0
    tmp15 = tmp14 - tmp0
    tmp16 = tmp15 * tmp12
    tmp17 = tmp13 + tmp16
    tmp18 = -tmp17
    # Slice 1 (same computation, offset 16).
    tmp21 = tmp20 * tmp2
    tmp22 = tmp21 + tmp4
    tmp23 = triton_helpers.minimum(tmp6, tmp22)
    tmp24 = tl_math.abs(tmp22)
    tmp25 = -tmp24
    tmp26 = tl_math.exp(tmp25)
    tmp27 = libdevice.log1p(tmp26)
    tmp28 = tmp23 - tmp27
    tmp29 = tmp19 * tmp28
    tmp30 = tmp14 - tmp19
    tmp31 = tmp30 * tmp28
    tmp32 = tmp29 + tmp31
    tmp33 = -tmp32
    tmp34 = tmp18 + tmp33
    # Slice 2 (offset 32).
    tmp37 = tmp36 * tmp2
    tmp38 = tmp37 + tmp4
    tmp39 = triton_helpers.minimum(tmp6, tmp38)
    tmp40 = tl_math.abs(tmp38)
    tmp41 = -tmp40
    tmp42 = tl_math.exp(tmp41)
    tmp43 = libdevice.log1p(tmp42)
    tmp44 = tmp39 - tmp43
    tmp45 = tmp35 * tmp44
    tmp46 = tmp14 - tmp35
    tmp47 = tmp46 * tmp44
    tmp48 = tmp45 + tmp47
    tmp49 = -tmp48
    tmp50 = tmp34 + tmp49
    # Slice 3 (offset 48).
    tmp53 = tmp52 * tmp2
    tmp54 = tmp53 + tmp4
    tmp55 = triton_helpers.minimum(tmp6, tmp54)
    tmp56 = tl_math.abs(tmp54)
    tmp57 = -tmp56
    tmp58 = tl_math.exp(tmp57)
    tmp59 = libdevice.log1p(tmp58)
    tmp60 = tmp55 - tmp59
    tmp61 = tmp51 * tmp60
    tmp62 = tmp14 - tmp51
    tmp63 = tmp62 * tmp60
    tmp64 = tmp61 + tmp63
    tmp65 = -tmp64
    tmp66 = tmp50 + tmp65
    # Final mean over the 64 reduced positions.
    tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK])
    tmp69 = tl.sum(tmp67, 1)[:, None]
    tmp70 = 64.0
    tmp71 = tmp69 / tmp70
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp71, None)
def call(args):
    """Fused LabelSmoothingCrossEntropyV1 forward: one reduction kernel
    producing the scalar mean loss.

    args: [labels, logits] as (4,4,4,4) tensors; note the kernel is launched
    with (labels, logits) swapped relative to the arg order (arg1_1, arg0_1).
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_per_fused_add_log_sigmoid_forward_mean_mul_neg_rsub_sum_0[grid
            (1)](buf2, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2,
            num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,
class LabelSmoothingCrossEntropyV1New(nn.Module):

    def __init__(self, eps=0.1, reduction='mean', ignore_index=-100):
        """Label-smoothing cross entropy on raw (pre-sigmoid) logits,
        routed through the fused Triton `call` graph above.
        The fused path implements the 'mean' reduction only.
        url: [pytorch | labelSmooth](https://zhuanlan.zhihu.com/p/265704145)
        args:
            ignore_index: (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. Default: -100
            reduction: str, Specifies the reduction to apply to the output,
                eg.``'none'`` | ``'mean'`` | ``'sum'``.
            eps: float, smoothing factor in (0, 1). eg. 0.1
        returns:
            Tensor of loss.
        examples:
            >>> loss = LabelSmoothingCrossEntropyV1()(logits, label)
        """
        super(LabelSmoothingCrossEntropyV1New, self).__init__()
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.eps = eps

    def forward(self, input_0, input_1):
        # NOTE(review): eps/reduction attributes are not forwarded — the
        # fused kernel hard-codes eps=0.1 and mean reduction.
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
|
dumpmemory/Pytorch-NLU
|
LabelSmoothingCrossEntropyV1
| false
| 15,258
|
[
"Apache-2.0"
] | 115
|
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
https://github.com/dumpmemory/Pytorch-NLU/tree/864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
GroupNorm
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class GroupNorm(nn.Module):
    """GroupNorm over token embeddings: (B, T, C) is flattened to (B*T, C),
    normalized, then reshaped back."""

    def __init__(self, num_groups, embed_dim, eps=1e-05, affine=True):
        super().__init__()
        self.gn = nn.GroupNorm(num_groups, embed_dim, eps, affine)

    def forward(self, x):
        B, T, C = x.shape
        flat = x.view(B * T, C)
        normed = self.gn(flat)
        return normed.view(B, T, C)
def get_inputs():
    """Sample forward input: one random (B, T, C) tensor."""
    return [torch.rand(4, 4, 4)]
def get_init_inputs():
    """Constructor args/kwargs for the module under test."""
    return [[], dict(num_groups=1, embed_dim=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_group_norm_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # Per-row group-norm statistics over rows of 4 elements:
    # out_ptr0 = mean, out_ptr1 = rsqrt(var + 1e-5).
    # NOTE: xnumel is hard-coded to 16 below, shadowing the launch argument.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex  # one row (group) per lane
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    # Mean of the 4 row elements.
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    # Biased variance (sum of squared deviations / 4).
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Apply group norm: out = (x - mean) * rstd * weight + bias.
    # in_ptr1/in_ptr2 hold per-row mean/rstd; in_ptr3/in_ptr4 the per-channel
    # affine weight/bias.
    # NOTE: xnumel is hard-coded to 64 below, shadowing the launch argument.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex       # flat element index
    x1 = xindex // 4  # row -> statistics index
    x0 = xindex % 4   # channel -> affine index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
    """Fused GroupNorm forward: compute per-row statistics, then normalize
    with the affine parameters; the (B*T, C) result is reshaped to (B, T, C).

    args: [input (4,4,4), gn.weight (4,), gn.bias (4,)].
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0/buf1: per-row mean and rstd for the 16 flattened rows.
        buf0 = empty_strided_cuda((16, 1, 1, 1), (1, 16, 16, 16), torch.float32
            )
        buf1 = empty_strided_cuda((16, 1, 1, 1), (1, 16, 16, 16), torch.float32
            )
        get_raw_stream(0)
        triton_poi_fused_native_group_norm_0[grid(16)](primals_1, buf0,
            buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Normalize + affine transform into buf2.
        triton_poi_fused_native_group_norm_1[grid(64)](primals_1, buf0,
            buf1, primals_2, primals_3, buf2, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del buf0
        del buf1
        del primals_2
        del primals_3
    return reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), primals_1
class GroupNormNew(nn.Module):
    """GroupNorm over token embeddings, routed through the fused inductor
    `call` graph instead of the eager nn.GroupNorm forward."""

    def __init__(self, num_groups, embed_dim, eps=1e-05, affine=True):
        super().__init__()
        # Kept as a module so the affine weight/bias are registered params.
        self.gn = nn.GroupNorm(num_groups, embed_dim, eps, affine)

    def forward(self, input_0):
        primals_2 = self.gn.weight
        primals_3 = self.gn.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
dumpmemory/TokenLabeling
|
GroupNorm
| false
| 15,259
|
[
"Apache-2.0"
] | 367
|
9dbfd59aedecfe83f6f3253db4e99b82359d48ac
|
https://github.com/dumpmemory/TokenLabeling/tree/9dbfd59aedecfe83f6f3253db4e99b82359d48ac
|
FCLayer
|
import torch
from torch import nn
class FCLayer(nn.Module):

    def __init__(self, input_dim, output_dim, dropout_rate=0.1, is_active=
        True, is_dropout=True, active_type='mish'):
        """
        FC layer, typically the final projection of a model.
        args:
            input_dim: input dimension, eg. 768
            output_dim: output dimension, eg. 32
            dropout_rate: dropout probability, eg. 0.1
            is_dropout: whether to apply dropout before the linear layer
            is_active: whether to apply an activation after the linear layer
            active_type: activation name, eg. "mish", "swish", "tanh",
                "gelu", "relu" (anything else falls back to relu)
        Returns:
            Tensor of batch.
        """
        super(FCLayer, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)
        self.dropout = nn.Dropout(dropout_rate)
        self.is_dropout = is_dropout
        self.active_type = active_type
        self.is_active = is_active
        self.softmax = nn.Softmax(1)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU(inplace=True)
        self.tanh = nn.Tanh()
        self.gelu = nn.GELU()

    def forward(self, x):
        if self.is_dropout:
            x = self.dropout(x)
        x = self.linear(x)
        if not self.is_active:
            return x
        kind = self.active_type.upper()
        if kind == 'MISH':
            return x * torch.tanh(nn.functional.softplus(x))
        if kind == 'SWISH':
            return x * torch.sigmoid(x)
        if kind == 'TANH':
            return self.tanh(x)
        if kind == 'GELU':
            return self.gelu(x)
        # 'RELU' and any unrecognized type both fall back to relu.
        return self.relu(x)
def get_inputs():
    """Sample forward input: one random (4, 4, 4, 4) tensor."""
    shape = (4, 4, 4, 4)
    return [torch.rand(*shape)]
def get_init_inputs():
    """Constructor args/kwargs for the module under test."""
    return [[], dict(input_dim=4, output_dim=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Mish activation: out = x * tanh(softplus(x)), where softplus uses the
    # PyTorch threshold trick (x > 20 -> identity) for numerical stability.
    # NOTE: xnumel is hard-coded to 256 below, shadowing the launch argument.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)  # softplus with overflow guard
    tmp6 = libdevice.tanh(tmp5)
    tmp7 = tmp0 * tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
    """Inductor entry point: addmm (linear layer) followed by the fused Mish
    kernel.  Expects [input (4,4,4,4), weight (4,4), bias (4,)] on CUDA."""
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # buf0 = bias + input.view(64, 4) @ weight.T
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # buf1 = mish(buf0), reshaped back to (4, 4, 4, 4).
        triton_poi_fused_mul_softplus_tanh_0[grid(256)](buf0, buf1, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
    return buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0
class FCLayerNew(nn.Module):
    """FCLayer variant whose forward pass runs the compiled inductor path
    (``call``): dense layer plus the fused Mish kernel.

    The dropout/activation configuration arguments are kept for interface
    compatibility with ``FCLayer``.
    """

    def __init__(self, input_dim, output_dim, dropout_rate=0.1, is_active=
        True, is_dropout=True, active_type='mish'):
        super(FCLayerNew, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)
        self.dropout = nn.Dropout(dropout_rate)
        self.is_dropout = is_dropout
        self.is_active = is_active
        self.active_type = active_type
        # Pre-built activation modules, mirroring FCLayer.
        self.softmax = nn.Softmax(1)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU(inplace=True)
        self.tanh = nn.Tanh()
        self.gelu = nn.GELU()

    def forward(self, input_0):
        weight = self.linear.weight
        bias = self.linear.bias
        # call expects [input, weight, bias]; first output is the result tensor.
        output = call([input_0, weight, bias])
        return output[0]
|
dumpmemory/Pytorch-NLU
|
FCLayer
| false
| 15,260
|
[
"Apache-2.0"
] | 115
|
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
https://github.com/dumpmemory/Pytorch-NLU/tree/864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
ClassifierHead
|
import torch
import torch.utils.data
import torchvision.transforms.functional as F
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
from torch import optim as optim
def adaptive_avgmax_pool2d(x, output_size=1):
    """Mean of adaptive average- and max-pooling of ``x`` at ``output_size``."""
    pooled_avg = F.adaptive_avg_pool2d(x, output_size)
    pooled_max = F.adaptive_max_pool2d(x, output_size)
    return 0.5 * (pooled_avg + pooled_max)
def adaptive_catavgmax_pool2d(x, output_size=1):
    """Concatenate adaptive average- and max-pooled ``x`` on the channel dim."""
    pooled_avg = F.adaptive_avg_pool2d(x, output_size)
    pooled_max = F.adaptive_max_pool2d(x, output_size)
    return torch.cat((pooled_avg, pooled_max), 1)
def adaptive_pool_feat_mult(pool_type='avg'):
    """Channel multiplier of a pooling type: 2 for 'catavgmax', else 1."""
    return 2 if pool_type == 'catavgmax' else 1
def _create_fc(num_features, num_classes, pool_type='avg', use_conv=False):
if num_classes <= 0:
fc = nn.Identity()
elif use_conv:
fc = nn.Conv2d(num_features, num_classes, 1, bias=True)
else:
fc = Linear(num_features, num_classes, bias=True)
return fc
def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False):
    """Build the global pooling module and return it with the pooled width."""
    flatten_in_pool = not use_conv
    if not pool_type:
        # Pooling may only be disabled when no (non-conv) classifier follows.
        assert num_classes == 0 or use_conv, 'Pooling can only be disabled if classifier is also removed or conv classifier is used'
        flatten_in_pool = False
    global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=
        flatten_in_pool)
    return global_pool, num_features * global_pool.feat_mult()
class FastAdaptiveAvgPool2d(nn.Module):
    """Global average pool over H and W; optionally flattens to (N, C)."""

    def __init__(self, flatten=False):
        super(FastAdaptiveAvgPool2d, self).__init__()
        self.flatten = flatten

    def forward(self, x):
        if self.flatten:
            return x.mean((2, 3))
        return x.mean((2, 3), keepdim=True)
class AdaptiveAvgMaxPool2d(nn.Module):
    """Module wrapper around :func:`adaptive_avgmax_pool2d`."""
    def __init__(self, output_size=1):
        super(AdaptiveAvgMaxPool2d, self).__init__()
        self.output_size = output_size
    def forward(self, x):
        """Return the avg/max blend pooled to ``self.output_size``."""
        return adaptive_avgmax_pool2d(x, self.output_size)
class AdaptiveCatAvgMaxPool2d(nn.Module):
    """Module wrapper around :func:`adaptive_catavgmax_pool2d`."""
    def __init__(self, output_size=1):
        super(AdaptiveCatAvgMaxPool2d, self).__init__()
        self.output_size = output_size
    def forward(self, x):
        """Return avg- and max-pooled features concatenated on channels."""
        return adaptive_catavgmax_pool2d(x, self.output_size)
class SelectAdaptivePool2d(nn.Module):
    """Selectable global pooling layer with dynamic input kernel size
    """

    def __init__(self, output_size=1, pool_type='fast', flatten=False):
        super(SelectAdaptivePool2d, self).__init__()
        self.pool_type = pool_type or ''
        self.flatten = flatten
        if pool_type == '':
            self.pool = nn.Identity()
        elif pool_type == 'fast':
            # The fast pool flattens internally, so no extra flatten here.
            assert output_size == 1
            self.pool = FastAdaptiveAvgPool2d(self.flatten)
            self.flatten = False
        elif pool_type == 'avg':
            self.pool = nn.AdaptiveAvgPool2d(output_size)
        elif pool_type == 'avgmax':
            self.pool = AdaptiveAvgMaxPool2d(output_size)
        elif pool_type == 'catavgmax':
            self.pool = AdaptiveCatAvgMaxPool2d(output_size)
        elif pool_type == 'max':
            self.pool = nn.AdaptiveMaxPool2d(output_size)
        else:
            assert False, 'Invalid pool type: %s' % pool_type

    def is_identity(self):
        return self.pool_type == ''

    def forward(self, x):
        pooled = self.pool(x)
        return pooled.flatten(1) if self.flatten else pooled

    def feat_mult(self):
        return adaptive_pool_feat_mult(self.pool_type)

    def __repr__(self):
        return f'{self.__class__.__name__} (pool_type={self.pool_type}, flatten={self.flatten})'
class Linear(nn.Linear):
    """Applies a linear transformation to the incoming data: :math:`y = xA^T + b`

    Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting
    weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case.
    """

    def forward(self, input: 'torch.Tensor') ->torch.Tensor:
        if not torch.jit.is_scripting():
            return F.linear(input, self.weight, self.bias)
        # Scripted path: pass the (possibly None) bias explicitly.
        bias = self.bias if self.bias is not None else None
        return F.linear(input, self.weight, bias=bias)
class ClassifierHead(nn.Module):
    """Classifier head w/ configurable global pooling and dropout."""

    def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0.0,
        use_conv=False):
        super(ClassifierHead, self).__init__()
        self.drop_rate = drop_rate
        self.global_pool, num_pooled_features = _create_pool(in_chs,
            num_classes, pool_type, use_conv=use_conv)
        self.fc = _create_fc(num_pooled_features, num_classes, use_conv=
            use_conv)
        self.flatten_after_fc = use_conv and pool_type

    def forward(self, x):
        pooled = self.global_pool(x)
        if self.drop_rate:
            pooled = F.dropout(pooled, p=float(self.drop_rate), training=
                self.training)
        return self.fc(pooled)
def get_inputs():
    """Sample forward inputs: a single random (4, 4, 4, 4) tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Sample constructor arguments as a (positional, keyword) pair."""
    return [[], dict(in_chs=4, num_classes=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torchvision.transforms.functional as F
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
from torch import optim as optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Per-row mean reduction: averages 16 contiguous values per output element
# (global average pooling of a (4, 4, 4, 4) input over H*W = 16).
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    # Sum over the 16-wide reduction axis, then divide for the mean.
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
    """Inductor entry point: global average pool (triton) then addmm (linear).

    Expects [input (4,4,4,4), fc weight (4,4), fc bias (4,)] on CUDA."""
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # buf1 = input.mean over H, W  -> (4, 4, 1, 1)
        triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        del primals_1
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # buf2 = bias + pooled.view(4, 4) @ weight.T
        extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4,
            1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha
            =1, beta=1, out=buf2)
        del primals_2
        del primals_3
    return buf2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0)
def adaptive_avgmax_pool2d(x, output_size=1):
    """Mean of adaptive average- and max-pooling of ``x`` at ``output_size``."""
    pooled_avg = F.adaptive_avg_pool2d(x, output_size)
    pooled_max = F.adaptive_max_pool2d(x, output_size)
    return 0.5 * (pooled_avg + pooled_max)
def adaptive_catavgmax_pool2d(x, output_size=1):
    """Concatenate adaptive average- and max-pooled ``x`` on the channel dim."""
    pooled_avg = F.adaptive_avg_pool2d(x, output_size)
    pooled_max = F.adaptive_max_pool2d(x, output_size)
    return torch.cat((pooled_avg, pooled_max), 1)
def adaptive_pool_feat_mult(pool_type='avg'):
    """Channel multiplier of a pooling type: 2 for 'catavgmax', else 1."""
    return 2 if pool_type == 'catavgmax' else 1
def _create_fc(num_features, num_classes, pool_type='avg', use_conv=False):
if num_classes <= 0:
fc = nn.Identity()
elif use_conv:
fc = nn.Conv2d(num_features, num_classes, 1, bias=True)
else:
fc = Linear(num_features, num_classes, bias=True)
return fc
def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False):
    """Build the global pooling module and return it with the pooled width."""
    flatten_in_pool = not use_conv
    if not pool_type:
        # Pooling may only be disabled when no (non-conv) classifier follows.
        assert num_classes == 0 or use_conv, 'Pooling can only be disabled if classifier is also removed or conv classifier is used'
        flatten_in_pool = False
    global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=
        flatten_in_pool)
    return global_pool, num_features * global_pool.feat_mult()
class FastAdaptiveAvgPool2d(nn.Module):
    """Global average pool over H and W; optionally flattens to (N, C)."""

    def __init__(self, flatten=False):
        super(FastAdaptiveAvgPool2d, self).__init__()
        self.flatten = flatten

    def forward(self, x):
        if self.flatten:
            return x.mean((2, 3))
        return x.mean((2, 3), keepdim=True)
class AdaptiveAvgMaxPool2d(nn.Module):
    """Module wrapper around :func:`adaptive_avgmax_pool2d`."""
    def __init__(self, output_size=1):
        super(AdaptiveAvgMaxPool2d, self).__init__()
        self.output_size = output_size
    def forward(self, x):
        """Return the avg/max blend pooled to ``self.output_size``."""
        return adaptive_avgmax_pool2d(x, self.output_size)
class AdaptiveCatAvgMaxPool2d(nn.Module):
    """Module wrapper around :func:`adaptive_catavgmax_pool2d`."""
    def __init__(self, output_size=1):
        super(AdaptiveCatAvgMaxPool2d, self).__init__()
        self.output_size = output_size
    def forward(self, x):
        """Return avg- and max-pooled features concatenated on channels."""
        return adaptive_catavgmax_pool2d(x, self.output_size)
class SelectAdaptivePool2d(nn.Module):
    """Selectable global pooling layer with dynamic input kernel size
    """

    def __init__(self, output_size=1, pool_type='fast', flatten=False):
        super(SelectAdaptivePool2d, self).__init__()
        self.pool_type = pool_type or ''
        self.flatten = flatten
        if pool_type == '':
            self.pool = nn.Identity()
        elif pool_type == 'fast':
            # The fast pool flattens internally, so no extra flatten here.
            assert output_size == 1
            self.pool = FastAdaptiveAvgPool2d(self.flatten)
            self.flatten = False
        elif pool_type == 'avg':
            self.pool = nn.AdaptiveAvgPool2d(output_size)
        elif pool_type == 'avgmax':
            self.pool = AdaptiveAvgMaxPool2d(output_size)
        elif pool_type == 'catavgmax':
            self.pool = AdaptiveCatAvgMaxPool2d(output_size)
        elif pool_type == 'max':
            self.pool = nn.AdaptiveMaxPool2d(output_size)
        else:
            assert False, 'Invalid pool type: %s' % pool_type

    def is_identity(self):
        return self.pool_type == ''

    def forward(self, x):
        pooled = self.pool(x)
        return pooled.flatten(1) if self.flatten else pooled

    def feat_mult(self):
        return adaptive_pool_feat_mult(self.pool_type)

    def __repr__(self):
        return f'{self.__class__.__name__} (pool_type={self.pool_type}, flatten={self.flatten})'
class Linear(nn.Linear):
    """Applies a linear transformation to the incoming data: :math:`y = xA^T + b`

    Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting
    weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case.
    """

    def forward(self, input: 'torch.Tensor') ->torch.Tensor:
        if not torch.jit.is_scripting():
            return F.linear(input, self.weight, self.bias)
        # Scripted path: pass the (possibly None) bias explicitly.
        bias = self.bias if self.bias is not None else None
        return F.linear(input, self.weight, bias=bias)
class ClassifierHeadNew(nn.Module):
    """Classifier head w/ configurable global pooling and dropout whose
    forward pass runs the compiled inductor path (``call``)."""

    def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0.0,
        use_conv=False):
        super(ClassifierHeadNew, self).__init__()
        self.drop_rate = drop_rate
        self.global_pool, num_pooled_features = _create_pool(in_chs,
            num_classes, pool_type, use_conv=use_conv)
        self.fc = _create_fc(num_pooled_features, num_classes, use_conv=
            use_conv)
        self.flatten_after_fc = use_conv and pool_type

    def forward(self, input_0):
        weight = self.fc.weight
        bias = self.fc.bias
        # call expects [input, fc weight, fc bias]; first output is the result.
        output = call([input_0, weight, bias])
        return output[0]
|
dumpmemory/NonDeepNetworks
|
ClassifierHead
| false
| 15,261
|
[
"BSD-3-Clause"
] | 307
|
5513bf588f4e64c99583440507232675c2e21e34
|
https://github.com/dumpmemory/NonDeepNetworks/tree/5513bf588f4e64c99583440507232675c2e21e34
|
GroupNormAct
|
import torch
import torch.utils.data
import torchvision.transforms.functional as F
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
from torch import optim as optim
def swish(x, inplace: 'bool'=False):
    """Swish - Described in: https://arxiv.org/abs/1710.05941
    """
    if inplace:
        return x.mul_(x.sigmoid())
    return x.mul(x.sigmoid())
def is_exportable():
    # Reads the module-level export-mode flag (defined elsewhere in the project).
    return _EXPORTABLE
def is_no_jit():
    # Reads the module-level no-jit flag (defined elsewhere in the project).
    return _NO_JIT
def is_scriptable():
    # Reads the module-level scriptable flag (defined elsewhere in the project).
    return _SCRIPTABLE
def get_act_layer(name='relu'):
    """ Activation Layer Factory
    Fetching activation layers by name with this function allows export or torch script friendly
    functions to be returned dynamically based on current config.
    """
    if not name:
        return None
    # Memory-efficient custom impls unless jit/export/scripting restricts them.
    if not (is_no_jit() or is_exportable() or is_scriptable()
            ) and name in _ACT_LAYER_ME:
        return _ACT_LAYER_ME[name]
    if is_exportable() and name in ('silu', 'swish'):
        return Swish
    # JIT-friendly variants when neither no-jit nor export mode is requested.
    if not (is_no_jit() or is_exportable()) and name in _ACT_LAYER_JIT:
        return _ACT_LAYER_JIT[name]
    return _ACT_LAYER_DEFAULT[name]
class Swish(nn.Module):
    """Swish activation module; see :func:`swish`."""
    def __init__(self, inplace: 'bool'=False):
        super(Swish, self).__init__()
        self.inplace = inplace
    def forward(self, x):
        """Apply swish, in place when ``self.inplace`` is set."""
        return swish(x, self.inplace)
class GroupNormAct(nn.GroupNorm):
    """GroupNorm followed by an optional activation layer."""

    def __init__(self, num_channels, num_groups, eps=1e-05, affine=True,
        apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None):
        super(GroupNormAct, self).__init__(num_groups, num_channels, eps=
            eps, affine=affine)
        # Activation may be given by name; resolve it through the factory.
        if isinstance(act_layer, str):
            act_layer = get_act_layer(act_layer)
        if act_layer is None or not apply_act:
            self.act = nn.Identity()
        else:
            self.act = act_layer(**(dict(inplace=True) if inplace else {}))

    def forward(self, x):
        normed = F.group_norm(x, self.num_groups, self.weight, self.bias,
            self.eps)
        return self.act(normed)
def get_inputs():
    """Sample forward inputs: a single random (4, 4, 4, 4) tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Sample constructor arguments as a (positional, keyword) pair."""
    return [[], dict(num_channels=4, num_groups=1)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
import torch.nn.parallel
from torch import optim as optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Fused group-norm + ReLU over one 64-element group per program, also emitting
# the <=0 mask (for the ReLU backward) and the rstd/mean statistics.
@triton.jit
def triton_per_fused_native_group_norm_relu_threshold_backward_0(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    r3 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    # Group mean over the 64 elements of this sample's group.
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    # Biased variance, then rstd = 1/sqrt(var + eps).
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = tmp0 - tmp10
    tmp18 = 64.0
    tmp19 = tmp16 / tmp18
    tmp20 = 1e-05
    tmp21 = tmp19 + tmp20
    tmp22 = libdevice.rsqrt(tmp21)
    tmp23 = tmp17 * tmp22
    # Affine (weight/bias) then ReLU; tmp31 records the <=0 mask for backward.
    tmp25 = tmp23 * tmp24
    tmp27 = tmp25 + tmp26
    tmp28 = tl.full([1, 1], 0, tl.int32)
    tmp29 = triton_helpers.maximum(tmp28, tmp27)
    tmp30 = 0.0
    tmp31 = tmp29 <= tmp30
    tl.store(out_ptr2 + (r1 + 64 * x0), tmp29, xmask)
    tl.store(out_ptr3 + (r1 + 64 * x0), tmp31, xmask)
    tl.store(out_ptr4 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
    """Inductor entry point: fused group-norm + ReLU on CUDA.

    Expects [weight (4,), bias (4,), input (4,4,4,4)]; returns the activated
    output, the input, the ReLU mask, and the mean/rstd statistics."""
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        get_raw_stream(0)
        # One program per sample (grid(4)), each normalizing a 64-element group.
        triton_per_fused_native_group_norm_relu_threshold_backward_0[grid(4)](
            primals_3, primals_1, primals_2, buf0, buf3, buf4, buf5, 4, 64,
            XBLOCK=1, num_warps=2, num_stages=1)
        del primals_1
        del primals_2
    return buf3, primals_3, buf4, reinterpret_tensor(buf0, (4, 1, 1), (1, 1,
        1), 0), reinterpret_tensor(buf5, (4, 1, 1), (1, 1, 1), 0)
def swish(x, inplace: 'bool'=False):
    """Swish - Described in: https://arxiv.org/abs/1710.05941
    """
    if inplace:
        return x.mul_(x.sigmoid())
    return x.mul(x.sigmoid())
def is_exportable():
    # Reads the module-level export-mode flag (defined elsewhere in the project).
    return _EXPORTABLE
def is_no_jit():
    # Reads the module-level no-jit flag (defined elsewhere in the project).
    return _NO_JIT
def is_scriptable():
    # Reads the module-level scriptable flag (defined elsewhere in the project).
    return _SCRIPTABLE
def get_act_layer(name='relu'):
    """ Activation Layer Factory
    Fetching activation layers by name with this function allows export or torch script friendly
    functions to be returned dynamically based on current config.
    """
    if not name:
        return None
    # Memory-efficient custom impls unless jit/export/scripting restricts them.
    if not (is_no_jit() or is_exportable() or is_scriptable()
            ) and name in _ACT_LAYER_ME:
        return _ACT_LAYER_ME[name]
    if is_exportable() and name in ('silu', 'swish'):
        return Swish
    # JIT-friendly variants when neither no-jit nor export mode is requested.
    if not (is_no_jit() or is_exportable()) and name in _ACT_LAYER_JIT:
        return _ACT_LAYER_JIT[name]
    return _ACT_LAYER_DEFAULT[name]
class Swish(nn.Module):
    """Swish activation module; see :func:`swish`."""
    def __init__(self, inplace: 'bool'=False):
        super(Swish, self).__init__()
        self.inplace = inplace
    def forward(self, x):
        """Apply swish, in place when ``self.inplace`` is set."""
        return swish(x, self.inplace)
class GroupNormActNew(nn.GroupNorm):
    """GroupNorm + activation whose forward pass runs the compiled inductor
    path (``call``, fused group-norm + ReLU)."""

    def __init__(self, num_channels, num_groups, eps=1e-05, affine=True,
        apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None):
        super(GroupNormActNew, self).__init__(num_groups, num_channels, eps
            =eps, affine=affine)
        # Activation may be given by name; resolve it through the factory.
        if isinstance(act_layer, str):
            act_layer = get_act_layer(act_layer)
        if act_layer is None or not apply_act:
            self.act = nn.Identity()
        else:
            self.act = act_layer(**(dict(inplace=True) if inplace else {}))

    def forward(self, input_0):
        # call expects [weight, bias, input]; first output is the result tensor.
        output = call([self.weight, self.bias, input_0])
        return output[0]
|
dumpmemory/NonDeepNetworks
|
GroupNormAct
| false
| 15,262
|
[
"BSD-3-Clause"
] | 307
|
5513bf588f4e64c99583440507232675c2e21e34
|
https://github.com/dumpmemory/NonDeepNetworks/tree/5513bf588f4e64c99583440507232675c2e21e34
|
CXLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
class CXLoss(nn.Module):
    """Contextual (CX) loss between a target and an inference feature map.

    Both maps are centered by the target mean and L2-normalized per channel;
    cosine similarities are computed by convolving the inference map with the
    target's spatial positions as 1x1 kernels. Relative distances are then
    softmax-weighted with bandwidth ``sigma`` and the best match per target
    patch contributes to the loss.

    Args:
        sigma: bandwidth of the exponential weighting.
        b: bias subtracted from the distance before weighting.
        similarity: similarity tag (kept for interface compatibility; only
            the cosine path is implemented).
    """

    def __init__(self, sigma=0.1, b=1.0, similarity='consine'):
        super(CXLoss, self).__init__()
        self.similarity = similarity
        self.sigma = sigma
        self.b = b

    def center_by_T(self, featureI, featureT):
        # Center BOTH maps by the mean of the *target* features.
        meanT = featureT.mean(0, keepdim=True).mean(2, keepdim=True).mean(3,
            keepdim=True)
        return featureI - meanT, featureT - meanT

    def l2_normalize_channelwise(self, features):
        """Normalize each spatial position's channel vector to unit L2 norm."""
        eps = 1e-06
        norms = features.norm(p=2, dim=1, keepdim=True)
        return features.div(norms + eps)

    def patch_decomposition(self, features):
        # (1, C, H, W) -> (H*W, C, 1, 1): each spatial position becomes a
        # 1x1 convolution kernel.
        N, C, H, W = features.shape
        assert N == 1
        P = H * W
        return features.view(1, 1, C, P).permute((3, 2, 0, 1))

    def calc_relative_distances(self, raw_dist, axis=1):
        """Normalize distances by the per-position minimum along ``axis``."""
        epsilon = 1e-05
        div = torch.min(raw_dist, dim=axis, keepdim=True)[0]
        return raw_dist / (div + epsilon)

    def calc_CX(self, dist, axis=1):
        """Softmax-like weighting of distances with bandwidth ``sigma``."""
        W = torch.exp((self.b - dist) / self.sigma)
        W_sum = W.sum(dim=axis, keepdim=True)
        return W.div(W_sum)

    def forward(self, featureT, featureI):
        """
        :param featureT: target
        :param featureI: inference
        :return:
        """
        featureI, featureT = self.center_by_T(featureI, featureT)
        featureI = self.l2_normalize_channelwise(featureI)
        featureT = self.l2_normalize_channelwise(featureT)
        dist = []
        N = featureT.size()[0]
        for i in range(N):
            # Cosine similarity of every target patch with every inference
            # position, via a 1x1 convolution per sample.
            featureT_i = featureT[i, :, :, :].unsqueeze(0)
            featureI_i = featureI[i, :, :, :].unsqueeze(0)
            featureT_patch = self.patch_decomposition(featureT_i)
            dist_i = F.conv2d(featureI_i, featureT_patch)
            dist.append(dist_i)
        dist = torch.cat(dist, dim=0)
        # Map cosine similarity [-1, 1] to a distance [0, 1].
        raw_dist = (1.0 - dist) / 2.0
        raw_dist = raw_dist.clip(min=0)
        relative_dist = self.calc_relative_distances(raw_dist)
        CX = self.calc_CX(relative_dist)
        CX = CX.max(dim=3)[0].max(dim=2)[0]
        CX = CX.mean(1)
        CX = -torch.log(CX)
        CX = torch.mean(CX)
        if torch.isnan(CX):
            # BUG FIX: `pdb` was never imported at module level, so reaching
            # this branch raised NameError instead of opening the debugger.
            import pdb
            pdb.set_trace()
        return CX
def get_inputs():
    """Sample forward inputs: two random (4, 4, 4, 4) tensors."""
    return [torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Sample constructor arguments: no positional or keyword arguments."""
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Batch-mean of a (4, 4, 4, 4) tensor followed by a mean over one spatial
# axis: each output element averages four batch entries at four consecutive
# column offsets, then averages those four results.
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 16 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (128 + x0 + 16 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (192 + x0 + 16 * x1), xmask)
    tmp9 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp10 = tl.load(in_ptr0 + (68 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr0 + (132 + x0 + 16 * x1), xmask)
    tmp14 = tl.load(in_ptr0 + (196 + x0 + 16 * x1), xmask)
    tmp18 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp19 = tl.load(in_ptr0 + (72 + x0 + 16 * x1), xmask)
    tmp21 = tl.load(in_ptr0 + (136 + x0 + 16 * x1), xmask)
    tmp23 = tl.load(in_ptr0 + (200 + x0 + 16 * x1), xmask)
    tmp27 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp28 = tl.load(in_ptr0 + (76 + x0 + 16 * x1), xmask)
    tmp30 = tl.load(in_ptr0 + (140 + x0 + 16 * x1), xmask)
    tmp32 = tl.load(in_ptr0 + (204 + x0 + 16 * x1), xmask)
    # Each group of four adds is a mean over the batch dimension (stride 64).
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp11 = tmp9 + tmp10
    tmp13 = tmp11 + tmp12
    tmp15 = tmp13 + tmp14
    tmp16 = tmp15 / tmp7
    tmp17 = tmp8 + tmp16
    tmp20 = tmp18 + tmp19
    tmp22 = tmp20 + tmp21
    tmp24 = tmp22 + tmp23
    tmp25 = tmp24 / tmp7
    tmp26 = tmp17 + tmp25
    tmp29 = tmp27 + tmp28
    tmp31 = tmp29 + tmp30
    tmp33 = tmp31 + tmp32
    tmp34 = tmp33 / tmp7
    tmp35 = tmp26 + tmp34
    tmp36 = tmp35 / tmp7
    tl.store(out_ptr0 + x2, tmp36, xmask)
# Subtracts the target-mean (finished by averaging in_ptr1 over its last
# axis) from both feature maps: out0 = in0 - meanT, out1 = in2 - meanT.
@triton.jit
def triton_poi_fused_mean_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr2 + x3, xmask)
    # Final averaging step of the per-channel target mean.
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8
    tmp10 = tmp0 - tmp9
    tmp12 = tmp11 - tmp9
    tl.store(out_ptr0 + x3, tmp10, xmask)
    tl.store(out_ptr1 + x3, tmp12, xmask)
# Channel-wise L2 normalization: divides each element by the L2 norm of its
# spatial position's 4-channel vector (plus 1e-6 for stability).
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    # Sum of squares over the 4 channels (stride 16), then sqrt.
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-06
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x3, tmp15, xmask)
# Transposing copy of sample 0's (4, 16) tile into the layout the 1x1-conv
# patch matmul expects (swaps the channel and spatial indices on store).
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask, eviction_policy
        ='evict_last')
    tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask)
# Inverse transposing copy for sample 0: restores the (channel-major) layout
# after the patch matmul.
@triton.jit
def triton_poi_fused_convolution_4(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy
        ='evict_last')
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
# Transposing copy of sample 1's tile (input offset 64 = 1 * C * H * W).
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (64 + x1 + 16 * y0), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask)
# Inverse transposing copy for sample 1 (input offset 64).
@triton.jit
def triton_poi_fused_convolution_6(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (64 + y0 + 16 * x1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
# Transposing copy of sample 2's tile (input offset 128).
@triton.jit
def triton_poi_fused_convolution_7(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (128 + x1 + 16 * y0), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask)
# Inverse transposing copy for sample 2 (input offset 128).
@triton.jit
def triton_poi_fused_convolution_8(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (128 + y0 + 16 * x1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
# Transposing copy of sample 3's tile (input offset 192).
@triton.jit
def triton_poi_fused_convolution_9(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (192 + x1 + 16 * y0), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask)
# Inverse transposing copy for sample 3 (input offset 192).
@triton.jit
def triton_poi_fused_convolution_10(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (192 + y0 + 16 * x1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
# Fuses: concatenation of the four per-sample similarity maps, the mapping to
# raw distances (1 - d) / 2 clamped at 0, the per-axis minimum (for relative
# distances), and the exponential-weight row sums of the CX computation.
@triton.jit
def triton_per_fused_add_cat_clamp_div_exp_min_rsub_sum_11(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 64
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    x1 = xindex // 16
    r2 = rindex
    x0 = xindex % 16
    x3 = xindex
    # Select the source pointer by batch index x1 (emulates torch.cat on dim 0).
    tmp0 = x1
    tl.full([1, 1], 0, tl.int64)
    tmp3 = tl.full([1, 1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (r2 + 16 * x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1, 1], 2, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (r2 + 16 * x0), tmp9 & xmask, eviction_policy
        ='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1, 1], 3, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr2 + (r2 + 16 * x0), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1, 1], 4, tl.int64)
    tmp19 = tl.load(in_ptr3 + (r2 + 16 * x0), tmp16 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    # raw_dist = clamp((1 - similarity) / 2, min=0)
    tmp23 = 1.0
    tmp24 = tmp23 - tmp22
    tmp25 = 0.5
    tmp26 = tmp24 * tmp25
    tmp27 = 0.0
    tmp28 = triton_helpers.maximum(tmp26, tmp27)
    tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
    tmp31 = tl.where(xmask, tmp29, float('inf'))
    # Per-position minimum distance, then W = exp((1 - rel_dist) * 10) summed
    # over the reduction axis (sigma = 0.1 -> factor 10).
    tmp32 = triton_helpers.min2(tmp31, 1)[:, None]
    tmp33 = 1e-05
    tmp34 = tmp32 + tmp33
    tmp35 = tmp28 / tmp34
    tmp36 = tmp23 - tmp35
    tmp37 = 10.0
    tmp38 = tmp36 * tmp37
    tmp39 = tl_math.exp(tmp38)
    tmp40 = tl.broadcast_to(tmp39, [XBLOCK, RBLOCK])
    tmp42 = tl.where(xmask, tmp40, 0)
    tmp43 = tl.sum(tmp42, 1)[:, None]
    tl.store(out_ptr0 + (r2 + 16 * x3), tmp26, xmask)
    tl.store(out_ptr1 + x3, tmp32, xmask)
    tl.store(out_ptr2 + x3, tmp43, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_12(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Per-element CX weight followed by a running max over the 4 slabs.

    For each of the four distance slabs (offsets 0/16/32/48 in in_ptr0)
    computes W = exp(10 * (1 - clamp(d, 0) / (min + 1e-5))) / W_sum,
    using the precomputed per-row minimum (in_ptr1) and exp-sum
    (in_ptr2), and stores the elementwise maximum of the four weights.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp16 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp22 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp25 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp27 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp33 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp36 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp38 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp44 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    # Slab 0: normalised exponential weight.
    tmp1 = 0.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp4 = 1e-05
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 / tmp5
    tmp7 = 1.0
    tmp8 = tmp7 - tmp6
    tmp9 = 10.0
    tmp10 = tmp8 * tmp9
    tmp11 = tl_math.exp(tmp10)
    tmp13 = tmp11 / tmp12
    # Slab 1.
    tmp15 = triton_helpers.maximum(tmp14, tmp1)
    tmp17 = tmp16 + tmp4
    tmp18 = tmp15 / tmp17
    tmp19 = tmp7 - tmp18
    tmp20 = tmp19 * tmp9
    tmp21 = tl_math.exp(tmp20)
    tmp23 = tmp21 / tmp22
    tmp24 = triton_helpers.maximum(tmp13, tmp23)
    # Slab 2.
    tmp26 = triton_helpers.maximum(tmp25, tmp1)
    tmp28 = tmp27 + tmp4
    tmp29 = tmp26 / tmp28
    tmp30 = tmp7 - tmp29
    tmp31 = tmp30 * tmp9
    tmp32 = tl_math.exp(tmp31)
    tmp34 = tmp32 / tmp33
    tmp35 = triton_helpers.maximum(tmp24, tmp34)
    # Slab 3.
    tmp37 = triton_helpers.maximum(tmp36, tmp1)
    tmp39 = tmp38 + tmp4
    tmp40 = tmp37 / tmp39
    tmp41 = tmp7 - tmp40
    tmp42 = tmp41 * tmp9
    tmp43 = tl_math.exp(tmp42)
    tmp45 = tmp43 / tmp44
    tmp46 = triton_helpers.maximum(tmp35, tmp45)
    tl.store(out_ptr0 + x2, tmp46, xmask)
@triton.jit
def triton_per_fused_max_mean_13(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    """Elementwise max over the 4 slabs, then sum over the 16-wide axis.

    The division by 16 that turns the sum into a mean happens in the
    follow-up kernel (triton_per_fused_isnan_log_max_mean_neg_14).
    """
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0)
    tmp3 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0)
    tmp5 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0)
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.where(xmask, tmp7, 0)
    tmp10 = tl.sum(tmp9, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_isnan_log_max_mean_neg_14(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Final scalar reduction: loss = mean_over_4(-log(sum / 16)).

    Also emits an isnan flag for the resulting scalar (out_ptr0), which
    the caller returns alongside the loss.
    """
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = 16.0
    # Convert the per-row sum into a mean over the 16 spatial positions.
    tmp2 = tmp0 / tmp1
    tmp3 = tl_math.log(tmp2)
    tmp4 = -tmp3
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
    tmp7 = tl.sum(tmp5, 1)[:, None]
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8
    tmp10 = libdevice.isnan(tmp9).to(tl.int1)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None)
    tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None)
def call(args):
    """Inductor-compiled CX-loss forward over two (4, 4, 4, 4) inputs.

    Pipeline: centre by target mean, L2-normalise channelwise, run four
    shifted 1x1 correlation convolutions, fuse them into distance /
    min / exp-sum buffers, normalise into CX weights, reduce max+mean,
    and return the scalar loss plus its isnan flag.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Stage 1: target mean and centred/normalised feature maps.
        buf0 = empty_strided_cuda((1, 4, 1, 4), (16, 4, 16, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mean_sub_1[grid(256)](arg1_1, buf0, arg0_1, buf1,
            buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_div_linalg_vector_norm_2[grid(256)](buf1, buf2,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        buf4 = buf1
        del buf1
        triton_poi_fused_add_div_linalg_vector_norm_2[grid(256)](buf3, buf4,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf3
        # Stage 2: four patch-correlation convolutions (one per batch slice).
        buf5 = empty_strided_cuda((1, 4, 4, 4), (64, 1, 16, 4), torch.float32)
        triton_poi_fused_convolution_3[grid(4, 16)](buf2, buf5, 4, 16,
            XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
        buf6 = empty_strided_cuda((16, 4, 1, 1), (4, 1, 4, 4), torch.float32)
        triton_poi_fused_convolution_4[grid(16, 4)](buf4, buf6, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf7 = extern_kernels.convolution(buf5, buf6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (1, 16, 4, 4), (256, 1, 64, 16))
        buf8 = reinterpret_tensor(buf6, (1, 4, 4, 4), (64, 1, 16, 4), 0)
        del buf6
        triton_poi_fused_convolution_5[grid(4, 16)](buf2, buf8, 4, 16,
            XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
        buf9 = reinterpret_tensor(buf5, (16, 4, 1, 1), (4, 1, 4, 4), 0)
        del buf5
        triton_poi_fused_convolution_6[grid(16, 4)](buf4, buf9, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf10 = extern_kernels.convolution(buf8, buf9, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (1, 16, 4, 4), (256, 1, 64, 16))
        buf11 = reinterpret_tensor(buf9, (1, 4, 4, 4), (64, 1, 16, 4), 0)
        del buf9
        triton_poi_fused_convolution_7[grid(4, 16)](buf2, buf11, 4, 16,
            XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
        buf12 = reinterpret_tensor(buf8, (16, 4, 1, 1), (4, 1, 4, 4), 0)
        del buf8
        triton_poi_fused_convolution_8[grid(16, 4)](buf4, buf12, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf13 = extern_kernels.convolution(buf11, buf12, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (1, 16, 4, 4), (256, 1, 64, 16))
        buf14 = reinterpret_tensor(buf12, (1, 4, 4, 4), (64, 1, 16, 4), 0)
        del buf12
        triton_poi_fused_convolution_9[grid(4, 16)](buf2, buf14, 4, 16,
            XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
        del buf2
        buf15 = reinterpret_tensor(buf11, (16, 4, 1, 1), (4, 1, 4, 4), 0)
        del buf11
        triton_poi_fused_convolution_10[grid(16, 4)](buf4, buf15, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        del buf4
        buf16 = extern_kernels.convolution(buf14, buf15, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (1, 16, 4, 4), (256, 1, 64, 16))
        # Stage 3: distance / min / exp-sum fusion, CX weighting, reductions.
        buf17 = empty_strided_cuda((4, 16, 4, 4), (256, 1, 64, 16), torch.
            float32)
        buf18 = reinterpret_tensor(buf15, (4, 1, 4, 4), (16, 64, 4, 1), 0)
        del buf15
        buf20 = reinterpret_tensor(buf14, (4, 1, 4, 4), (16, 64, 4, 1), 0)
        del buf14
        triton_per_fused_add_cat_clamp_div_exp_min_rsub_sum_11[grid(64)](buf7,
            buf10, buf13, buf16, buf17, buf18, buf20, 64, 16, XBLOCK=32,
            num_warps=4, num_stages=1)
        del buf10
        del buf13
        del buf16
        buf21 = reinterpret_tensor(buf7, (4, 16, 4), (64, 1, 16), 0)
        del buf7
        triton_poi_fused_add_clamp_div_exp_max_rsub_12[grid(256)](buf17,
            buf18, buf20, buf21, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf17
        del buf18
        del buf20
        buf22 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_per_fused_max_mean_13[grid(4)](buf21, buf22, 4, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        del buf21
        # Stage 4: scalar loss and isnan flag.
        buf23 = empty_strided_cuda((), (), torch.float32)
        buf24 = buf23
        del buf23
        buf25 = empty_strided_cuda((), (), torch.bool)
        triton_per_fused_isnan_log_max_mean_neg_14[grid(1)](buf24, buf22,
            buf25, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf22
    return buf24, buf25
class CXLossNew(nn.Module):
    """Contextual (CX) loss module backed by the fused Triton graph.

    The helper methods reproduce the reference CX-loss building blocks
    (centering by the target mean, channel-wise L2 normalisation, patch
    decomposition and the relative-distance / softmax weighting), while
    ``forward`` delegates to the pre-compiled ``call`` pipeline.
    """

    def __init__(self, sigma=0.1, b=1.0, similarity='consine'):
        # NOTE(review): 'consine' keeps the upstream (misspelled) default
        # so callers that compare against it keep working.
        super(CXLossNew, self).__init__()
        self.similarity = similarity
        self.sigma = sigma
        self.b = b

    def center_by_T(self, featureI, featureT):
        # Centre both maps with the target's mean over batch and spatial dims.
        mu = featureT.mean(0, keepdim=True).mean(2, keepdim=True).mean(3,
            keepdim=True)
        return featureI - mu, featureT - mu

    def l2_normalize_channelwise(self, features):
        # Unit-normalise each feature vector along the channel axis.
        channel_norm = features.norm(p=2, dim=1, keepdim=True)
        return features / (channel_norm + 1e-06)

    def patch_decomposition(self, features):
        # Turn a (1, C, H, W) map into H*W filters of shape (P, C, 1, 1).
        N, C, H, W = features.shape
        assert N == 1
        return features.view(1, 1, C, H * W).permute(3, 2, 0, 1)

    def calc_relative_distances(self, raw_dist, axis=1):
        # Scale each distance by the per-row minimum (plus epsilon).
        row_min = torch.min(raw_dist, dim=axis, keepdim=True)[0]
        return raw_dist / (row_min + 1e-05)

    def calc_CX(self, dist, axis=1):
        # Softmax-style weighting of the bandwidth-scaled distances.
        weights = torch.exp((self.b - dist) / self.sigma)
        return weights / weights.sum(dim=axis, keepdim=True)

    def forward(self, input_0, input_1):
        result = call([input_0, input_1])
        return result[0]
|
drgripa1/deepvecfont
|
CXLoss
| false
| 15,263
|
[
"MIT"
] | 68
|
a44d81ba19a22e43b4e576cd8ebc5c2fd961a621
|
https://github.com/drgripa1/deepvecfont/tree/a44d81ba19a22e43b4e576cd8ebc5c2fd961a621
|
CriticNet
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
class CriticNet(nn.Module):
    """DDPG-style critic: Q(state, action) -> scalar value.

    The state passes through a 400-unit layer, the latent action is
    concatenated before the 300-unit layer, and two final linear layers
    collapse to a single value.
    """

    def __init__(self, args):
        super(CriticNet, self).__init__()
        in_dim = args.state_dim
        latent_dim = args.z_dim
        self.l1 = nn.Linear(in_dim, 400)
        self.l2 = nn.Linear(400 + latent_dim, 300)
        self.l3_additional = nn.Linear(300, 300)
        self.l3 = nn.Linear(300, 1)

    def forward(self, x, u):
        hidden = F.relu(self.l1(x))
        hidden = F.relu(self.l2(torch.cat([hidden, u], 1)))
        return self.l3(self.l3_additional(hidden))
def get_inputs():
    """Sample forward-pass inputs: a state and an action batch."""
    return [torch.rand(4, 4), torch.rand(4, 4)]
def get_init_inputs():
    """Constructor arguments: no positionals, a mocked config kwarg."""
    config = _mock_config(state_dim=4, z_dim=4)
    return [[], {'args': config}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Fused [relu(x @ W1 + b1) | u] concatenation, width 400 + 4 = 404.

    Columns < 400 come from the matmul result (in_ptr0) plus the bias
    (in_ptr1) passed through ReLU; the remaining 4 columns copy the
    action tensor (in_ptr2).
    """
    xnumel = 1616
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 404
    x1 = xindex // 404
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 400, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (400 * x1 + x0), tmp4 & xmask, eviction_policy
        ='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full([1], 0, tl.int32)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp0 >= tmp3
    tl.full([1], 404, tl.int64)
    tmp15 = tl.load(in_ptr2 + (4 * x1 + (-400 + x0)), tmp12 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tl.where(tmp4, tmp11, tmp15)
    tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place bias add + ReLU over the 4x300 second-layer activations."""
    xnumel = 1200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 300
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    """Boolean mask relu(x + b) <= 0, saved for the ReLU backward pass."""
    xnumel = 1600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 400
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    """Inductor-compiled CriticNet forward.

    Computes l1 matmul, fused relu+cat with the action tensor, relu(l2),
    l3_additional, l3, and also a saved ReLU mask for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    assert_size_stride(primals_1, (400, 4), (4, 1))
    assert_size_stride(primals_2, (400,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (300, 404), (404, 1))
    assert_size_stride(primals_6, (300,), (1,))
    assert_size_stride(primals_7, (300, 300), (300, 1))
    assert_size_stride(primals_8, (300,), (1,))
    assert_size_stride(primals_9, (1, 300), (300, 1))
    assert_size_stride(primals_10, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # l1: state @ W1^T (bias folded into the fused cat kernel below).
        buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
        extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 400),
            (1, 4), 0), out=buf0)
        del primals_1
        # Fused relu(l1) concatenated with the action input.
        buf1 = empty_strided_cuda((4, 404), (404, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(1616)](buf0, primals_2, primals_4, buf1,
            1616, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_4
        # l2 + relu.
        buf2 = empty_strided_cuda((4, 300), (300, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (404, 300), (
            1, 404), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(1200)](buf3, primals_6, 1200, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_6
        # l3_additional then l3 produce the scalar Q value.
        buf4 = empty_strided_cuda((4, 300), (300, 1), torch.float32)
        extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7,
            (300, 300), (1, 300), 0), alpha=1, beta=1, out=buf4)
        del primals_8
        buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_10, buf4, reinterpret_tensor(primals_9,
            (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6)
        del primals_10
        # ReLU backward mask for the first layer.
        buf7 = empty_strided_cuda((4, 400), (400, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_2[grid(1600)](buf0,
            primals_2, buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del primals_2
    return (buf6, primals_3, buf1, buf3, buf4, primals_9, primals_7,
        primals_5, buf7)
class CriticNetNew(nn.Module):
    """Critic network whose forward pass dispatches to the fused ``call`` graph."""

    def __init__(self, args):
        super(CriticNetNew, self).__init__()
        state_dim = args.state_dim
        action_dim = args.z_dim
        self.l1 = nn.Linear(state_dim, 400)
        self.l2 = nn.Linear(400 + action_dim, 300)
        self.l3_additional = nn.Linear(300, 300)
        self.l3 = nn.Linear(300, 1)

    def forward(self, input_0, input_1):
        # Flatten parameters and inputs in the order the compiled graph expects.
        graph_args = [
            self.l1.weight, self.l1.bias,
            input_0, input_1,
            self.l2.weight, self.l2.bias,
            self.l3_additional.weight, self.l3_additional.bias,
            self.l3.weight, self.l3.bias,
        ]
        return call(graph_args)[0]
|
doudoulaile/RL-GAN-Net
|
CriticNet
| false
| 15,264
|
[
"MIT"
] | 112
|
9c221223d1878bc24f0f39ad34928c1bb2974ae3
|
https://github.com/doudoulaile/RL-GAN-Net/tree/9c221223d1878bc24f0f39ad34928c1bb2974ae3
|
LabelSmoothingCrossEntropyV2
|
import torch
from torch import nn
class LabelSmoothingCrossEntropyV2(nn.Module):
    """Label-smoothing cross entropy (autograd version).

    Smooths the one-hot targets to ``1 - lb_smooth`` for the true class
    and ``lb_smooth / num_classes`` elsewhere, ignoring positions whose
    label equals ``ignore_index``. See
    https://github.com/CoinCheung/pytorch-loss for the derived-gradient
    variant (LabelSmoothSoftmaxCEV2).
    """

    def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):
        super(LabelSmoothingCrossEntropyV2, self).__init__()
        self.log_softmax = nn.LogSoftmax(dim=1)
        self.lb_ignore = ignore_index
        self.lb_smooth = lb_smooth
        self.reduction = reduction

    def forward(self, logits, label):
        logits = logits.float()
        with torch.no_grad():
            num_classes = logits.size(1)
            label = label.clone().detach()
            ignore_mask = label.eq(self.lb_ignore)
            n_valid = ignore_mask.eq(0).sum()
            # Ignored positions get a dummy class 0; their loss is zeroed later.
            label[ignore_mask] = 0
            on_value = 1.0 - self.lb_smooth
            off_value = self.lb_smooth / num_classes
            smoothed = torch.empty_like(logits).fill_(off_value)
            smoothed = smoothed.scatter_(1, label.unsqueeze(1), on_value
                ).detach()
        log_probs = self.log_softmax(logits)
        loss = -(log_probs * smoothed).sum(dim=1)
        loss[ignore_mask] = 0
        if self.reduction == 'mean':
            return loss.sum() / n_valid
        if self.reduction == 'sum':
            return loss.sum()
        return loss
def get_inputs():
    """Sample inputs: integer logits (4x4) and integer labels (4,)."""
    logits = torch.ones(4, 4, dtype=torch.int64)
    labels = torch.ones(4, dtype=torch.int64)
    return [logits, labels]
def get_init_inputs():
    """Constructor arguments: no positionals, all defaults."""
    positional, keyword = [], {}
    return [positional, keyword]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax__to_copy_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Log-softmax stage 1: cast int64 logits to float, subtract row max."""
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp1 = tmp0.to(tl.float32)
    tmp3 = tmp2.to(tl.float32)
    tmp5 = tmp4.to(tl.float32)
    tmp6 = triton_helpers.maximum(tmp3, tmp5)
    tmp8 = tmp7.to(tl.float32)
    tmp9 = triton_helpers.maximum(tmp6, tmp8)
    tmp11 = tmp10.to(tl.float32)
    tmp12 = triton_helpers.maximum(tmp9, tmp11)
    tmp13 = tmp1 - tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_eq_index_put_lift_fresh_mul_neg_scatter_sum_1(
    in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Finish log-softmax and reduce the label-smoothed CE to a scalar.

    Per sample: logsumexp over 4 classes, smoothed targets 0.9 / 0.025
    (lb_smooth=0.1, 4 classes), loss = -sum(logprob * target), zeroed
    where the label equals -100 (ignore_index), then mean over the valid
    count.
    """
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp5 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
    # Remap ignored labels (-100) to class 0; their loss is masked below.
    tmp1 = tl.full([1, 1], -100, tl.int64)
    tmp2 = tmp0 == tmp1
    tmp3 = tl.full([1, 1], 0, tl.int64)
    tmp4 = tl.where(tmp2, tmp3, tmp0)
    # logsumexp over the (already max-shifted) class logits.
    tmp6 = tl_math.exp(tmp5)
    tmp8 = tl_math.exp(tmp7)
    tmp9 = tmp6 + tmp8
    tmp11 = tl_math.exp(tmp10)
    tmp12 = tmp9 + tmp11
    tmp14 = tl_math.exp(tmp13)
    tmp15 = tmp12 + tmp14
    tmp16 = tl_math.log(tmp15)
    tmp17 = tmp5 - tmp16
    tmp18 = tmp4 == tmp3
    tmp19 = 0.9
    tmp20 = 0.025
    tmp21 = tl.where(tmp18, tmp19, tmp20)
    tmp22 = tmp17 * tmp21
    tmp23 = tmp7 - tmp16
    tmp24 = tl.full([1, 1], 1, tl.int64)
    tmp25 = tmp4 == tmp24
    tmp26 = tl.where(tmp25, tmp19, tmp20)
    tmp27 = tmp23 * tmp26
    tmp28 = tmp22 + tmp27
    tmp29 = tmp10 - tmp16
    tmp30 = tl.full([1, 1], 2, tl.int64)
    tmp31 = tmp4 == tmp30
    tmp32 = tl.where(tmp31, tmp19, tmp20)
    tmp33 = tmp29 * tmp32
    tmp34 = tmp28 + tmp33
    tmp35 = tmp13 - tmp16
    tmp36 = tl.full([1, 1], 3, tl.int64)
    tmp37 = tmp4 == tmp36
    tmp38 = tl.where(tmp37, tmp19, tmp20)
    tmp39 = tmp35 * tmp38
    tmp40 = tmp34 + tmp39
    tmp41 = -tmp40
    tmp42 = 0.0
    tmp43 = tl.where(tmp2, tmp42, tmp41)
    # Count valid (non-ignored) samples for the mean.
    tmp44 = tmp2.to(tl.int64)
    tmp45 = tmp44 == tmp3
    tmp46 = tmp45.to(tl.int64)
    tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
    tmp49 = tl.sum(tmp47, 1)[:, None]
    tmp50 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
    tmp52 = tl.sum(tmp50, 1)[:, None]
    tmp53 = tmp49.to(tl.float32)
    tmp54 = tmp52 / tmp53
    tl.debug_barrier()
    tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp54, None)
def call(args):
    """Inductor-compiled label-smoothing CE over (4, 4) logits / (4,) labels.

    Two kernels: max-shift + float cast, then the fused logsumexp /
    smoothing / ignore-mask / mean reduction to a scalar.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__log_softmax__to_copy_0[grid(16)](arg0_1, buf0, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf6 = buf4
        del buf4
        triton_per_fused__log_softmax_div_eq_index_put_lift_fresh_mul_neg_scatter_sum_1[
            grid(1)](buf6, arg1_1, buf0, 1, 4, XBLOCK=1, num_warps=2,
            num_stages=1)
        del arg1_1
        del buf0
    return buf6,
class LabelSmoothingCrossEntropyV2New(nn.Module):
    """Label-smoothing cross entropy, forward served by the compiled graph.

    Same contract as LabelSmoothingCrossEntropyV2 (smoothed one-hot
    targets, ignore_index masking, mean reduction); see
    https://github.com/CoinCheung/pytorch-loss for the reference
    implementation.
    """

    def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):
        super(LabelSmoothingCrossEntropyV2New, self).__init__()
        self.log_softmax = nn.LogSoftmax(dim=1)
        self.lb_ignore = ignore_index
        self.lb_smooth = lb_smooth
        self.reduction = reduction

    def forward(self, input_0, input_1):
        return call([input_0, input_1])[0]
|
dumpmemory/Pytorch-NLU
|
LabelSmoothingCrossEntropyV2
| false
| 15,265
|
[
"Apache-2.0"
] | 115
|
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
https://github.com/dumpmemory/Pytorch-NLU/tree/864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
MLP
|
import torch
import torch.autograd
import torch.nn as nn
class MLP(nn.Module):
    """Single-layer feed-forward block: Dropout -> Linear -> GELU."""

    def __init__(self, n_in, n_out, dropout=0):
        super().__init__()
        self.linear = nn.Linear(n_in, n_out)
        self.activation = nn.GELU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Dropout is applied to the *input*, before the projection.
        return self.activation(self.linear(self.dropout(x)))
def get_inputs():
    """One random 4x4x4x4 activation tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments: 4-dim input and output projections."""
    kwargs = {'n_in': 4, 'n_out': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.autograd
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Exact (erf-based) GELU: 0.5 * x * (1 + erf(x / sqrt(2)))."""
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
    """Inductor-compiled MLP forward: addmm (Linear) followed by fused GELU.

    Dropout with p=0 is a no-op and was compiled away.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Linear layer: view input as (64, 4) and apply bias + matmul.
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
    return buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0
class MLPNew(nn.Module):
    """MLP block whose forward pass is served by the compiled ``call`` graph."""

    def __init__(self, n_in, n_out, dropout=0):
        super().__init__()
        self.linear = nn.Linear(n_in, n_out)
        self.activation = nn.GELU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, input_0):
        # Compiled graph expects (input, weight, bias) in this order.
        weight, bias = self.linear.weight, self.linear.bias
        return call([input_0, weight, bias])[0]
|
dumpmemory/W2NER
|
MLP
| false
| 15,266
|
[
"MIT"
] | 128
|
fb1b6eb1111eb001b1c965097d995244b840bdda
|
https://github.com/dumpmemory/W2NER/tree/fb1b6eb1111eb001b1c965097d995244b840bdda
|
ConvMLPStage
|
from torch.nn import Module
import torch
import torch.nn as nn
from torch.nn import Linear
from torch.nn import LayerNorm
from torch.nn import Conv2d
from torch.nn import GELU
from torch.nn import Identity
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
    """Stochastic depth: drop whole samples from a residual branch.

    Obtained from github.com:rwightman/pytorch-image-models. During
    training each sample is zeroed with probability ``drop_prob`` and
    surviving samples are rescaled by 1 / keep_prob so the expectation
    is unchanged. At evaluation time (or drop_prob == 0) the input is
    returned untouched.
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask.floor_()
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Module wrapper around :func:`drop_path` (stochastic depth).

    Obtained from github.com:rwightman/pytorch-image-models. The drop
    is active only while the module is in training mode.
    """

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, drop_prob=self.drop_prob, training=self.training)
class Mlp(Module):
    """Two-layer feed-forward block: Linear -> activation -> Linear."""

    def __init__(self, embedding_dim_in, hidden_dim=None, embedding_dim_out
        =None, activation=GELU):
        super().__init__()
        # Falsy (None) sizes fall back to the input embedding width.
        width_hidden = hidden_dim or embedding_dim_in
        width_out = embedding_dim_out or embedding_dim_in
        self.fc1 = Linear(embedding_dim_in, width_hidden)
        self.act = activation()
        self.fc2 = Linear(width_hidden, width_out)

    def forward(self, x):
        hidden = self.act(self.fc1(x))
        return self.fc2(hidden)
class ConvMLPStage(Module):
    """ConvMLP stage: channel MLP, depthwise 3x3 spatial mix, channel MLP.

    Input/output layout is channels-last (N, H, W, C); the depthwise
    convolution runs in NCHW, so the tensor is permuted around it. Both
    MLP branches are residual and pass through stochastic depth.
    """

    def __init__(self, embedding_dim, dim_feedforward=2048,
        stochastic_depth_rate=0.1):
        super(ConvMLPStage, self).__init__()
        self.norm1 = LayerNorm(embedding_dim)
        self.channel_mlp1 = Mlp(embedding_dim_in=embedding_dim, hidden_dim=
            dim_feedforward)
        self.norm2 = LayerNorm(embedding_dim)
        # Depthwise (groups == channels) 3x3 conv mixes spatial positions only.
        self.connect = Conv2d(embedding_dim, embedding_dim, kernel_size=(3,
            3), stride=(1, 1), padding=(1, 1), groups=embedding_dim, bias=False
            )
        self.connect_norm = LayerNorm(embedding_dim)
        self.channel_mlp2 = Mlp(embedding_dim_in=embedding_dim, hidden_dim=
            dim_feedforward)
        if stochastic_depth_rate > 0:
            self.drop_path = DropPath(stochastic_depth_rate)
        else:
            self.drop_path = Identity()

    def forward(self, src):
        src = src + self.drop_path(self.channel_mlp1(self.norm1(src)))
        # NHWC -> NCHW for the depthwise conv, then back.
        mixed = self.connect(self.connect_norm(src).permute(0, 3, 1, 2))
        src = mixed.permute(0, 2, 3, 1)
        return src + self.drop_path(self.channel_mlp2(self.norm2(src)))
def get_inputs():
    """One random channels-last 4x4x4x4 activation tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments: a 4-channel embedding."""
    kwargs = {'embedding_dim': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import torch.nn as nn
from torch.nn import Linear
from torch.nn import LayerNorm
from torch.nn import Conv2d
from torch.nn import GELU
from torch.nn import Identity
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    """LayerNorm statistics over the last (size-4) dim.

    Stores the per-row mean (out_ptr0) and rsqrt(var + 1e-5) (out_ptr1);
    the affine application happens in the companion kernel.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """LayerNorm apply: (x - mean) * rsqrt * weight + bias.

    Consumes the mean (in_ptr1) and inverse-std (in_ptr2) produced by
    the statistics kernel, plus the affine weight/bias (in_ptr3/4).
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_gelu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Exact (erf-based) GELU: 0.5 * x * (1 + erf(x / sqrt(2)))."""
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, None)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    """Residual add fused with LayerNorm statistics.

    Sums in_ptr0 + in_ptr1 elementwise, then stores per-row mean
    (out_ptr0) and biased variance (out_ptr1) over the size-4 last dim;
    rsqrt and the affine transform happen in the apply kernel.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)
# Second half of the fused residual-add + LayerNorm: normalizes
# (in_ptr0 + in_ptr1) with the per-row mean (in_ptr2) and variance (in_ptr3)
# computed by the stats kernel, then applies the affine weight (in_ptr4) and
# bias (in_ptr5). eps = 1e-05 matches nn.LayerNorm's default.
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex        # flat element index
    x1 = xindex // 4   # row index (shared mean/var)
    x0 = xindex % 4    # position along the normalized axis (affine params)
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)
# In-place fused bias-add + residual-add:
# in_out_ptr0 = in_ptr0 + (in_out_ptr0 + in_ptr1[x0]), where in_ptr1 is a
# per-channel (last-dim, size 4) bias broadcast over the 256 elements.
@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # bias index along the last dimension
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """Inductor-generated forward for ConvMLPStage.

    Pipeline: LN -> MLP (fc1/GELU/fc2) -> residual add + LN -> depthwise
    3x3 conv -> LN -> MLP (fc1/GELU/fc2) -> residual add.
    `args` holds the 16 parameter/input tensors and is cleared after unpacking.
    Returns the output plus intermediates saved for backward.
    NOTE(review): the module's stochastic-depth (drop_path) is not applied
    here — presumably this path assumes eval mode; confirm before training use.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16) = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (2048, 4), (4, 1))
    assert_size_stride(primals_5, (2048,), (1,))
    assert_size_stride(primals_6, (4, 2048), (2048, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (2048, 4), (4, 1))
    assert_size_stride(primals_14, (2048,), (1,))
    assert_size_stride(primals_15, (4, 2048), (2048, 1))
    assert_size_stride(primals_16, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # norm1: per-row mean / inv-std of the input.
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
            buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
            buf1, primals_1, primals_2, buf2, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_1
        del primals_2
        # channel_mlp1: fc1 -> GELU -> fc2.
        buf3 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 2048), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
        del primals_5
        buf4 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
            torch.float32)
        triton_poi_fused_gelu_2[grid(131072)](buf3, buf4, 131072, XBLOCK=
            512, num_warps=8, num_stages=1)
        buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 2048),
            (2048, 1), 0), reinterpret_tensor(primals_6, (2048, 4), (1,
            2048), 0), alpha=1, beta=1, out=buf5)
        del primals_7
        # Residual add + connect_norm (buf0/buf1 are recycled for the stats).
        buf6 = buf1
        del buf1
        buf7 = buf0
        del buf0
        triton_poi_fused_add_native_layer_norm_3[grid(64)](primals_3, buf5,
            buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_4[grid(256)](primals_3, buf5,
            buf6, buf7, primals_8, primals_9, buf8, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_9
        # Depthwise 3x3 "connect" convolution (groups == channels, no bias).
        buf9 = extern_kernels.convolution(reinterpret_tensor(buf8, (4, 4, 4,
            4), (64, 1, 16, 4), 0), primals_10, stride=(1, 1), padding=(1,
            1), dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=4, bias=None)
        assert_size_stride(buf9, (4, 4, 4, 4), (64, 1, 16, 4))
        # norm2 on the conv output.
        buf10 = buf7
        del buf7
        buf11 = buf6
        del buf6
        triton_poi_fused_native_layer_norm_0[grid(64)](buf9, buf10, buf11,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(256)](buf9, buf10, buf11,
            primals_11, primals_12, buf12, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del buf10
        del buf11
        del primals_12
        # channel_mlp2: fc1 -> GELU -> fc2 (bias folded into the final add).
        buf13 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
        extern_kernels.addmm(primals_14, reinterpret_tensor(buf12, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_13, (4, 2048), (1, 4), 0
            ), alpha=1, beta=1, out=buf13)
        del primals_14
        buf14 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
            torch.float32)
        triton_poi_fused_gelu_2[grid(131072)](buf13, buf14, 131072, XBLOCK=
            512, num_warps=8, num_stages=1)
        buf15 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf14, (64, 2048), (2048, 1),
            0), reinterpret_tensor(primals_15, (2048, 4), (1, 2048), 0),
            out=buf15)
        buf16 = reinterpret_tensor(buf15, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf15
        # Final fused (fc2 bias + residual) add, in place into buf16.
        triton_poi_fused_add_5[grid(256)](buf16, buf9, primals_16, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_16
    return (buf16, primals_3, primals_8, primals_10, primals_11,
        reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3,
        reinterpret_tensor(buf4, (64, 2048), (2048, 1), 0), buf5,
        reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf9,
        reinterpret_tensor(buf12, (64, 4), (4, 1), 0), buf13,
        reinterpret_tensor(buf14, (64, 2048), (2048, 1), 0), primals_15,
        primals_13, primals_6, primals_4)
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
    """Stochastic Depth: randomly zero whole samples in the batch.

    Obtained from github.com:rwightman/pytorch-image-models. With probability
    `drop_prob` a sample's entire residual path is dropped; survivors are
    rescaled by 1/keep_prob so the expectation is unchanged. A no-op when
    `drop_prob` is 0 or when not training.
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast across all remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # binarize: 1 with prob keep_prob, else 0
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Module wrapper around :func:`drop_path` (Stochastic Depth).

    Obtained from github.com:rwightman/pytorch-image-models. Active only
    while the module is in training mode.
    """

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)
class Mlp(Module):
def __init__(self, embedding_dim_in, hidden_dim=None, embedding_dim_out
=None, activation=GELU):
super().__init__()
hidden_dim = hidden_dim or embedding_dim_in
embedding_dim_out = embedding_dim_out or embedding_dim_in
self.fc1 = Linear(embedding_dim_in, hidden_dim)
self.act = activation()
self.fc2 = Linear(hidden_dim, embedding_dim_out)
def forward(self, x):
return self.fc2(self.act(self.fc1(x)))
class ConvMLPStageNew(Module):
    """ConvMLP stage whose forward runs the Inductor-generated `call`.

    Architecture (see the original ConvMLPStage): LayerNorm -> channel MLP ->
    residual + LayerNorm -> depthwise 3x3 conv -> LayerNorm -> channel MLP ->
    residual. NOTE(review): `self.drop_path` is constructed but the compiled
    `call` does not apply it — presumably traced in eval mode; confirm.
    """

    def __init__(self, embedding_dim, dim_feedforward=2048,
        stochastic_depth_rate=0.1):
        super(ConvMLPStageNew, self).__init__()
        self.norm1 = LayerNorm(embedding_dim)
        self.channel_mlp1 = Mlp(embedding_dim_in=embedding_dim, hidden_dim=
            dim_feedforward)
        self.norm2 = LayerNorm(embedding_dim)
        # Depthwise (groups == channels) spatial mixing, no bias.
        self.connect = Conv2d(embedding_dim, embedding_dim, kernel_size=(3,
            3), stride=(1, 1), padding=(1, 1), groups=embedding_dim, bias=False
            )
        self.connect_norm = LayerNorm(embedding_dim)
        self.channel_mlp2 = Mlp(embedding_dim_in=embedding_dim, hidden_dim=
            dim_feedforward)
        self.drop_path = DropPath(stochastic_depth_rate
            ) if stochastic_depth_rate > 0 else Identity()

    def forward(self, input_0):
        # Gather parameters in the positional order expected by `call`.
        primals_1 = self.norm1.weight
        primals_2 = self.norm1.bias
        primals_4 = self.channel_mlp1.fc1.weight
        primals_5 = self.channel_mlp1.fc1.bias
        primals_6 = self.channel_mlp1.fc2.weight
        primals_7 = self.channel_mlp1.fc2.bias
        primals_8 = self.norm2.weight
        primals_9 = self.norm2.bias
        primals_10 = self.connect.weight
        primals_11 = self.connect_norm.weight
        primals_12 = self.connect_norm.bias
        primals_13 = self.channel_mlp2.fc1.weight
        primals_14 = self.channel_mlp2.fc1.bias
        primals_15 = self.channel_mlp2.fc2.weight
        primals_16 = self.channel_mlp2.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16])
        # call() returns (result, *saved_for_backward); expose only the result.
        return output[0]
|
dumpmemory/Convolutional-MLPs
|
ConvMLPStage
| false
| 15,267
|
[
"Apache-2.0"
] | 117
|
89008c686e48803c012038f21f97e56276aa84ad
|
https://github.com/dumpmemory/Convolutional-MLPs/tree/89008c686e48803c012038f21f97e56276aa84ad
|
ResNormLayer
|
import torch
from torch import nn
from torch import optim as optim
class ResNormLayer(nn.Module):
    """Residual block: x + LN(ReLU(W2 · LN(ReLU(W1 · x)))).

    Both linear layers, norms and activations keep the input width.
    """

    def __init__(self, linear_size):
        super().__init__()
        self.l_size = linear_size
        self.nonlin1 = nn.ReLU(inplace=True)
        self.nonlin2 = nn.ReLU(inplace=True)
        self.norm_fn1 = nn.LayerNorm(self.l_size)
        self.norm_fn2 = nn.LayerNorm(self.l_size)
        self.w1 = nn.Linear(self.l_size, self.l_size)
        self.w2 = nn.Linear(self.l_size, self.l_size)

    def forward(self, x):
        h = self.norm_fn1(self.nonlin1(self.w1(x)))
        h = self.norm_fn2(self.nonlin2(self.w2(h)))
        return x + h
def get_inputs():
    # A single random 4-D activation matching the module's expected input.
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    # Positional args (none) and constructor kwargs for ResNormLayer.
    return [[], dict(linear_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torch import optim as optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Fused ReLU + layer-norm statistics over a last dimension of size 4.
# Each row's elements are first clamped at 0 (ReLU), then the kernel writes
# out_ptr0 = mean and out_ptr1 = rsqrt(variance + 1e-05) (the inverse std,
# unlike the variant that stores raw variance).
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    # ReLU via max(0, x) on each element before the statistics.
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp1, tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = triton_helpers.maximum(tmp1, tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = triton_helpers.maximum(tmp1, tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12
    tmp14 = tmp2 - tmp13
    tmp15 = tmp14 * tmp14
    tmp16 = tmp4 - tmp13
    tmp17 = tmp16 * tmp16
    tmp18 = tmp15 + tmp17
    tmp19 = tmp7 - tmp13
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp10 - tmp13
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp24 / tmp12
    tmp26 = 1e-05
    tmp27 = tmp25 + tmp26
    tmp28 = libdevice.rsqrt(tmp27)
    tl.store(out_ptr0 + x2, tmp13, xmask)
    tl.store(out_ptr1 + x2, tmp28, xmask)
# Fused ReLU + LayerNorm normalization: y = (relu(x) - mean) * inv_std * w + b,
# using the per-row mean (in_ptr1) and inverse std (in_ptr2) from the stats
# kernel, with affine weight (in_ptr3) and bias (in_ptr4) indexed by the
# position along the size-4 normalized axis.
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex        # flat element index
    x4 = xindex // 4   # row index
    x0 = xindex % 4    # normalized-axis position
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp3 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)  # ReLU
    tmp4 = tmp2 - tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 * tmp7
    tmp10 = tmp8 + tmp9
    tl.store(out_ptr0 + x3, tmp10, xmask)
# Final residual step: out = x + affine-LayerNorm(relu(y)), where x is the
# block input (in_ptr0), y the second linear's output (in_ptr1), mean/inv_std
# come from the stats kernel (in_ptr2/in_ptr3) and weight/bias from
# in_ptr4/in_ptr5.
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x4 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    tmp4 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.full([1], 0, tl.int32)
    tmp3 = triton_helpers.maximum(tmp2, tmp1)  # ReLU on the branch output
    tmp5 = tmp3 - tmp4
    tmp7 = tmp5 * tmp6
    tmp9 = tmp7 * tmp8
    tmp11 = tmp9 + tmp10
    tmp12 = tmp0 + tmp11  # residual add
    tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
    """Inductor-generated forward for ResNormLayer.

    Pipeline: addmm(w1) -> fused ReLU+LN -> addmm(w2) -> fused ReLU+LN ->
    residual add. `args` holds the 9 parameter/input tensors (cleared after
    unpacking); returns the output plus intermediates saved for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # y = w1 @ x + b1 over the flattened (64, 4) view of the input.
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        # ReLU + LayerNorm stats, then normalization with norm_fn1 affine.
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(64)](buf0, buf1, buf2, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(256)](buf0, buf1, buf2,
            primals_4, primals_5, buf3, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_5
        # y = w2 @ h + b2.
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        # Second ReLU + LN (buf1/buf2 recycled), then residual add into buf7.
        buf5 = buf2
        del buf2
        buf6 = buf1
        del buf1
        triton_poi_fused_native_layer_norm_0[grid(64)](buf4, buf5, buf6, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_2[grid(256)](primals_3, buf4,
            buf5, buf6, primals_8, primals_9, buf7, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del buf5
        del buf6
        del primals_9
    return buf7, primals_4, primals_8, reinterpret_tensor(primals_3, (64, 4
        ), (4, 1), 0), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
        ), buf4, primals_6
class ResNormLayerNew(nn.Module):
    """ResNormLayer variant whose forward dispatches to the Inductor `call`.

    Same parameters/submodules as ResNormLayer; forward gathers them in the
    positional order expected by the generated kernel graph.
    """

    def __init__(self, linear_size):
        super(ResNormLayerNew, self).__init__()
        self.l_size = linear_size
        self.nonlin1 = nn.ReLU(inplace=True)
        self.nonlin2 = nn.ReLU(inplace=True)
        self.norm_fn1 = nn.LayerNorm(self.l_size)
        self.norm_fn2 = nn.LayerNorm(self.l_size)
        self.w1 = nn.Linear(self.l_size, self.l_size)
        self.w2 = nn.Linear(self.l_size, self.l_size)

    def forward(self, input_0):
        # NOTE: primals_2/primals_8 pair LN weights with Linear biases — this
        # mapping mirrors how the graph was traced; do not reorder.
        primals_2 = self.norm_fn1.weight
        primals_4 = self.norm_fn1.bias
        primals_5 = self.norm_fn2.weight
        primals_7 = self.norm_fn2.bias
        primals_1 = self.w1.weight
        primals_8 = self.w1.bias
        primals_6 = self.w2.weight
        primals_9 = self.w2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        # Only the first element is the module output; the rest are saved
        # tensors for backward.
        return output[0]
|
dqshuai/MetaFormer
|
ResNormLayer
| false
| 15,268
|
[
"MIT"
] | 67
|
669bf18c35fdb51e35b0a79fa86224a18cd38ac5
|
https://github.com/dqshuai/MetaFormer/tree/669bf18c35fdb51e35b0a79fa86224a18cd38ac5
|
RegressionHead
|
import abc
import torch
import torch.nn as nn
import torch.utils.data.dataset
class BaseHead(nn.Module, metaclass=abc.ABCMeta):
    """Abstract base class for task heads."""

    # Abstract __init__ forces subclasses to define their own constructor.
    @abc.abstractmethod
    def __init__(self):
        super().__init__()
class RegressionHead(BaseHead):
    """Scalar regression head (adapted from RobertaClassificationHead):
    dropout -> dense -> tanh -> dropout -> 1-dim projection."""

    def __init__(self, task, hidden_size, hidden_dropout_prob, **kwargs):
        """From RobertaClassificationHead"""
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.dropout = nn.Dropout(hidden_dropout_prob)
        self.out_proj = nn.Linear(hidden_size, 1)

    def forward(self, pooled):
        hidden = torch.tanh(self.dense(self.dropout(pooled)))
        return self.out_proj(self.dropout(hidden))
def get_inputs():
    # One random 4-D activation for the regression head.
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    # Positional args (none) and constructor kwargs for RegressionHead.
    return [[], dict(task=4, hidden_size=4, hidden_dropout_prob=0.5)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import abc
import torch.nn as nn
import torch.utils.data.dataset
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# In-place fused bias-add + tanh: in_out_ptr0 = tanh(in_out_ptr0 + bias[x0]),
# with a per-feature (last-dim, size 4) bias broadcast over 256 elements.
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # bias index
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
    """Inductor-generated forward for RegressionHead.

    Pipeline: mm(dense) -> fused bias+tanh -> addmm(out_proj). Dropout does
    not appear — presumably traced in eval mode; confirm before training use.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (1, 4), (4, 1))
    assert_size_stride(primals_5, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # dense: x @ W^T over the flattened (64, 4) input view.
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        # bias add + tanh, in place.
        triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_3
        # out_proj: scores = b + h @ W^T.
        buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
        del primals_5
    return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0
        ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4
class BaseHead(nn.Module, metaclass=abc.ABCMeta):
    """Abstract base class for task heads."""

    # Abstract __init__ forces subclasses to define their own constructor.
    @abc.abstractmethod
    def __init__(self):
        super().__init__()
class RegressionHeadNew(BaseHead):
    """RegressionHead variant whose forward dispatches to the Inductor `call`.

    Same parameters as RegressionHead; the compiled graph omits dropout
    (eval-mode trace).
    """

    def __init__(self, task, hidden_size, hidden_dropout_prob, **kwargs):
        """From RobertaClassificationHead"""
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.dropout = nn.Dropout(hidden_dropout_prob)
        self.out_proj = nn.Linear(hidden_size, 1)

    def forward(self, input_0):
        # Parameter order must match the generated call() signature.
        primals_2 = self.dense.weight
        primals_3 = self.dense.bias
        primals_4 = self.out_proj.weight
        primals_5 = self.out_proj.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        # First element is the scores tensor; the rest are saved for backward.
        return output[0]
|
dumpmemory/jiant
|
RegressionHead
| false
| 15,269
|
[
"MIT"
] | 1,108
|
f9e0e7c9ecf88da0c26559c5f903aef0338c7bd9
|
https://github.com/dumpmemory/jiant/tree/f9e0e7c9ecf88da0c26559c5f903aef0338c7bd9
|
Net
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DeepMind(nn.Module):
    """Nature-DQN style convolutional torso producing 512-d features.

    Three convolutions followed by a fully-connected layer, all with
    orthogonal weight init (ReLU gain) and zero biases.
    """

    def __init__(self):
        super(DeepMind, self).__init__()
        self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
        self.fc1 = nn.Linear(32 * 7 * 7, 512)
        # Orthogonal init in layer order (RNG draw order matters for
        # reproducibility); constant_ consumes no RNG state.
        relu_gain = nn.init.calculate_gain('relu')
        for layer in (self.conv1, self.conv2, self.conv3, self.fc1):
            nn.init.orthogonal_(layer.weight.data, gain=relu_gain)
            nn.init.constant_(layer.bias.data, 0)

    def forward(self, x):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = F.relu(conv(x))
        x = x.view(-1, 32 * 7 * 7)
        return F.relu(self.fc1(x))
class Net(nn.Module):
    """Actor-critic head over the DeepMind torso.

    forward(inputs) scales raw pixel input by 1/255 and returns
    (state value, softmax action probabilities).
    """

    def __init__(self, num_actions):
        super(Net, self).__init__()
        self.cnn_layer = DeepMind()
        self.critic = nn.Linear(512, 1)
        self.actor = nn.Linear(512, num_actions)
        # Orthogonal init; the tiny actor gain keeps the initial policy
        # close to uniform.
        nn.init.orthogonal_(self.critic.weight.data)
        nn.init.constant_(self.critic.bias.data, 0)
        nn.init.orthogonal_(self.actor.weight.data, gain=0.01)
        nn.init.constant_(self.actor.bias.data, 0)

    def forward(self, inputs):
        features = self.cnn_layer(inputs / 255.0)
        return self.critic(features), F.softmax(self.actor(features), dim=1)
def get_inputs():
    # One random pixel-like batch: 4 frames of 4x144x144.
    return [torch.rand([4, 4, 144, 144])]


def get_init_inputs():
    # Positional args (none) and constructor kwargs for Net.
    return [[], dict(num_actions=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Elementwise pixel scaling: out = x * (1/255). No mask — the grid exactly
# covers the element count.
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp1 = 0.00392156862745098  # 1 / 255
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, None)
# In-place fused conv-bias add + ReLU for the conv1 output
# (4 x 32 x 35 x 35 = 156800 elements; bias indexed by channel).
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 156800
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 1225 % 32  # channel index (1225 = 35 * 35)
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
# In-place fused conv-bias add + ReLU for the conv2 output
# (4 x 64 x 16 x 16; element count is a power of two, so no mask).
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 64  # channel index (256 = 16 * 16)
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, None)
# In-place fused conv-bias add + ReLU for the conv3 output, additionally
# storing the boolean mask (activation <= 0) used by the ReLU backward pass.
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0,
    in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 25088
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 196 % 32  # channel index (196 = 14 * 14)
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # backward mask: True where gradient is zeroed
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr0 + x3, tmp6, xmask)
# In-place fused linear-bias add + ReLU for the 512-wide fc1 output.
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512  # feature index for the bias
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, None)
# Softmax stage 1 over rows of width 4: subtract the row max (numerical
# stability) and exponentiate. Stage 2 (normalization) is a separate kernel.
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
# Softmax stage 2: divide each exponentiated value by its row sum.
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
    """Inductor-generated forward for Net (actor-critic over DeepMind torso).

    Pipeline: x/255 -> conv1+ReLU -> conv2+ReLU -> conv3+ReLU (with backward
    mask) -> fc1+ReLU -> critic addmm and actor addmm -> two-stage softmax.
    Returns (value, pi, *saved_for_backward).
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 144, 144), (82944, 20736, 144, 1))
    assert_size_stride(primals_2, (32, 4, 8, 8), (256, 64, 8, 1))
    assert_size_stride(primals_3, (32,), (1,))
    assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (32, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (32,), (1,))
    assert_size_stride(primals_8, (512, 1568), (1568, 1))
    assert_size_stride(primals_9, (512,), (1,))
    assert_size_stride(primals_10, (1, 512), (512, 1))
    assert_size_stride(primals_11, (1,), (1,))
    assert_size_stride(primals_12, (4, 512), (512, 1))
    assert_size_stride(primals_13, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Pixel scaling by 1/255.
        buf0 = empty_strided_cuda((4, 4, 144, 144), (82944, 20736, 144, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(331776)](primals_1, buf0, 331776,
            XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_1
        # conv1 (8x8, stride 4) + fused bias/ReLU.
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(4, 4),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 32, 35, 35), (39200, 1225, 35, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_relu_1[grid(156800)](buf2, primals_3,
            156800, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_3
        # conv2 (4x4, stride 2) + fused bias/ReLU.
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 64, 16, 16), (16384, 256, 16, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_convolution_relu_2[grid(65536)](buf4, primals_5,
            65536, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_5
        # conv3 (3x3, stride 1) + fused bias/ReLU; buf14 is the ReLU backward mask.
        buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 32, 14, 14), (6272, 196, 14, 1))
        buf6 = buf5
        del buf5
        buf14 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_3[grid(25088)](
            buf6, primals_7, buf14, 25088, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_7
        # fc1 over the flattened (16, 1568) features + fused bias/ReLU.
        buf7 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf6, (16, 1568), (1568, 1), 0
            ), reinterpret_tensor(primals_8, (1568, 512), (1, 1568), 0),
            out=buf7)
        buf8 = buf7
        del buf7
        triton_poi_fused_relu_4[grid(8192)](buf8, primals_9, 8192, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_9
        # Critic value and actor logits.
        buf10 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_11, buf8, reinterpret_tensor(
            primals_10, (512, 1), (1, 512), 0), alpha=1, beta=1, out=buf10)
        del primals_11
        buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_13, buf8, reinterpret_tensor(
            primals_12, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf11)
        del primals_13
        # Two-stage numerically-stable softmax over the action dim.
        buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_5[grid(64)](buf11, buf12, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf13 = buf11
        del buf11
        triton_poi_fused__softmax_6[grid(64)](buf12, buf13, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf12
    return (buf10, buf13, primals_2, primals_4, primals_6, buf0, buf2, buf4,
        reinterpret_tensor(buf6, (16, 1568), (1568, 1), 0), buf8, buf13,
        primals_12, primals_10, primals_8, buf14)
class DeepMind(nn.Module):
    """Nature-DQN style convolutional torso producing 512-d features.

    Expects a 4-channel input whose spatial size reduces to 7x7 after the
    three convolutions (e.g. 84x84). Weights use orthogonal init with ReLU
    gain; biases start at zero.
    """

    def __init__(self):
        super(DeepMind, self).__init__()
        self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
        self.fc1 = nn.Linear(32 * 7 * 7, 512)
        # Orthogonal init (ReLU gain) for every weight, zeros for every bias.
        nn.init.orthogonal_(self.conv1.weight.data, gain=nn.init.
            calculate_gain('relu'))
        nn.init.orthogonal_(self.conv2.weight.data, gain=nn.init.
            calculate_gain('relu'))
        nn.init.orthogonal_(self.conv3.weight.data, gain=nn.init.
            calculate_gain('relu'))
        nn.init.orthogonal_(self.fc1.weight.data, gain=nn.init.
            calculate_gain('relu'))
        nn.init.constant_(self.conv1.bias.data, 0)
        nn.init.constant_(self.conv2.bias.data, 0)
        nn.init.constant_(self.conv3.bias.data, 0)
        nn.init.constant_(self.fc1.bias.data, 0)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        # Flatten spatial features for the fully-connected layer.
        x = x.view(-1, 32 * 7 * 7)
        x = F.relu(self.fc1(x))
        return x
class NetNew(nn.Module):
    """Actor-critic Net variant whose forward dispatches to the Inductor
    `call`, returning (value, policy-probabilities)."""

    def __init__(self, num_actions):
        super(NetNew, self).__init__()
        self.cnn_layer = DeepMind()
        self.critic = nn.Linear(512, 1)
        self.actor = nn.Linear(512, num_actions)
        # Small actor gain keeps the initial policy near uniform.
        nn.init.orthogonal_(self.critic.weight.data)
        nn.init.constant_(self.critic.bias.data, 0)
        nn.init.orthogonal_(self.actor.weight.data, gain=0.01)
        nn.init.constant_(self.actor.bias.data, 0)

    def forward(self, input_0):
        # Parameter order must match the generated call() signature.
        primals_2 = self.cnn_layer.conv1.weight
        primals_3 = self.cnn_layer.conv1.bias
        primals_4 = self.cnn_layer.conv2.weight
        primals_5 = self.cnn_layer.conv2.bias
        primals_6 = self.cnn_layer.conv3.weight
        primals_7 = self.cnn_layer.conv3.bias
        primals_8 = self.cnn_layer.fc1.weight
        primals_9 = self.cnn_layer.fc1.bias
        primals_10 = self.critic.weight
        primals_11 = self.critic.bias
        primals_12 = self.actor.weight
        primals_13 = self.actor.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        # (value, pi); remaining entries are saved tensors for backward.
        return output[0], output[1]
|
TianhongDai/Self_Imitation_Learning
|
Net
| false
| 15,270
|
[
"MIT"
] | 61
|
e49003582fa3d875495d84682f2a3332d4922dbc
|
https://github.com/TianhongDai/Self_Imitation_Learning/tree/e49003582fa3d875495d84682f2a3332d4922dbc
|
LayerNorm
|
import torch
import torch.autograd
import torch.nn as nn
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension, optionally conditioned on
    an external tensor (conditional layer norm).

    Args:
        input_dim: size of the normalized (last) dimension of the inputs.
        cond_dim: last-dim size of the conditioning tensor (conditional mode).
        center: if True, learn and apply a bias ``beta``.
        scale: if True, learn and apply a gain ``gamma``.
        epsilon: numerical-stability constant added to the variance; falsy
            values (None, 0) fall back to 1e-12.
        conditional: if True, derive per-sample offsets to beta/gamma from
            ``cond`` via zero-initialized linear projections.
        hidden_units: optional bottleneck size for projecting ``cond`` first.
        hidden_initializer: 'normal' or 'xavier' init for the hidden
            projection. NOTE(review): the historical default is the
            misspelled 'xaiver', which matches neither branch and so leaves
            the hidden projection at PyTorch's default Linear init; kept
            as-is for backward compatibility.
    """

    def __init__(self, input_dim, cond_dim=0, center=True, scale=True,
        epsilon=None, conditional=False, hidden_units=None,
        hidden_activation='linear', hidden_initializer='xaiver', **kwargs):
        super(LayerNorm, self).__init__()
        self.center = center
        self.scale = scale
        self.conditional = conditional
        self.hidden_units = hidden_units
        self.hidden_initializer = hidden_initializer
        self.epsilon = epsilon or 1e-12
        self.input_dim = input_dim
        self.cond_dim = cond_dim
        if self.center:
            self.beta = nn.Parameter(torch.zeros(input_dim))
        if self.scale:
            self.gamma = nn.Parameter(torch.ones(input_dim))
        if self.conditional:
            if self.hidden_units is not None:
                self.hidden_dense = nn.Linear(in_features=self.cond_dim,
                    out_features=self.hidden_units, bias=False)
            if self.center:
                self.beta_dense = nn.Linear(in_features=self.cond_dim,
                    out_features=input_dim, bias=False)
            if self.scale:
                self.gamma_dense = nn.Linear(in_features=self.cond_dim,
                    out_features=input_dim, bias=False)
        self.initialize_weights()

    def initialize_weights(self):
        """Zero-init the conditional projections so training starts as plain LN."""
        if self.conditional:
            if self.hidden_units is not None:
                if self.hidden_initializer == 'normal':
                    # fixed: torch.nn.init.normal is deprecated in favor of normal_
                    torch.nn.init.normal_(self.hidden_dense.weight)
                elif self.hidden_initializer == 'xavier':
                    torch.nn.init.xavier_uniform_(self.hidden_dense.weight)
            if self.center:
                torch.nn.init.constant_(self.beta_dense.weight, 0)
            if self.scale:
                torch.nn.init.constant_(self.gamma_dense.weight, 0)

    def forward(self, inputs, cond=None):
        """Normalize ``inputs`` over the last dim; ``cond`` is required when
        ``conditional=True`` and is broadcast across intermediate dims."""
        if self.conditional:
            if self.hidden_units is not None:
                cond = self.hidden_dense(cond)
            # Insert singleton dims so cond broadcasts against inputs.
            for _ in range(len(inputs.shape) - len(cond.shape)):
                cond = cond.unsqueeze(1)
            if self.center:
                beta = self.beta_dense(cond) + self.beta
            if self.scale:
                gamma = self.gamma_dense(cond) + self.gamma
        else:
            if self.center:
                beta = self.beta
            if self.scale:
                gamma = self.gamma
        outputs = inputs
        if self.center:
            mean = torch.mean(outputs, dim=-1).unsqueeze(-1)
            outputs = outputs - mean
        if self.scale:
            # Biased variance of the (possibly centered) values.
            variance = torch.mean(outputs ** 2, dim=-1).unsqueeze(-1)
            std = (variance + self.epsilon) ** 0.5
            outputs = outputs / std
            outputs = outputs * gamma
        if self.center:
            outputs = outputs + beta
        return outputs
def get_inputs():
    """Random forward-pass inputs used by the harness to smoke-test the module."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """(positional args, keyword args) used to construct the module in tests."""
    return [[], dict(input_dim=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.autograd
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # LayerNorm stage 1: out[i] = in[i] - mean(row), where each row is the
    # 4-element last dimension of a contiguous (4,4,4,4) tensor (256 elems).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index (one row = 4 last-dim elements)
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # Reload the 4 elements of this row to compute its mean.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8  # row mean
    tmp10 = tmp0 - tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_pow_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # LayerNorm stage 2: given centered values (in_ptr0), gamma (in_ptr1)
    # and beta (in_ptr2), computes
    #   out = centered / sqrt(mean(centered^2) + 1e-12) * gamma + beta
    # over rows of 4 elements (256 elements total).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index
    x0 = xindex % 4   # position within the row (indexes gamma/beta)
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    # The 4 centered values of this row, for the variance reduction.
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12  # biased variance
    tmp14 = 1e-12
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.sqrt(tmp15)
    tmp17 = tmp0 / tmp16
    tmp19 = tmp17 * tmp18  # * gamma
    tmp21 = tmp19 + tmp20  # + beta
    tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
    # Fused LayerNorm forward on CUDA device 0.
    # args: [beta (4,), gamma (4,), input (4,4,4,4)] -- the list is consumed.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Stage 1: subtract the per-row mean.
        triton_poi_fused_sub_0[grid(256)](primals_3, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Stage 2: divide by std, scale by gamma, shift by beta.
        triton_poi_fused_add_div_mul_pow_1[grid(256)](buf0, primals_2,
            primals_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del primals_1
        del primals_2
    # Returns (normalized output, saved input for backward).
    return buf1, primals_3
class LayerNormNew(nn.Module):
    """Conditional layer normalization whose forward pass runs the fused
    Triton kernels via ``call`` (unconditional center+scale path)."""

    def __init__(self, input_dim, cond_dim=0, center=True, scale=True,
        epsilon=None, conditional=False, hidden_units=None,
        hidden_activation='linear', hidden_initializer='xaiver', **kwargs):
        super(LayerNormNew, self).__init__()
        self.center = center
        self.scale = scale
        self.conditional = conditional
        self.hidden_units = hidden_units
        self.hidden_initializer = hidden_initializer
        self.epsilon = epsilon or 1e-12
        self.input_dim = input_dim
        self.cond_dim = cond_dim
        if center:
            self.beta = nn.Parameter(torch.zeros(input_dim))
        if scale:
            self.gamma = nn.Parameter(torch.ones(input_dim))
        if conditional:
            # Bias-free projections from the conditioning vector; creation
            # order (hidden, beta, gamma) is kept for RNG reproducibility.
            if hidden_units is not None:
                self.hidden_dense = nn.Linear(in_features=cond_dim,
                    out_features=hidden_units, bias=False)
            if center:
                self.beta_dense = nn.Linear(in_features=cond_dim,
                    out_features=input_dim, bias=False)
            if scale:
                self.gamma_dense = nn.Linear(in_features=cond_dim,
                    out_features=input_dim, bias=False)
        self.initialize_weights()

    def initialize_weights(self):
        """Zero-init conditional projections so they start as identity offsets."""
        if not self.conditional:
            return
        if self.hidden_units is not None:
            if self.hidden_initializer == 'normal':
                torch.nn.init.normal(self.hidden_dense.weight)
            elif self.hidden_initializer == 'xavier':
                torch.nn.init.xavier_uniform_(self.hidden_dense.weight)
        if self.center:
            torch.nn.init.constant_(self.beta_dense.weight, 0)
        if self.scale:
            torch.nn.init.constant_(self.gamma_dense.weight, 0)

    def forward(self, input_0):
        # call() expects [beta, gamma, input] and returns (output, saved input).
        return call([self.beta, self.gamma, input_0])[0]
|
dumpmemory/W2NER
|
LayerNorm
| false
| 15,271
|
[
"MIT"
] | 128
|
fb1b6eb1111eb001b1c965097d995244b840bdda
|
https://github.com/dumpmemory/W2NER/tree/fb1b6eb1111eb001b1c965097d995244b840bdda
|
SoftTargetCrossEntropy
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
import torch.utils.data
class SoftTargetCrossEntropy(nn.Module):
    """Cross entropy against soft (probability-distribution) targets.

    If the prediction batch is larger than the target batch (e.g. repeated
    augmentations), targets are tiled along dim 0 to match; assumes the
    prediction batch size is a multiple of the target batch size.
    """

    def __init__(self):
        super(SoftTargetCrossEntropy, self).__init__()

    def forward(self, x, target):
        """Return the batch mean of sum(-target * log_softmax(x), dim=-1)."""
        N_rep = x.shape[0]
        N = target.shape[0]
        if N != N_rep:  # idiom fix: was `if not N == N_rep`
            target = target.repeat(N_rep // N, 1)
        loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1)
        return loss.mean()
def get_inputs():
    """Random (logits, soft targets) pair for smoke-testing the loss."""
    return [torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """(positional args, keyword args) for the module constructor."""
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Numerically stable first half of log_softmax over the last dim (size 4):
    # out = x - max(row). The log-sum-exp term is applied by the follow-up
    # reduction kernel.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index (4 elements per row)
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)  # row max
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Single-program reduction: mean over 64 rows of
    #   sum(-target * log_softmax(x), dim=-1)
    # where in_ptr1 holds x - max(x) (previous kernel) and in_ptr0 holds the
    # soft targets. The scalar result lands in in_out_ptr0.
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    # Per-row loads: 4 shifted logits and 4 targets.
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp1 = -tmp0
    # log-sum-exp of the shifted logits.
    tmp3 = tl_math.exp(tmp2)
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp3 + tmp5
    tmp8 = tl_math.exp(tmp7)
    tmp9 = tmp6 + tmp8
    tmp11 = tl_math.exp(tmp10)
    tmp12 = tmp9 + tmp11
    tmp13 = tl_math.log(tmp12)
    tmp14 = tmp2 - tmp13  # log_softmax element 0
    tmp15 = tmp1 * tmp14
    tmp17 = -tmp16
    tmp18 = tmp4 - tmp13
    tmp19 = tmp17 * tmp18
    tmp20 = tmp15 + tmp19
    tmp22 = -tmp21
    tmp23 = tmp7 - tmp13
    tmp24 = tmp22 * tmp23
    tmp25 = tmp20 + tmp24
    tmp27 = -tmp26
    tmp28 = tmp10 - tmp13
    tmp29 = tmp27 * tmp28
    tmp30 = tmp25 + tmp29  # per-row loss
    tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
    tmp33 = tl.sum(tmp31, 1)[:, None]
    tmp34 = 64.0
    tmp35 = tmp33 / tmp34  # mean over the 64 rows
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None)
def call(args):
    # Soft-target cross entropy on CUDA device 0; returns a scalar tensor.
    # args: [logits x, soft targets] -- the list is consumed.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Stage 1: shift logits by their row max.
        triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf2 = empty_strided_cuda((), (), torch.float32)
        buf3 = buf2
        del buf2
        # Stage 2: finish log_softmax, weight by -target, reduce to the mean.
        triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf3,
            arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg1_1
        del buf0
    return buf3,
class SoftTargetCrossEntropyNew(nn.Module):
    """Triton-fused soft-target cross entropy (see ``call``)."""

    def __init__(self):
        super(SoftTargetCrossEntropyNew, self).__init__()

    def forward(self, input_0, input_1):
        # call() expects [logits, targets] and returns a 1-tuple.
        result = call([input_0, input_1])
        return result[0]
|
dumpmemory/TokenLabeling
|
SoftTargetCrossEntropy
| false
| 15,272
|
[
"Apache-2.0"
] | 367
|
9dbfd59aedecfe83f6f3253db4e99b82359d48ac
|
https://github.com/dumpmemory/TokenLabeling/tree/9dbfd59aedecfe83f6f3253db4e99b82359d48ac
|
AddPositionEmb
|
import torch
from typing import Sequence
import torch.nn as nn
import torch._C
import torch.serialization
import torch.nn.parallel
class AddPositionEmb(nn.Module):
    """Adds a learned positional embedding (zero-initialized) to its input.

    1D spatial shapes produce an (L, dim) embedding; 2D shapes produce a
    channel-first (dim, H, W) embedding. A leading batch dim of 1 broadcasts.
    """

    def __init__(self, dim=384, spatial_shape=[14, 14]):
        super().__init__()
        if isinstance(spatial_shape, int):
            spatial_shape = [spatial_shape]
        assert isinstance(spatial_shape, Sequence
            ), f'"spatial_shape" must by a sequence or int, get {type(spatial_shape)} instead.'
        if len(spatial_shape) == 1:
            embed_shape = [*spatial_shape, dim]
        else:
            embed_shape = [dim, *spatial_shape]
        self.pos_embed = nn.Parameter(torch.zeros(1, *embed_shape))

    def forward(self, x):
        return self.pos_embed + x
def get_inputs():
    """Random (N, C, H, W) feature map matching the default embedding shape."""
    return [torch.rand(4, 384, 14, 14)]
def get_init_inputs():
    """(positional args, keyword args) for the module constructor."""
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from typing import Sequence
import torch.nn as nn
import torch._C
import torch.serialization
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Broadcast add: out = x + pos_embed, with the (1,384,14,14) embedding
    # (75264 elements) repeated across the batch dimension.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 75264  # wrap into the single-batch embedding
    tmp0 = tl.load(in_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + x2, tmp2, None)
def call(args):
    # Positional-embedding add on CUDA device 0.
    # args: [pos_embed (1,384,14,14), input (4,384,14,14)] -- consumed.
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (1, 384, 14, 14), (75264, 196, 14, 1))
    assert_size_stride(primals_2, (4, 384, 14, 14), (75264, 196, 14, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 384, 14, 14), (75264, 196, 14, 1),
            torch.float32)
        get_raw_stream(0)
        # 4 * 75264 = 301056 output elements.
        triton_poi_fused_add_0[grid(301056)](primals_2, primals_1, buf0,
            301056, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_1
        del primals_2
    return buf0,
class AddPositionEmbNew(nn.Module):
    """Adds a learned positional embedding via the fused Triton ``call`` path."""

    def __init__(self, dim=384, spatial_shape=[14, 14]):
        super().__init__()
        if isinstance(spatial_shape, int):
            spatial_shape = [spatial_shape]
        assert isinstance(spatial_shape, Sequence
            ), f'"spatial_shape" must by a sequence or int, get {type(spatial_shape)} instead.'
        # 1D: (L, dim); 2D: channel-first (dim, H, W).
        if len(spatial_shape) == 1:
            embed_shape = [*spatial_shape, dim]
        else:
            embed_shape = [dim, *spatial_shape]
        self.pos_embed = nn.Parameter(torch.zeros(1, *embed_shape))

    def forward(self, input_0):
        # call() expects [pos_embed, input] and returns a 1-tuple.
        return call([self.pos_embed, input_0])[0]
|
dumpmemory/poolformer
|
AddPositionEmb
| false
| 15,273
|
[
"Apache-2.0"
] | 677
|
d108be054469da760141f4789bf87c915c4fd0b2
|
https://github.com/dumpmemory/poolformer/tree/d108be054469da760141f4789bf87c915c4fd0b2
|
AFTFull
|
import torch
from torch import nn
class AFTFull(nn.Module):
    """Attention Free Transformer, "full" variant (single head, as in the paper).

    Args:
        max_seqlen: maximum sequence length T supported by the learned
            (T x T) position-bias matrix ``wbias``.
        dim: embedding dimension of the input tokens.
        hidden_dim: inner dimension of the Q/K/V projections.
    """

    def __init__(self, max_seqlen, dim, hidden_dim=64):
        super().__init__()
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.to_q = nn.Linear(dim, hidden_dim)
        self.to_k = nn.Linear(dim, hidden_dim)
        self.to_v = nn.Linear(dim, hidden_dim)
        self.project = nn.Linear(hidden_dim, dim)
        self.wbias = nn.Parameter(torch.Tensor(max_seqlen, max_seqlen))
        nn.init.xavier_uniform_(self.wbias)

    def forward(self, x):
        """x: (B, T, dim) -> (B, T, dim); requires T <= max_seqlen."""
        B, T, _ = x.shape
        Q = self.to_q(x).view(B, T, self.hidden_dim)
        K = self.to_k(x).view(B, T, self.hidden_dim)
        V = self.to_v(x).view(B, T, self.hidden_dim)
        temp_wbias = self.wbias[:T, :T].unsqueeze(0)
        """
        From the paper
        """
        Q_sig = torch.sigmoid(Q)
        # Hoisted: the original computed exp(temp_wbias) and exp(K) twice each.
        exp_w = torch.exp(temp_wbias)
        exp_K = torch.exp(K)
        temp = exp_w @ torch.mul(exp_K, V)
        weighted = temp / (exp_w @ exp_K)
        Yt = torch.mul(Q_sig, weighted)
        Yt = Yt.view(B, T, self.hidden_dim)
        Yt = self.project(Yt)
        return Yt
def get_inputs():
    """Random (B, T, dim) token batch for smoke tests."""
    return [torch.rand(4, 4, 4)]
def get_init_inputs():
    """(positional args, keyword args) for the module constructor."""
    return [[], dict(max_seqlen=4, dim=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise exp of the (4,4) position-bias slice (16 elements).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.exp(tmp0)
    tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Materializes exp(K) * V (in_ptr0 = K, in_ptr1 = V) into a transposed
    # layout suitable for the subsequent batched matmul against exp(wbias).
    ynumel = 16
    xnumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 4   # sequence position
    y1 = yindex // 4  # batch index
    tmp0 = tl.load(in_ptr0 + (x2 + 64 * y3), xmask & ymask, eviction_policy
        ='evict_last')
    tmp2 = tl.load(in_ptr1 + (x2 + 64 * y3), xmask & ymask, eviction_policy
        ='evict_last')
    tmp1 = tl_math.exp(tmp0)
    tmp3 = tmp1 * tmp2
    tl.store(out_ptr0 + (y0 + 4 * x2 + 256 * y1), tmp3, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Materializes exp(K) into the same transposed layout as clone_1, for the
    # denominator matmul exp(wbias) @ exp(K).
    ynumel = 256
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 64   # hidden channel
    y1 = yindex // 64  # batch index
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 64 * x2 + 256 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl_math.exp(tmp0)
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp1, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_div_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Final AFT-Full gating: out = sigmoid(Q) * (numerator / denominator),
    # where in_ptr0 = Q, in_ptr1 = exp(w) @ (exp(K)*V), in_ptr2 = exp(w) @ exp(K),
    # and the result is written back in (B, T, hidden) layout.
    ynumel = 16
    xnumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 4   # sequence position
    y1 = yindex // 4  # batch index
    tmp0 = tl.load(in_ptr0 + (x2 + 64 * y3), xmask & ymask, eviction_policy
        ='evict_last')
    tmp2 = tl.load(in_ptr1 + (y0 + 4 * x2 + 256 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (y0 + 4 * x2 + 256 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.sigmoid(tmp0)
    tmp4 = tmp2 / tmp3
    tmp5 = tmp1 * tmp4
    tl.store(out_ptr0 + (x2 + 64 * y3), tmp5, xmask & ymask)
def call(args):
    # AFT-Full forward on CUDA device 0.
    # args: [x, q.w, q.b, k.w, k.b, v.w, v.b, wbias, proj.w, proj.b] -- consumed.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (64, 4), (4, 1))
    assert_size_stride(primals_3, (64,), (1,))
    assert_size_stride(primals_4, (64, 4), (4, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (64, 4), (4, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4, 64), (64, 1))
    assert_size_stride(primals_10, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Q/K/V projections as flat (16, 64) addmm calls.
        buf0 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 64), (1, 4),
            0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 64), (1, 4),
            0), alpha=1, beta=1, out=buf2)
        del primals_6
        del primals_7
        # buf3 = exp(wbias slice).
        buf3 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_exp_0[grid(16)](primals_8, buf3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_8
        # buf4 = exp(K) * V in transposed layout; buf5 = exp(w) @ (exp(K)*V).
        buf4 = empty_strided_cuda((4, 64, 4), (256, 4, 1), torch.float32)
        triton_poi_fused_clone_1[grid(16, 64)](buf1, buf2, buf4, 16, 64,
            XBLOCK=64, YBLOCK=4, num_warps=4, num_stages=1)
        buf5 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf4, (256, 4), (4, 1), 0),
            reinterpret_tensor(buf3, (4, 4), (1, 4), 0), out=buf5)
        # buf6 = exp(K) in transposed layout; buf7 = exp(w) @ exp(K).
        buf6 = empty_strided_cuda((4, 64, 4), (256, 4, 1), torch.float32)
        triton_poi_fused_clone_2[grid(256, 4)](buf1, buf6, 256, 4, XBLOCK=4,
            YBLOCK=256, num_warps=4, num_stages=1)
        buf7 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf6, (256, 4), (4, 1), 0),
            reinterpret_tensor(buf3, (4, 4), (1, 4), 0), out=buf7)
        # buf8 = sigmoid(Q) * (buf5 / buf7); buf9 = output projection.
        buf8 = empty_strided_cuda((4, 4, 64), (256, 64, 1), torch.float32)
        triton_poi_fused_clone_div_mul_sigmoid_3[grid(16, 64)](buf0, buf5,
            buf7, buf8, 16, 64, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1
            )
        buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_10, reinterpret_tensor(buf8, (16, 64),
            (64, 1), 0), reinterpret_tensor(primals_9, (64, 4), (1, 64), 0),
            alpha=1, beta=1, out=buf9)
        del primals_10
    # Output first, then buffers saved for backward.
    return reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0
        ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
        ), buf0, buf1, buf2, buf3, reinterpret_tensor(buf4, (256, 4), (4, 1), 0
        ), buf5, reinterpret_tensor(buf6, (256, 4), (4, 1), 0
        ), buf7, reinterpret_tensor(buf8, (16, 64), (64, 1), 0), primals_9
class AFTFullNew(nn.Module):
    """AFT-Full whose forward pass dispatches to the fused Triton ``call``.

    max_seqlen bounds the learned (T x T) position bias; single head, as in
    the paper.
    """

    def __init__(self, max_seqlen, dim, hidden_dim=64):
        super().__init__()
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.to_q = nn.Linear(dim, hidden_dim)
        self.to_k = nn.Linear(dim, hidden_dim)
        self.to_v = nn.Linear(dim, hidden_dim)
        self.project = nn.Linear(hidden_dim, dim)
        self.wbias = nn.Parameter(torch.Tensor(max_seqlen, max_seqlen))
        nn.init.xavier_uniform_(self.wbias)

    def forward(self, input_0):
        # Argument order expected by call():
        # x, q.w, q.b, k.w, k.b, v.w, v.b, wbias, proj.w, proj.b.
        args = [input_0]
        for lin in (self.to_q, self.to_k, self.to_v):
            args.extend((lin.weight, lin.bias))
        args.append(self.wbias)
        args.extend((self.project.weight, self.project.bias))
        return call(args)[0]
|
dumpmemory/aft-pytorch
|
AFTFull
| false
| 15,274
|
[
"MIT"
] | 170
|
9a896966481f4042c2882f544d7bb1381e81dca1
|
https://github.com/dumpmemory/aft-pytorch/tree/9a896966481f4042c2882f544d7bb1381e81dca1
|
AFTSimple
|
import torch
from torch import nn
class AFTSimple(nn.Module):
    """Attention Free Transformer, "simple" variant: one global context vector
    (softmax over time on K, weighting V) replaces per-position attention.
    Single head, as in the paper.
    """

    def __init__(self, max_seqlen, dim, hidden_dim=64):
        super().__init__()
        # max_seqlen is accepted for API parity with AFTFull; unused here.
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.to_q = nn.Linear(dim, hidden_dim)
        self.to_k = nn.Linear(dim, hidden_dim)
        self.to_v = nn.Linear(dim, hidden_dim)
        self.project = nn.Linear(hidden_dim, dim)

    def forward(self, x):
        B, T, _ = x.shape
        Q = self.to_q(x).view(B, T, self.hidden_dim)
        K = self.to_k(x).view(B, T, self.hidden_dim)
        V = self.to_v(x).view(B, T, self.hidden_dim)
        # Global context: softmax over the time axis on K, weighted sum of V.
        context = (torch.softmax(K, 1) * V).sum(dim=1, keepdim=True)
        # Sigmoid-gated query, broadcast against the single context vector.
        gated = torch.sigmoid(Q) * context
        return self.project(gated)
def get_inputs():
    """Random (B, T, dim) token batch for smoke tests."""
    return [torch.rand(4, 4, 4)]
def get_init_inputs():
    """(positional args, keyword args) for the module constructor."""
    return [[], dict(max_seqlen=4, dim=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Stable softmax-over-time numerator: out = exp(K - max over the 4 time
    # steps), per (batch, channel) column of the (4, 4, 64) K tensor.
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 64     # hidden channel
    x2 = xindex // 256   # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    # The 4 time steps of this (batch, channel) column.
    tmp1 = tl.load(in_ptr0 + (x0 + 256 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (64 + x0 + 256 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (128 + x0 + 256 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (192 + x0 + 256 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)  # column max
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Finishes the softmax (divide by the per-column sum of exponentials,
    # in_ptr0) and reduces over time: out = sum_t softmax(K)[t] * V[t]
    # (in_ptr1 = V), producing the (B, 1, hidden) context.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64   # hidden channel
    x1 = xindex // 64  # batch index
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask)
    tmp8 = tl.load(in_ptr1 + (x0 + 256 * x1), xmask)
    tmp11 = tl.load(in_ptr1 + (64 + x0 + 256 * x1), xmask)
    tmp15 = tl.load(in_ptr1 + (128 + x0 + 256 * x1), xmask)
    tmp19 = tl.load(in_ptr1 + (192 + x0 + 256 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5  # softmax denominator
    tmp7 = tmp0 / tmp6
    tmp9 = tmp7 * tmp8
    tmp10 = tmp1 / tmp6
    tmp12 = tmp10 * tmp11
    tmp13 = tmp9 + tmp12
    tmp14 = tmp3 / tmp6
    tmp16 = tmp14 * tmp15
    tmp17 = tmp13 + tmp16
    tmp18 = tmp5 / tmp6
    tmp20 = tmp18 * tmp19
    tmp21 = tmp17 + tmp20
    tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_sigmoid_sum_2(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # Final gating: out = sigmoid(Q) * context, broadcasting the (B, 1, 64)
    # context (in_ptr1) over the 4 time steps of Q (in_ptr0).
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 64     # hidden channel
    x2 = xindex // 256   # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2
    tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
    # AFT-Simple forward on CUDA device 0.
    # args: [x, q.w, q.b, k.w, k.b, v.w, v.b, proj.w, proj.b] -- consumed.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (64, 4), (4, 1))
    assert_size_stride(primals_3, (64,), (1,))
    assert_size_stride(primals_4, (64, 4), (4, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (64, 4), (4, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (4, 64), (64, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Q/K/V projections as flat (16, 64) addmm calls.
        buf0 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 64), (1, 4),
            0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 64), (1, 4),
            0), alpha=1, beta=1, out=buf2)
        del primals_6
        del primals_7
        # buf3 = exp(K - max); buf4 = context = sum_t softmax(K) * V.
        buf3 = empty_strided_cuda((4, 4, 64), (256, 64, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(1024)](buf1, buf3, 1024, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf4 = empty_strided_cuda((4, 1, 64), (64, 256, 1), torch.float32)
        triton_poi_fused__softmax_mul_sum_1[grid(256)](buf3, buf2, buf4,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        # buf5 reuses buf3's storage: sigmoid(Q) * context.
        buf5 = buf3
        del buf3
        triton_poi_fused__softmax_mul_sigmoid_sum_2[grid(1024)](buf0, buf4,
            buf5, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        del buf4
        # Output projection.
        buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (16, 64),
            (64, 1), 0), reinterpret_tensor(primals_8, (64, 4), (1, 64), 0),
            alpha=1, beta=1, out=buf6)
        del primals_9
    # Output first, then buffers saved for backward.
    return reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0
        ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
        ), buf0, buf1, buf2, reinterpret_tensor(buf5, (16, 64), (64, 1), 0
        ), primals_8
class AFTSimpleNew(nn.Module):
    """AFT-Simple whose forward pass runs the fused Triton ``call`` path.

    max_seqlen is accepted for API parity with AFTFull; single head, as in
    the paper.
    """

    def __init__(self, max_seqlen, dim, hidden_dim=64):
        super().__init__()
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.to_q = nn.Linear(dim, hidden_dim)
        self.to_k = nn.Linear(dim, hidden_dim)
        self.to_v = nn.Linear(dim, hidden_dim)
        self.project = nn.Linear(hidden_dim, dim)

    def forward(self, input_0):
        # call() expects x followed by (weight, bias) of q, k, v, project.
        args = [input_0]
        for lin in (self.to_q, self.to_k, self.to_v, self.project):
            args.extend((lin.weight, lin.bias))
        return call(args)[0]
|
dumpmemory/aft-pytorch
|
AFTSimple
| false
| 15,275
|
[
"MIT"
] | 170
|
9a896966481f4042c2882f544d7bb1381e81dca1
|
https://github.com/dumpmemory/aft-pytorch/tree/9a896966481f4042c2882f544d7bb1381e81dca1
|
PixelNorm
|
import torch
import torch.nn as nn
import torch.utils.cpp_extension
class PixelNorm(nn.Module):
    """Pixel normalization: divide each spatial position by the RMS of its
    channel vector (plus 1e-8 for stability)."""

    def forward(self, x):
        rms = x.pow(2).mean(dim=1, keepdim=True).sqrt()
        return x / (rms + 1e-08)
def get_inputs():
    """Random (N, C, H, W) tensor for smoke-testing the module."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """(positional args, keyword args) for the module constructor."""
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.cpp_extension
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_pow_sqrt_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Pixel norm for a contiguous (4,4,4,4) tensor:
    #   out = x / (sqrt(mean(x^2 over channels)) + 1e-8)
    # The 4 channel values of each spatial position are 16 elements apart.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16    # spatial offset within one channel plane
    x2 = xindex // 64   # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    # Load the 4 channel values of this pixel.
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12  # mean of squares over channels
    tmp14 = libdevice.sqrt(tmp13)
    tmp15 = 1e-08
    tmp16 = tmp14 + tmp15
    tmp17 = tmp0 / tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
    """Run the fused PixelNorm kernel on a (4, 4, 4, 4) CUDA tensor.

    Consumes and clears `args` (a single-element list); returns a
    one-tuple containing the normalized output buffer.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mean_pow_sqrt_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class PixelNormNew(nn.Module):
    """Pixel normalization backed by the fused Triton kernel above."""

    def forward(self, input_0):
        outputs = call([input_0])
        return outputs[0]
|
STomoya/animeface
|
PixelNorm
| false
| 15,276
|
[
"MIT"
] | 61
|
37b3cd26097d7874559d4c152e41e5712b7a1a42
|
https://github.com/STomoya/animeface/tree/37b3cd26097d7874559d4c152e41e5712b7a1a42
|
SpanFCLayer
|
import torch
from torch import nn
class SpanFCLayer(nn.Module):

    def __init__(self, input_dim, output_dim, dropout_rate=0.1, is_active=
        True, is_dropout=True, active_type='mish'):
        """Span FC head: Linear -> activation -> LayerNorm -> Linear -> activation.

        Mostly used as the last output head for span prediction; inserts a
        LayerNorm between the two linear projections.

        Args:
            input_dim: input dimension, e.g. 768.
            output_dim: output dimension, e.g. 32.
            dropout_rate: dropout probability, e.g. 0.1.
            is_dropout: whether to apply dropout to the input, e.g. True.
            is_active: whether to apply the activation function, e.g. True.
            active_type: activation type, e.g. "tanh", "relu", "gelu",
                "swish", "mish" (case-insensitive; unknown values fall back
                to ReLU).

        Returns:
            Tensor of batch.
        """
        super(SpanFCLayer, self).__init__()
        self.linear_0 = nn.Linear(input_dim, input_dim)
        self.linear_1 = nn.Linear(input_dim, output_dim)
        self.layer_norm = nn.LayerNorm(input_dim)
        self.dropout = nn.Dropout(dropout_rate)
        self.is_dropout = is_dropout
        self.active_type = active_type
        self.is_active = is_active
        self.softmax = nn.Softmax(1)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU(inplace=True)
        self.tanh = nn.Tanh()
        self.gelu = nn.GELU()

    def _activate(self, x):
        # Single activation dispatch shared by both projection stages; the
        # original duplicated this if/elif chain verbatim twice in forward().
        kind = self.active_type.upper()
        if kind == 'MISH':
            return x * torch.tanh(nn.functional.softplus(x))
        if kind == 'SWISH':
            return x * torch.sigmoid(x)
        if kind == 'TANH':
            return self.tanh(x)
        if kind == 'GELU':
            return self.gelu(x)
        # 'RELU' and any unrecognized type both use ReLU, matching the
        # original fall-through behavior.
        return self.relu(x)

    def forward(self, x):
        if self.is_dropout:
            x = self.dropout(x)
        x = self.linear_0(x)
        if self.is_active:
            x = self._activate(x)
        x = self.layer_norm(x)
        x = self.linear_1(x)
        if self.is_active:
            x = self._activate(x)
        return x
def get_inputs():
    """Sample forward() inputs: one random (4, 4, 4, 4) tensor."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    """Constructor args/kwargs used to build a small SpanFCLayer."""
    kwargs = {'input_dim': 4, 'output_dim': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_native_layer_norm_softplus_tanh_0(in_ptr0,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Stage 1 of the fused Mish + LayerNorm: for each row of 4 features,
    # compute m = x * tanh(softplus(x)) per element, then emit the row mean
    # (out_ptr0) and the biased variance (out_ptr1) of the activated values.
    # Softplus uses the stable form: x when x > 20, else log1p(exp(x)).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp24 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = libdevice.tanh(tmp5)
    tmp7 = tmp0 * tmp6
    tmp9 = tmp8 > tmp1
    tmp10 = tl_math.exp(tmp8)
    tmp11 = libdevice.log1p(tmp10)
    tmp12 = tl.where(tmp9, tmp8, tmp11)
    tmp13 = libdevice.tanh(tmp12)
    tmp14 = tmp8 * tmp13
    tmp15 = tmp7 + tmp14
    tmp17 = tmp16 > tmp1
    tmp18 = tl_math.exp(tmp16)
    tmp19 = libdevice.log1p(tmp18)
    tmp20 = tl.where(tmp17, tmp16, tmp19)
    tmp21 = libdevice.tanh(tmp20)
    tmp22 = tmp16 * tmp21
    tmp23 = tmp15 + tmp22
    tmp25 = tmp24 > tmp1
    tmp26 = tl_math.exp(tmp24)
    tmp27 = libdevice.log1p(tmp26)
    tmp28 = tl.where(tmp25, tmp24, tmp27)
    tmp29 = libdevice.tanh(tmp28)
    tmp30 = tmp24 * tmp29
    tmp31 = tmp23 + tmp30
    tmp32 = 4.0
    tmp33 = tmp31 / tmp32  # mean of the four activated values
    tmp34 = tmp7 - tmp33
    tmp35 = tmp34 * tmp34
    tmp36 = tmp14 - tmp33
    tmp37 = tmp36 * tmp36
    tmp38 = tmp35 + tmp37
    tmp39 = tmp22 - tmp33
    tmp40 = tmp39 * tmp39
    tmp41 = tmp38 + tmp40
    tmp42 = tmp30 - tmp33
    tmp43 = tmp42 * tmp42
    tmp44 = tmp41 + tmp43
    tmp45 = tmp44 / tmp32  # biased (population) variance
    tl.store(out_ptr0 + x0, tmp33, xmask)
    tl.store(out_ptr1 + x0, tmp45, xmask)
@triton.jit
def triton_poi_fused_mul_native_layer_norm_softplus_tanh_1(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Stage 2: recompute Mish(x) elementwise, then normalize with the row
    # mean (in_ptr1) and variance (in_ptr2) from stage 1, and apply the
    # affine weight (in_ptr3) and bias (in_ptr4); eps = 1e-5.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index (shared statistics)
    x0 = xindex % 4  # feature index (affine parameters)
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp8 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = libdevice.tanh(tmp5)
    tmp7 = tmp0 * tmp6
    tmp9 = tmp7 - tmp8
    tmp11 = 1e-05
    tmp12 = tmp10 + tmp11
    tmp13 = libdevice.rsqrt(tmp12)
    tmp14 = tmp9 * tmp13
    tmp16 = tmp14 * tmp15
    tmp18 = tmp16 + tmp17
    tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_poi_fused_mul_softplus_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Elementwise Mish for the second projection's output:
    # out = x * tanh(softplus(x)), stable softplus (x when x > 20).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = libdevice.tanh(tmp5)
    tmp7 = tmp0 * tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
    """Compiled SpanFCLayer forward: addmm -> fused Mish+LayerNorm -> addmm -> Mish.

    `args` holds (input, linear_0.weight, linear_0.bias, and the remaining
    projection/LayerNorm parameters as primals_4..7 — see the wrapper class
    for the exact packing order). Returns the activated output plus buffers
    saved for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # First projection: buf0 = bias + input @ weight^T (64x4 rows).
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_native_layer_norm_softplus_tanh_0[grid(64)](buf0,
            buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_native_layer_norm_softplus_tanh_1[grid(256)](buf0,
            buf1, buf2, primals_4, primals_5, buf3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf1
        del buf2
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Second projection over the normalized activations.
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_softplus_tanh_2[grid(256)](buf4, buf5, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
    return buf5, primals_4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
        ), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf4, primals_6
class SpanFCLayerNew(nn.Module):

    def __init__(self, input_dim, output_dim, dropout_rate=0.1, is_active=
        True, is_dropout=True, active_type='mish'):
        """Span FC head (Linear -> activation -> LayerNorm -> Linear ->
        activation) running through the compiled Triton path above.

        Args:
            input_dim: input dimension, e.g. 768.
            output_dim: output dimension, e.g. 32.
            dropout_rate: dropout probability, e.g. 0.1.
            is_dropout: whether dropout is enabled.
            is_active: whether the activation function is applied.
            active_type: activation name, e.g. "tanh", "relu", "mish".

        NOTE(review): the compiled `call` path always applies the Mish
        pattern (x * tanh(softplus(x))) and no dropout, matching the default
        flags; the remaining attributes are kept for interface parity.
        """
        super(SpanFCLayerNew, self).__init__()
        self.linear_0 = nn.Linear(input_dim, input_dim)
        self.linear_1 = nn.Linear(input_dim, output_dim)
        self.layer_norm = nn.LayerNorm(input_dim)
        self.dropout = nn.Dropout(dropout_rate)
        self.is_dropout = is_dropout
        self.active_type = active_type
        self.is_active = is_active
        self.softmax = nn.Softmax(1)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU(inplace=True)
        self.tanh = nn.Tanh()
        self.gelu = nn.GELU()

    def forward(self, input_0):
        # Order matters: the compiled entry point expects
        # (input, l0.weight, l0.bias, l1.bias, ln.weight, l1.weight, ln.bias).
        packed = [input_0,
                  self.linear_0.weight, self.linear_0.bias,
                  self.linear_1.bias, self.layer_norm.weight,
                  self.linear_1.weight, self.layer_norm.bias]
        outputs = call(packed)
        return outputs[0]
|
dumpmemory/Pytorch-NLU
|
SpanFCLayer
| false
| 15,277
|
[
"Apache-2.0"
] | 115
|
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
https://github.com/dumpmemory/Pytorch-NLU/tree/864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
LayerNorm
|
import torch
from torch import nn
class LayerNorm(nn.Module):
    """Normalization over dim 1 with learnable scale/shift parameters
    shaped (1, dim, 1, 1, 1) for 5-D broadcasting."""

    def __init__(self, dim, eps=1e-05):
        super().__init__()
        self.eps = eps
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1, 1))
        self.b = nn.Parameter(torch.zeros(1, dim, 1, 1, 1))

    def forward(self, x):
        # Population (unbiased=False) statistics over dim 1, then an
        # affine transform; eps is added to the std, not the variance.
        mu = x.mean(dim=1, keepdim=True)
        sigma = x.var(dim=1, unbiased=False, keepdim=True).sqrt()
        normalized = (x - mu) / (sigma + self.eps)
        return normalized * self.g + self.b
def get_inputs():
    """Sample forward() inputs: one random (4, 4, 4, 4) tensor."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    """Constructor args/kwargs for LayerNorm."""
    kwargs = {'dim': 4}
    return [[], kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused LayerNorm: out = (x - mean) / (sqrt(var) + 1e-5) * g + b.
    # Mean/biased-variance reduce over the four values spaced 16 apart in
    # each 64-element slab; g (in_ptr1) and b (in_ptr2) are indexed by the
    # stride-64 axis (x2), matching the 5-D broadcast of the parameters.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp27 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8  # mean
    tmp10 = tmp0 - tmp9
    tmp11 = tmp1 - tmp9
    tmp12 = tmp11 * tmp11
    tmp13 = tmp2 - tmp9
    tmp14 = tmp13 * tmp13
    tmp15 = tmp12 + tmp14
    tmp16 = tmp4 - tmp9
    tmp17 = tmp16 * tmp16
    tmp18 = tmp15 + tmp17
    tmp19 = tmp6 - tmp9
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp21 / tmp8  # biased variance
    tmp23 = libdevice.sqrt(tmp22)
    tmp24 = 1e-05
    tmp25 = tmp23 + tmp24  # eps added to std, like the eager module
    tmp26 = tmp10 / tmp25
    tmp28 = tmp26 * tmp27
    tmp30 = tmp28 + tmp29
    tl.store(out_ptr0 + x3, tmp30, xmask)
def call(args):
    """Run the fused LayerNorm kernel.

    `args` is (input, g, b); consumed and cleared. Returns the normalized
    (1, 4, 4, 4, 4) buffer plus the input saved for backward.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1))
    assert_size_stride(primals_3, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0[grid(256)](primals_1,
            primals_2, primals_3, buf0, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_2
        del primals_3
    return buf0, primals_1
class LayerNormNew(nn.Module):
    """LayerNorm with learnable scale/shift, backed by the Triton kernel."""

    def __init__(self, dim, eps=1e-05):
        super().__init__()
        self.eps = eps
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1, 1))
        self.b = nn.Parameter(torch.zeros(1, dim, 1, 1, 1))

    def forward(self, input_0):
        # The compiled entry point expects (input, g, b).
        outputs = call([input_0, self.g, self.b])
        return outputs[0]
|
dumpmemory/uniformer-pytorch
|
LayerNorm
| false
| 15,278
|
[
"MIT"
] | 71
|
756c4edb7ab0947dc202c145f7c95571848e0594
|
https://github.com/dumpmemory/uniformer-pytorch/tree/756c4edb7ab0947dc202c145f7c95571848e0594
|
h_swish
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class h_swish(nn.Module):
    """Hard-swish activation: x * relu6(x + 3) / 6."""

    def __init__(self, inplace=True):
        super(h_swish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        # relu6 clamps (x + 3) to [0, 6]; dividing by 6 yields the gate.
        gate = F.relu6(x + 3.0, self.inplace) / 6.0
        return x * gate
def get_inputs():
    """Sample forward() inputs: one random (4, 4, 4, 4) tensor."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    """Constructor args/kwargs: h_swish takes none."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Fused hard-swish: out = min(max(x + 3, 0), 6) * (1/6) * x.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 3.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = 0.16666666666666666  # 1/6
    tmp8 = tmp6 * tmp7
    tmp9 = tmp8 * tmp0
    tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
    """Run the fused hard-swish kernel on a (4, 4, 4, 4) CUDA tensor.

    Consumes and clears `args`; returns a one-tuple with the output buffer.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class h_swishNew(nn.Module):
    """Hard-swish activation backed by the fused Triton kernel above."""

    def __init__(self, inplace=True):
        super(h_swishNew, self).__init__()
        self.inplace = inplace

    def forward(self, input_0):
        outputs = call([input_0])
        return outputs[0]
|
dx9527/MobileNetV3-pytorch
|
h_swish
| false
| 15,279
|
[
"MIT"
] | 291
|
7812dbcedd5db4e3bbfc21122b82205848f742cf
|
https://github.com/dx9527/MobileNetV3-pytorch/tree/7812dbcedd5db4e3bbfc21122b82205848f742cf
|
MultiplyLearned
|
import torch
import torch.fft
import torch.nn
class MultiplyLearned(torch.nn.Module):

    def __init__(self, omega_0: 'float'):
        """out = omega_0 * x, with a learned omega_0."""
        super().__init__()
        self.omega_0 = torch.nn.Parameter(torch.Tensor(1))
        with torch.no_grad():
            self.omega_0.fill_(omega_0)

    def forward(self, x):
        # The learned scalar is amplified by a fixed factor of 100.
        scale = 100 * self.omega_0
        return scale * x
def get_inputs():
    """Sample forward() inputs: one random (4, 4, 4, 4) tensor."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]
def get_init_inputs():
    """Constructor args/kwargs for MultiplyLearned."""
    kwargs = {'omega_0': 4}
    return [[], kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.fft
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # out = (scalar * 100) * x; in_ptr0 holds the 1-element omega_0
    # parameter, broadcast to the whole block.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp4 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = 100.0
    tmp3 = tmp1 * tmp2
    tmp5 = tmp3 * tmp4
    tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
    """Run the fused scalar-multiply kernel.

    `args` is (omega_0 parameter of shape (1,), input tensor); consumed and
    cleared. Returns (output, input-saved-for-backward).
    """
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (1,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
    return buf0, primals_2
class MultiplyLearnedNew(torch.nn.Module):

    def __init__(self, omega_0: 'float'):
        """out = 100 * omega_0 * x, with omega_0 a learned scalar (Triton path)."""
        super().__init__()
        self.omega_0 = torch.nn.Parameter(torch.Tensor(1))
        with torch.no_grad():
            self.omega_0.fill_(omega_0)

    def forward(self, input_0):
        # The compiled entry point expects (omega_0, input).
        outputs = call([self.omega_0, input_0])
        return outputs[0]
|
dwromero/ckconv
|
MultiplyLearned
| false
| 15,280
|
[
"MIT"
] | 74
|
d44c6441a98792477d6259368c210089bb33fe7a
|
https://github.com/dwromero/ckconv/tree/d44c6441a98792477d6259368c210089bb33fe7a
|
MultiLabelCircleLoss
|
import torch
from torch import nn
class MultiLabelCircleLoss(nn.Module):

    def __init__(self, reduction='mean', inf=1000000000000.0):
        """Circle loss for multi-label classification.

        Generalizes softmax + cross-entropy to the "pick K targets out of N"
        setting: every target-class score should exceed every non-target
        score (the gradient of the LSE function is exactly softmax).
        Reference: https://spaces.ac.cn/archives/7359

        Args:
            reduction: ``'none'`` | ``'mean'`` | ``'sum'`` — how to reduce
                the per-sample loss.
            inf: large masking constant ("infinity"), e.g. 1e12.

        Returns:
            Tensor of loss.

        Examples:
            >>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
            >>> label, logits = torch.tensor(label).float(), torch.tensor(logits).float()
            >>> loss = MultiLabelCircleLoss()(logits, label)
        """
        super(MultiLabelCircleLoss, self).__init__()
        self.reduction = reduction
        self.inf = inf

    def forward(self, logits, labels):
        # Flip the sign of target-class logits so a single LSE per group
        # expresses "all positives should score above all negatives".
        logits = (1 - 2 * labels) * logits
        # Mask positives out of the negative group and vice versa.
        logits_neg = logits - labels * self.inf
        logits_pos = logits - (1 - labels) * self.inf
        # The appended zero acts as the threshold term of the reference.
        zeros = torch.zeros_like(logits[..., :1])
        logits_neg = torch.cat([logits_neg, zeros], dim=-1)
        logits_pos = torch.cat([logits_pos, zeros], dim=-1)
        neg_loss = torch.logsumexp(logits_neg, dim=-1)
        pos_loss = torch.logsumexp(logits_pos, dim=-1)
        loss = neg_loss + pos_loss
        if self.reduction == 'mean':
            loss = loss.mean()
        elif self.reduction == 'sum':
            loss = loss.sum()
        # Fix: 'none' now returns the unreduced per-sample loss as the
        # docstring promises; previously every non-'mean' value (including
        # 'none') silently summed.
        return loss
def get_inputs():
    """Sample forward() inputs: (logits, labels), both (4, 4, 4, 4)."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    """Constructor args/kwargs: MultiLabelCircleLoss defaults only."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_logsumexp_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
    out_ptr2, xnumel, XBLOCK: tl.constexpr):
    # Stage 1 of the multi-label circle loss: for each of the 64 rows,
    # build the masked "negative" logits (signed logits minus in_ptr0 * 1e12,
    # with a zero appended as the implicit 5th column) and emit:
    #   out_ptr0: row max of the negative group (logsumexp stabilizer)
    #   out_ptr1: sum of exp(neg - max) for the negative group
    #   out_ptr2: row max of the positive group
    # NOTE(review): in_ptr0 is used as the labels-like operand (1 - 2*v and
    # the masks) while in_ptr1 supplies the raw logits — but the wrapper
    # passes (arg0_1, arg1_1) straight through; verify operand order against
    # the eager forward(logits, labels).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.full([1], 0, tl.int64)
    tmp2 = tl.full([1], 4, tl.int64)
    tmp3 = tmp0 < tmp2
    tmp4 = tl.load(in_ptr0 + (4 * x0 + 0), tmp3 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp5 = 2.0
    tmp6 = tmp4 * tmp5
    tmp7 = 1.0
    tmp8 = tmp7 - tmp6
    tmp9 = tl.load(in_ptr1 + (4 * x0 + 0), tmp3 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp10 = tmp8 * tmp9
    tmp11 = 1000000000000.0
    tmp12 = tmp4 * tmp11
    tmp13 = tmp10 - tmp12
    tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
    tmp15 = tl.where(tmp3, tmp13, tmp14)
    tmp16 = tmp0 >= tmp2
    tl.full([1], 5, tl.int64)
    tmp19 = 0.0
    tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
    tmp21 = tl.where(tmp16, tmp19, tmp20)
    tmp22 = tl.where(tmp3, tmp15, tmp21)
    tmp23 = tl.full([1], 1, tl.int64)
    tmp25 = tmp23 < tmp2
    tmp26 = tl.load(in_ptr0 + (4 * x0 + 1), tmp25 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp27 = tmp26 * tmp5
    tmp28 = tmp7 - tmp27
    tmp29 = tl.load(in_ptr1 + (4 * x0 + 1), tmp25 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp30 = tmp28 * tmp29
    tmp31 = tmp26 * tmp11
    tmp32 = tmp30 - tmp31
    tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
    tmp34 = tl.where(tmp25, tmp32, tmp33)
    tmp35 = tmp23 >= tmp2
    tmp37 = tl.where(tmp35, tmp19, tmp20)
    tmp38 = tl.where(tmp25, tmp34, tmp37)
    tmp39 = triton_helpers.maximum(tmp22, tmp38)
    tmp40 = tl.full([1], 2, tl.int64)
    tmp42 = tmp40 < tmp2
    tmp43 = tl.load(in_ptr0 + (4 * x0 + 2), tmp42 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp44 = tmp43 * tmp5
    tmp45 = tmp7 - tmp44
    tmp46 = tl.load(in_ptr1 + (4 * x0 + 2), tmp42 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp47 = tmp45 * tmp46
    tmp48 = tmp43 * tmp11
    tmp49 = tmp47 - tmp48
    tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
    tmp51 = tl.where(tmp42, tmp49, tmp50)
    tmp52 = tmp40 >= tmp2
    tmp54 = tl.where(tmp52, tmp19, tmp20)
    tmp55 = tl.where(tmp42, tmp51, tmp54)
    tmp56 = triton_helpers.maximum(tmp39, tmp55)
    tmp57 = tl.full([1], 3, tl.int64)
    tmp59 = tmp57 < tmp2
    tmp60 = tl.load(in_ptr0 + (4 * x0 + 3), tmp59 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp61 = tmp60 * tmp5
    tmp62 = tmp7 - tmp61
    tmp63 = tl.load(in_ptr1 + (4 * x0 + 3), tmp59 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp64 = tmp62 * tmp63
    tmp65 = tmp60 * tmp11
    tmp66 = tmp64 - tmp65
    tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype)
    tmp68 = tl.where(tmp59, tmp66, tmp67)
    tmp69 = tmp57 >= tmp2
    tmp71 = tl.where(tmp69, tmp19, tmp20)
    tmp72 = tl.where(tmp59, tmp68, tmp71)
    tmp73 = triton_helpers.maximum(tmp56, tmp72)
    # Column 4 is the concatenated zero (index condition is always false,
    # so the masked load yields 0.0).
    tmp75 = tmp2 < tmp2
    tmp76 = tl.load(in_ptr0 + (4 * x0 + 4), tmp75 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp77 = tmp76 * tmp5
    tmp78 = tmp7 - tmp77
    tmp79 = tl.load(in_ptr1 + (4 * x0 + 4), tmp75 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp80 = tmp78 * tmp79
    tmp81 = tmp76 * tmp11
    tmp82 = tmp80 - tmp81
    tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
    tmp84 = tl.where(tmp75, tmp82, tmp83)
    tmp85 = tmp2 >= tmp2
    tmp87 = tl.where(tmp85, tmp19, tmp20)
    tmp88 = tl.where(tmp75, tmp84, tmp87)
    tmp89 = triton_helpers.maximum(tmp73, tmp88)
    # Stabilized exp-sum for the negative group: replace an infinite max
    # with 0 before subtracting (standard logsumexp guard).
    tmp90 = tl_math.abs(tmp89)
    tmp91 = float('inf')
    tmp92 = tmp90 == tmp91
    tmp93 = tl.where(tmp92, tmp19, tmp89)
    tmp94 = tmp22 - tmp93
    tmp95 = tl_math.exp(tmp94)
    tmp96 = tmp38 - tmp93
    tmp97 = tl_math.exp(tmp96)
    tmp98 = tmp95 + tmp97
    tmp99 = tmp55 - tmp93
    tmp100 = tl_math.exp(tmp99)
    tmp101 = tmp98 + tmp100
    tmp102 = tmp72 - tmp93
    tmp103 = tl_math.exp(tmp102)
    tmp104 = tmp101 + tmp103
    tmp105 = tmp88 - tmp93
    tmp106 = tl_math.exp(tmp105)
    tmp107 = tmp104 + tmp106
    # Positive group: same signed logits masked with (1 - v) * 1e12; only
    # the running max is produced here (the exp-sum happens in kernel 1).
    tmp108 = tmp7 - tmp4
    tmp109 = tmp108 * tmp11
    tmp110 = tmp10 - tmp109
    tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype)
    tmp112 = tl.where(tmp3, tmp110, tmp111)
    tmp113 = tl.where(tmp3, tmp112, tmp21)
    tmp114 = tmp7 - tmp26
    tmp115 = tmp114 * tmp11
    tmp116 = tmp30 - tmp115
    tmp117 = tl.full(tmp116.shape, 0.0, tmp116.dtype)
    tmp118 = tl.where(tmp25, tmp116, tmp117)
    tmp119 = tl.where(tmp25, tmp118, tmp37)
    tmp120 = triton_helpers.maximum(tmp113, tmp119)
    tmp121 = tmp7 - tmp43
    tmp122 = tmp121 * tmp11
    tmp123 = tmp47 - tmp122
    tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype)
    tmp125 = tl.where(tmp42, tmp123, tmp124)
    tmp126 = tl.where(tmp42, tmp125, tmp54)
    tmp127 = triton_helpers.maximum(tmp120, tmp126)
    tmp128 = tmp7 - tmp60
    tmp129 = tmp128 * tmp11
    tmp130 = tmp64 - tmp129
    tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
    tmp132 = tl.where(tmp59, tmp130, tmp131)
    tmp133 = tl.where(tmp59, tmp132, tmp71)
    tmp134 = triton_helpers.maximum(tmp127, tmp133)
    tmp135 = tmp7 - tmp76
    tmp136 = tmp135 * tmp11
    tmp137 = tmp80 - tmp136
    tmp138 = tl.full(tmp137.shape, 0.0, tmp137.dtype)
    tmp139 = tl.where(tmp75, tmp137, tmp138)
    tmp140 = tl.where(tmp75, tmp139, tmp87)
    tmp141 = triton_helpers.maximum(tmp134, tmp140)
    tl.store(out_ptr0 + x0, tmp89, xmask)
    tl.store(out_ptr1 + x0, tmp107, xmask)
    tl.store(out_ptr2 + x0, tmp141, xmask)
@triton.jit
def triton_poi_fused_cat_logsumexp_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # Stage 2: for the positive group (5 columns per row: 4 masked logits
    # plus the concatenated zero), emit exp(value - stabilized_row_max),
    # where the row max comes from in_ptr2 (out_ptr2 of kernel 0).
    xnumel = 320
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5  # column inside a 5-wide row
    x1 = xindex // 5  # row index
    x2 = xindex
    tmp25 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = 2.0
    tmp7 = tmp5 * tmp6
    tmp8 = 1.0
    tmp9 = tmp8 - tmp7
    tmp10 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp11 = tmp9 * tmp10
    tmp12 = tmp8 - tmp5
    tmp13 = 1000000000000.0
    tmp14 = tmp12 * tmp13
    tmp15 = tmp11 - tmp14
    tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
    tmp17 = tl.where(tmp4, tmp15, tmp16)
    tmp18 = tmp0 >= tmp3
    tl.full([1], 5, tl.int64)
    tmp21 = 0.0
    tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
    tmp23 = tl.where(tmp18, tmp21, tmp22)
    tmp24 = tl.where(tmp4, tmp17, tmp23)
    # Guard the logsumexp stabilizer: an infinite max is replaced with 0.
    tmp26 = tl_math.abs(tmp25)
    tmp27 = float('inf')
    tmp28 = tmp26 == tmp27
    tmp29 = tl.where(tmp28, tmp21, tmp25)
    tmp30 = tmp24 - tmp29
    tmp31 = tl_math.exp(tmp30)
    tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_per_fused_add_logsumexp_mean_2(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Stage 3: per row, finish both logsumexps
    # (log(exp-sum) + stabilized max), add them, and reduce the 64 rows to
    # their mean (the scalar loss). Dividing by 64.0 hard-codes the 'mean'
    # reduction for this input size.
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp2 = tl.load(in_ptr1 + r0, None)
    tmp9 = tl.load(in_ptr2 + 5 * r0, None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + (1 + 5 * r0), None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr2 + (2 + 5 * r0), None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr2 + (3 + 5 * r0), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr2 + (4 + 5 * r0), None, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr3 + r0, None)
    tmp1 = tl_math.log(tmp0)
    tmp3 = tl_math.abs(tmp2)
    tmp4 = float('inf')
    tmp5 = tmp3 == tmp4
    tmp6 = 0.0
    tmp7 = tl.where(tmp5, tmp6, tmp2)
    tmp8 = tmp1 + tmp7  # negative-group logsumexp
    tmp11 = tmp9 + tmp10
    tmp13 = tmp11 + tmp12
    tmp15 = tmp13 + tmp14
    tmp17 = tmp15 + tmp16
    tmp18 = tl_math.log(tmp17)
    tmp20 = tl_math.abs(tmp19)
    tmp21 = tmp20 == tmp4
    tmp22 = tl.where(tmp21, tmp6, tmp19)
    tmp23 = tmp18 + tmp22  # positive-group logsumexp
    tmp24 = tmp8 + tmp23
    tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
    tmp27 = tl.sum(tmp25, 1)[:, None]
    tmp28 = 64.0
    tmp29 = tmp27 / tmp28
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp29, None)
def call(args):
    """Compiled MultiLabelCircleLoss forward for two (4, 4, 4, 4) tensors.

    Consumes and clears `args`; returns a one-tuple with the scalar
    mean-reduced loss.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_logsumexp_0[grid(64)](arg0_1, arg1_1, buf0,
            buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
        triton_poi_fused_cat_logsumexp_1[grid(320)](arg0_1, arg1_1, buf2,
            buf3, 320, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf5 = buf4
        del buf4
        triton_per_fused_add_logsumexp_mean_2[grid(1)](buf5, buf1, buf0,
            buf3, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
        del buf1
        del buf2
        del buf3
    return buf5,
class MultiLabelCircleLossNew(nn.Module):

    def __init__(self, reduction='mean', inf=1000000000000.0):
        """Circle loss for multi-label classification, Triton backend.

        Generalizes softmax + cross-entropy to picking K target classes out
        of N: every target-class score should exceed every non-target score
        (the gradient of the LSE function is exactly softmax).
        Reference: https://spaces.ac.cn/archives/7359

        Args:
            reduction: reduction mode, e.g. ``'none'`` | ``'mean'`` | ``'sum'``.
                NOTE(review): the compiled path always divides by 64 (mean)
                regardless of this flag — see the final kernel; the
                attribute is kept for interface parity.
            inf: large masking constant, e.g. 1e12.

        Examples:
            >>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
            >>> label, logits = torch.tensor(label).float(), torch.tensor(logits).float()
            >>> loss = MultiLabelCircleLossNew()(logits, label)
        """
        super(MultiLabelCircleLossNew, self).__init__()
        self.reduction = reduction
        self.inf = inf

    def forward(self, input_0, input_1):
        outputs = call([input_0, input_1])
        return outputs[0]
|
dumpmemory/Pytorch-NLU
|
MultiLabelCircleLoss
| false
| 15,281
|
[
"Apache-2.0"
] | 115
|
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
https://github.com/dumpmemory/Pytorch-NLU/tree/864fb9acc7751fc51abd3d05d24b5a9a7eab7110
|
DotProductLoss
|
import torch
import torch.nn as nn
class DotProductLoss(nn.Module):
    """Negative mean dot product between target and output."""

    def __init__(self):
        super(DotProductLoss, self).__init__()

    def forward(self, output, target):
        flat_target = target.view(-1)
        flat_output = output.view(-1)
        return -torch.dot(flat_target, flat_output) / target.nelement()
def get_inputs():
    """Sample forward() inputs: (output, target), both (4, 4, 4, 4)."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    """Constructor args/kwargs: DotProductLoss takes none."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_dot_neg_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
    rnumel):
    # Single-block reduction: loss = -(sum(a * b)) / 256
    # (0.00390625 == 1/256, the element count of a (4, 4, 4, 4) tensor).
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = -tmp5
    tmp7 = 0.00390625
    tmp8 = tmp6 * tmp7
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
    # Launch the fused dot-product-loss kernel on CUDA device 0.
    # args: [output, target], both contiguous (4, 4, 4, 4) tensors.
    # Returns a 1-tuple holding the scalar loss tensor.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # 0-dim output buffer for the scalar result.
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_div_dot_neg_0[grid(1)](buf1, arg0_1, arg1_1, 1,
            256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class DotProductLossNew(nn.Module):
    """Triton-backed negative mean dot-product loss.

    Same math as DotProductLoss, computed by the fused `call` kernel.
    """

    def __init__(self):
        super(DotProductLossNew, self).__init__()

    def forward(self, input_0, input_1):
        result, = call([input_0, input_1])
        return result
|
ehsanik/dogTorch
|
DotProductLoss
| false
| 15,282
|
[
"MIT"
] | 74
|
3a898862f6283e6603833991eeb62427216f2af7
|
https://github.com/ehsanik/dogTorch/tree/3a898862f6283e6603833991eeb62427216f2af7
|
ContrastiveLoss
|
import torch
import torch.nn.functional as F
class ContrastiveLoss(torch.nn.Module):
    """Contrastive loss over embedding pairs.

    Similar pairs (label 0) are pulled together; dissimilar pairs
    (label 1) are pushed at least ``margin`` apart.
    """

    def __init__(self, margin=2):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin

    def forward(self, output1, output2, label):
        dist = F.pairwise_distance(output1, output2)
        similar_term = (1 - label) * dist.pow(2)
        # Hinge: only penalize dissimilar pairs closer than the margin.
        hinge = torch.clamp(self.margin - dist, min=0.0)
        dissimilar_term = label * hinge.pow(2)
        return torch.mean(similar_term + dissimilar_term)
def get_inputs():
    # Three random (4, 4, 4, 4) tensors: two embeddings and a label tensor.
    return [torch.rand([4, 4, 4, 4]) for _ in range(3)]

def get_init_inputs():
    # No required constructor arguments (margin defaults to 2).
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Pairwise distance over the last dim (size 4) for each of 64 vectors:
    # sqrt(sum(((a - b) + 1e-06) ** 2)), matching F.pairwise_distance with
    # its default eps of 1e-06.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    # Accumulate ((a_i - b_i) + eps)^2 for the four lanes of each vector.
    tmp2 = tmp0 - tmp1
    tmp3 = 1e-06
    tmp4 = tmp2 + tmp3
    tmp5 = tmp4 * tmp4
    tmp8 = tmp6 - tmp7
    tmp9 = tmp8 + tmp3
    tmp10 = tmp9 * tmp9
    tmp11 = tmp5 + tmp10
    tmp14 = tmp12 - tmp13
    tmp15 = tmp14 + tmp3
    tmp16 = tmp15 * tmp15
    tmp17 = tmp11 + tmp16
    tmp20 = tmp18 - tmp19
    tmp21 = tmp20 + tmp3
    tmp22 = tmp21 * tmp21
    tmp23 = tmp17 + tmp22
    tmp24 = libdevice.sqrt(tmp23)
    tl.store(out_ptr0 + x0, tmp24, xmask)
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel):
    # Contrastive-loss reduction:
    #   mean((1 - label) * d^2 + label * clamp(2 - d, min=0)^2)
    # where the distance d (in_ptr1, 64 values) is broadcast over the
    # 256-element label tensor. The default margin of 2 is baked in as the
    # constant tmp6.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r2 = rindex
    r0 = rindex % 64
    tmp0 = tl.load(in_ptr0 + r2, None)
    tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 * tmp4
    tmp6 = 2.0
    tmp7 = tmp6 - tmp3
    tmp8 = 0.0
    tmp9 = triton_helpers.maximum(tmp7, tmp8)
    tmp10 = tmp9 * tmp9
    tmp11 = tmp0 * tmp10
    tmp12 = tmp5 + tmp11
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
    # Divide by the element count (256) to get the mean.
    tmp16 = 256.0
    tmp17 = tmp15 / tmp16
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
    # Two-stage contrastive loss on CUDA device 0:
    # 1) pairwise distances (arg1_1 vs arg0_1) -> buf0 (4, 4, 4),
    # 2) masked/hinged mean reduction with labels arg2_1 -> scalar buf2.
    # Returns a 1-tuple holding the scalar loss tensor.
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2,
            arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
        del arg2_1
        del buf0
    return buf2,
class ContrastiveLossNew(torch.nn.Module):
    """Triton-compiled contrastive loss; mirrors ContrastiveLoss but runs
    the fused `call` kernels.

    NOTE(review): the reduction kernel bakes in the default margin of 2
    (constant 2.0), so ``self.margin`` is stored here but not read by the
    compiled path.
    """

    def __init__(self, margin=2):
        super(ContrastiveLossNew, self).__init__()
        self.margin = margin

    def forward(self, input_0, input_1, input_2):
        result, = call([input_0, input_1, input_2])
        return result
|
e-Neural/OfflineSignatureVerification
|
ContrastiveLoss
| false
| 15,283
|
[
"MIT"
] | 51
|
ea11009a3b2ac82c7091075466c505602a50817a
|
https://github.com/e-Neural/OfflineSignatureVerification/tree/ea11009a3b2ac82c7091075466c505602a50817a
|
ImageToSequence
|
import torch
from typing import NamedTuple
from torch.nn.utils.rnn import pack_padded_sequence
def image_to_sequence(x, columnwise=True, return_packed=False):
    """Convert a batch of images into an RNN-style sequence.

    Columns (or rows, when ``columnwise`` is False) become time steps and
    the remaining image dimensions are flattened into features. When a
    PaddedTensor is given, the per-sample lengths are returned (or used
    to build a PackedSequence when ``return_packed`` is True).
    """
    if isinstance(x, PaddedTensor):
        data, sizes = x.data, x.sizes
    else:
        data, sizes = x, None
    # Promote 2D (HxW) and 3D (CxHxW) inputs to a 4D NxCxHxW batch.
    if data.dim() == 2:
        data = data.view(1, 1, data.size(0), data.size(1))
    elif data.dim() == 3:
        data = data.view(1, data.size(0), data.size(1), data.size(2))
    assert data.dim() == 4
    n, c, h, w = data.size()
    if columnwise:
        seq = data.permute(3, 0, 1, 2).contiguous().view(w, n, h * c)
    else:
        seq = data.permute(2, 0, 1, 3).contiguous().view(h, n, w * c)
    if sizes is None:
        return seq
    # Sequence lengths come from width (columnwise) or height (rowwise).
    lengths = sizes[:, 1 if columnwise else 0].tolist()
    if return_packed:
        return pack_padded_sequence(seq, lengths)
    return seq, lengths
class PaddedTensor(NamedTuple):
    """A padded batch tensor paired with each sample's true size."""
    # data: padded batch tensor; sizes: per-sample (H, W) or (C, H, W) sizes.
    data: 'torch.Tensor'
    sizes: 'torch.Tensor'

    @classmethod
    def build(cls, data: 'torch.Tensor', sizes: 'torch.Tensor'):
        # Validated constructor: checks argument types, the layout of
        # `sizes`, and that the batch dimensions agree.
        assert isinstance(data, torch.Tensor)
        assert isinstance(sizes, torch.Tensor)
        assert sizes.dim() == 2, 'PaddedTensor.sizes must have 2 dimensions'
        assert sizes.size(1) in (2, 3
            ), f'PaddedTensor.sizes is incorrect: expected=2 (HxW) or 3 (CxHxW), found={sizes.size(1)}'
        assert data.size(0) == sizes.size(0
            ), f'Batch size {sizes.size(0)} does not match the number of samples in the batch {data.size(0)}'
        return cls(data, sizes)

    def __repr__(self) ->str:
        return (
            f'PaddedTensor(data.size()={list(self.data.size())}, sizes={self.sizes.tolist()}, device={str(self.data.device)})'
        )

    @property
    def device(self) ->torch.device:
        # Device of the underlying data tensor.
        return self.data.device
class ImageToSequence(torch.nn.Module):
    """Module wrapper around :func:`image_to_sequence` with fixed options."""

    def __init__(self, columnwise=True, return_packed=False):
        super().__init__()
        self._columnwise = columnwise
        self._return_packed = return_packed

    def forward(self, x):
        return image_to_sequence(
            x,
            columnwise=self._columnwise,
            return_packed=self._return_packed,
        )
def get_inputs():
    # A single random (4, 4, 4, 4) image batch.
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    # No required constructor arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from typing import NamedTuple
from torch.nn.utils.rnn import pack_padded_sequence
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Transposing copy: materializes permute(3, 0, 1, 2) of a contiguous
    # (4, 4, 4, 4) tensor so the result can be viewed as (w, n, h * c).
    ynumel = 4
    xnumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (x1 + 64 * y0), tmp0, xmask & ymask)
def call(args):
    # Launch the transposing-copy kernel on CUDA device 0 and reinterpret
    # the (4, 4, 4, 4) result as the (w=4, n=4, h*c=16) sequence layout.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(4, 64)](arg0_1, buf0, 4, 64, XBLOCK=
            32, YBLOCK=4, num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0),
def image_to_sequence(x, columnwise=True, return_packed=False):
    # Convert an image batch to a sequence: columns (or rows, when
    # columnwise is False) become time steps and the remaining dims are
    # flattened into features.
    x, xs = (x.data, x.sizes) if isinstance(x, PaddedTensor) else (x, None)
    # Promote 2D (HxW) and 3D (CxHxW) inputs to a 4D NxCxHxW batch.
    if x.dim() == 2:
        x = x.view(1, 1, x.size(0), x.size(1))
    elif x.dim() == 3:
        x = x.view(1, x.size(0), x.size(1), x.size(2))
    assert x.dim() == 4
    n, c, h, w = x.size()
    if columnwise:
        x = x.permute(3, 0, 1, 2).contiguous().view(w, n, h * c)
    else:
        x = x.permute(2, 0, 1, 3).contiguous().view(h, n, w * c)
    if xs is None:
        return x
    # Per-sample lengths come from width (columnwise) or height (rowwise).
    xs = xs[:, 1 if columnwise else 0]
    return pack_padded_sequence(x, xs.tolist()) if return_packed else (x,
        xs.tolist())
class PaddedTensor(NamedTuple):
    """A padded batch tensor paired with each sample's true size."""
    # data: padded batch tensor; sizes: per-sample (H, W) or (C, H, W) sizes.
    data: 'torch.Tensor'
    sizes: 'torch.Tensor'

    @classmethod
    def build(cls, data: 'torch.Tensor', sizes: 'torch.Tensor'):
        # Validated constructor: checks argument types, the layout of
        # `sizes`, and that the batch dimensions agree.
        assert isinstance(data, torch.Tensor)
        assert isinstance(sizes, torch.Tensor)
        assert sizes.dim() == 2, 'PaddedTensor.sizes must have 2 dimensions'
        assert sizes.size(1) in (2, 3
            ), f'PaddedTensor.sizes is incorrect: expected=2 (HxW) or 3 (CxHxW), found={sizes.size(1)}'
        assert data.size(0) == sizes.size(0
            ), f'Batch size {sizes.size(0)} does not match the number of samples in the batch {data.size(0)}'
        return cls(data, sizes)

    def __repr__(self) ->str:
        return (
            f'PaddedTensor(data.size()={list(self.data.size())}, sizes={self.sizes.tolist()}, device={str(self.data.device)})'
        )

    @property
    def device(self) ->torch.device:
        # Device of the underlying data tensor.
        return self.data.device
class ImageToSequenceNew(torch.nn.Module):
    """Triton-compiled image-to-sequence wrapper.

    The option flags are kept for interface parity with ImageToSequence;
    the compiled `call` path implements the columnwise, unpacked case.
    """

    def __init__(self, columnwise=True, return_packed=False):
        super().__init__()
        self._columnwise = columnwise
        self._return_packed = return_packed

    def forward(self, input_0):
        result, = call([input_0])
        return result
|
eivtho/PyLaia
|
ImageToSequence
| false
| 15,284
|
[
"MIT"
] | 89
|
2a7a6e2eeb9b5af68c0faed0c564b02063e72be0
|
https://github.com/eivtho/PyLaia/tree/2a7a6e2eeb9b5af68c0faed0c564b02063e72be0
|
GaussianNoise
|
import torch
import torch.nn as nn
class GaussianNoise(nn.Module):
    """A gaussian noise module.

    Adds noise drawn from N(mean, stddev**2) to the input.

    Args:
        mean (float): The mean of the normal distribution. Default: 0.0.
        stddev (float): The standard deviation of the normal distribution.
            Default: 0.1.

    Shape:
        - Input: (batch, *)
        - Output: (batch, *) (same shape as input)
    """

    def __init__(self, mean=0.0, stddev=0.1):
        super(GaussianNoise, self).__init__()
        self.mean = mean
        self.stddev = stddev

    def forward(self, x):
        noise = torch.empty_like(x)
        # Bug fix: previously sampled with a hard-coded mean of 0, silently
        # ignoring self.mean. With the default mean=0.0 the behavior is
        # unchanged.
        noise.normal_(self.mean, self.stddev)
        return x + noise
def get_inputs():
    # A single random (4, 4, 4, 4) input tensor.
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    # No required constructor arguments (mean/stddev have defaults).
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise in-place add over 256 elements:
    # in_out_ptr0 += in_ptr0 (adds the input image to the noise buffer).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_out_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
    # Sample N(0.0, 0.1) noise into a fresh CUDA buffer, then add the input
    # elementwise. NOTE(review): mean 0.0 and stddev 0.1 are hard-coded
    # here, so a non-default GaussianNoiseNew configuration is ignored.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = torch.ops.aten.normal_functional.default(buf0, 0.0, 0.1)
        del buf0
        buf2 = buf1
        del buf1
        buf3 = buf2
        del buf2
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](buf3, arg0_1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf3,
class GaussianNoiseNew(nn.Module):
    """A gaussian noise module.
    Args:
        stddev (float): The standard deviation of the normal distribution.
        Default: 0.1.
    Shape:
        - Input: (batch, *)
        - Output: (batch, *) (same shape as input)
    """

    def __init__(self, mean=0.0, stddev=0.1):
        super(GaussianNoiseNew, self).__init__()
        # NOTE(review): stored for interface parity only — the compiled
        # `call` path hard-codes mean 0.0 and stddev 0.1, so neither
        # attribute is consulted by forward.
        self.mean = mean
        self.stddev = stddev

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
eezkni/UEGAN
|
GaussianNoise
| false
| 15,285
|
[
"MIT"
] | 73
|
a6616ac559819d487cae0f301d98cf2922a11a09
|
https://github.com/eezkni/UEGAN/tree/a6616ac559819d487cae0f301d98cf2922a11a09
|
LR_PAD
|
import torch
import torch.nn as nn
def lr_pad(x, padding=1):
    """Circularly pad the last dim: each side is filled with the other
    side's edge columns instead of zeros."""
    left_wrap = x[..., -padding:]
    right_wrap = x[..., :padding]
    return torch.cat([left_wrap, x, right_wrap], dim=3)
class LR_PAD(nn.Module):
    """Circular left/right padding: wraps each row's ends around instead
    of zero padding."""

    def __init__(self, padding=1):
        super(LR_PAD, self).__init__()
        self.padding = padding

    def forward(self, x):
        p = self.padding
        # Equivalent to lr_pad(x, p): wrap the last dim circularly.
        return torch.cat([x[..., -p:], x, x[..., :p]], dim=3)
def get_inputs():
    # A single random (4, 4, 4, 4) input tensor.
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    # No required constructor arguments (padding defaults to 1).
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (4 * x1 + (-1 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 6, tl.int64)
tmp14 = tl.load(in_ptr0 + 4 * x1, tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
    # Launch the circular-pad kernel on CUDA device 0; output grows the
    # last dim from 4 to 6 (padding=1 on both sides).
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
def lr_pad(x, padding=1):
    """ Pad left/right-most to each other instead of zero padding """
    # Wrap `padding` columns from each end of the last dim around to the
    # opposite side (circular padding along dim 3).
    return torch.cat([x[..., -padding:], x, x[..., :padding]], dim=3)
class LR_PADNew(nn.Module):
    """Triton-compiled circular left/right padding.

    NOTE(review): the compiled `call` kernel implements padding=1; the
    stored ``self.padding`` is kept for interface parity.
    """

    def __init__(self, padding=1):
        super(LR_PADNew, self).__init__()
        self.padding = padding

    def forward(self, input_0):
        result, = call([input_0])
        return result
|
ekbanasolutions/HorizonNet
|
LR_PAD
| false
| 15,286
|
[
"MIT"
] | 254
|
4eff713f8d446c53c479d86b4d06af166b724a74
|
https://github.com/ekbanasolutions/HorizonNet/tree/4eff713f8d446c53c479d86b4d06af166b724a74
|
fChannelAttention
|
import math
import torch
import torch.optim
import torch.utils.data
class fChannelAttention(torch.nn.Module):
    """Channel attention gate.

    Pools the input spatially (mean and max), pushes both descriptors
    through a shared two-layer bottleneck, and emits a per-channel
    sigmoid gate of shape (batch, N_in, 1, 1).
    """

    def __init__(self, N_in, ratio=1):
        super(fChannelAttention, self).__init__()
        self.N_in = N_in
        self.ratio = ratio
        bottleneck = self.N_in // ratio
        self.weight_fc1 = torch.nn.Parameter(torch.Tensor(bottleneck,
            self.N_in))
        self.weight_fc2 = torch.nn.Parameter(torch.Tensor(self.N_in,
            bottleneck))
        self.reset_parameters()

    def reset_parameters(self):
        # Kaiming-uniform init for both bottleneck weight matrices.
        for w in (self.weight_fc1, self.weight_fc2):
            torch.nn.init.kaiming_uniform_(w, a=math.sqrt(5))

    def forward(self, input):
        # Spatial pooling: per-(batch, channel) mean and max.
        pooled_avg = input.mean(dim=[-2, -1]).unsqueeze(-1)
        pooled_max = input.max(dim=-2)[0].max(dim=-1)[0].unsqueeze(-1)
        # Shared bottleneck applied to each pooled descriptor.
        branch_avg = self._linear(
            torch.relu(self._linear(pooled_avg, self.weight_fc1)),
            self.weight_fc2)
        branch_max = self._linear(
            torch.relu(self._linear(pooled_max, self.weight_fc1)),
            self.weight_fc2)
        gate = torch.sigmoid(branch_avg + branch_max)
        return torch.reshape(gate, [input.shape[0], self.N_in, 1, 1])

    def _linear(self, input, w):
        # Bias-free linear layer via broadcasting: multiply the expanded
        # input against w and contract over the shared feature axis.
        expanded = input.unsqueeze(-3)
        w_view = w.reshape(1, w.shape[0], w.shape[1], 1)
        return (expanded * w_view).sum(-2)
def get_inputs():
    # A single random (4, 4, 4, 4) feature map.
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    # Constructor needs the channel count N_in.
    return [[], {'N_in': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    # Spatial mean pooling: averages each 16-element spatial plane of the
    # (4, 4, 4, 4) input into one value per (batch, channel) pair.
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    # Divide by the plane size (16) to turn the sum into a mean.
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_max_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Spatial max pooling: loads all 16 values of each (4, 4) spatial
    # plane and reduces them with a fully unrolled max tree, producing one
    # value per (batch, channel) pair.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp3 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp5 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp8 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp10 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp12 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp15 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp18 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp20 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp23 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp24 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp26 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp28 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    # Column-wise maxima, then combine columns into the plane maximum.
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp9 = triton_helpers.maximum(tmp7, tmp8)
    tmp11 = triton_helpers.maximum(tmp9, tmp10)
    tmp13 = triton_helpers.maximum(tmp11, tmp12)
    tmp14 = triton_helpers.maximum(tmp6, tmp13)
    tmp17 = triton_helpers.maximum(tmp15, tmp16)
    tmp19 = triton_helpers.maximum(tmp17, tmp18)
    tmp21 = triton_helpers.maximum(tmp19, tmp20)
    tmp22 = triton_helpers.maximum(tmp14, tmp21)
    tmp25 = triton_helpers.maximum(tmp23, tmp24)
    tmp27 = triton_helpers.maximum(tmp25, tmp26)
    tmp29 = triton_helpers.maximum(tmp27, tmp28)
    tmp30 = triton_helpers.maximum(tmp22, tmp29)
    tl.store(out_ptr0 + x0, tmp30, xmask)
@triton.jit
def triton_poi_fused_mul_relu_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # First bottleneck layer for both attention branches at once:
    # relu(pooled @ fc1^T) for the mean-pooled (in_ptr0) and max-pooled
    # (in_ptr2) descriptors, sharing the fc1 weights (in_ptr1).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp17 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp22 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp25 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    # Mean branch: unrolled 4-element dot product, then relu.
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = tl.full([1], 0, tl.int32)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    # Max branch: same dot product against the max-pooled descriptor.
    tmp18 = tmp17 * tmp1
    tmp20 = tmp19 * tmp4
    tmp21 = tmp18 + tmp20
    tmp23 = tmp22 * tmp8
    tmp24 = tmp21 + tmp23
    tmp26 = tmp25 * tmp12
    tmp27 = tmp24 + tmp26
    tmp28 = triton_helpers.maximum(tmp15, tmp27)
    tl.store(out_ptr0 + x2, tmp16, xmask)
    tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_sum_3(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Second bottleneck layer plus gate: applies fc2 (in_ptr1) to both
    # branch activations (in_ptr0: mean branch, in_ptr2: max branch), sums
    # them, and stores sigmoid(sum) in in_out_ptr0. Also stores
    # s * (1 - s) in out_ptr0 for the sigmoid backward pass.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp20 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp23 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    # Mean branch: unrolled 4-element dot product against fc2.
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    # Max branch: same dot product, then combine and gate.
    tmp16 = tmp15 * tmp1
    tmp18 = tmp17 * tmp4
    tmp19 = tmp16 + tmp18
    tmp21 = tmp20 * tmp8
    tmp22 = tmp19 + tmp21
    tmp24 = tmp23 * tmp12
    tmp25 = tmp22 + tmp24
    tmp26 = tmp14 + tmp25
    tmp27 = tl.sigmoid(tmp26)
    tmp28 = 1.0
    tmp29 = tmp28 - tmp27
    tmp30 = tmp27 * tmp29
    tl.store(in_out_ptr0 + x2, tmp27, xmask)
    tl.store(out_ptr0 + x2, tmp30, xmask)
def call(args):
    # Fused channel-attention forward on CUDA device 0:
    # mean-pool -> buf2, max-pool -> buf1, shared bottleneck + relu ->
    # buf3/buf4, fc2 + sigmoid gate -> buf6 (with buf7 holding the
    # sigmoid-backward factor). Returns the gate plus tensors retained
    # for autograd.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](buf2, primals_1, 16, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_max_1[grid(16)](primals_1, buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_1
        buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_mul_relu_sum_2[grid(16)](buf2, primals_2, buf1,
            buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf6 = reinterpret_tensor(buf5, (4, 4, 1), (4, 1, 1), 0)
        del buf5
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_sigmoid_backward_sum_3[grid(16)](buf6,
            buf3, primals_3, buf4, buf7, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        del buf3
        del buf4
    return reinterpret_tensor(buf6, (4, 4, 1, 1), (4, 1, 1, 1), 0
        ), primals_2, primals_3, reinterpret_tensor(buf2, (4, 1, 4, 1), (4,
        4, 1, 1), 0), reinterpret_tensor(buf1, (4, 1, 4, 1), (4, 4, 1, 1), 0
        ), buf7
class fChannelAttentionNew(torch.nn.Module):
    """Triton-compiled channel attention; same parameters and output gate
    shape as fChannelAttention, with the forward pass fused into `call`."""

    def __init__(self, N_in, ratio=1):
        super(fChannelAttentionNew, self).__init__()
        self.N_in = N_in
        self.ratio = ratio
        self.weight_fc1 = torch.nn.Parameter(torch.Tensor(self.N_in //
            ratio, self.N_in))
        self.weight_fc2 = torch.nn.Parameter(torch.Tensor(self.N_in, self.
            N_in // ratio))
        self.reset_parameters()

    def reset_parameters(self):
        # Kaiming-uniform init, matching the eager-mode module.
        torch.nn.init.kaiming_uniform_(self.weight_fc1, a=math.sqrt(5))
        torch.nn.init.kaiming_uniform_(self.weight_fc2, a=math.sqrt(5))

    def _linear(self, input, w):
        # Broadcasted bias-free linear layer; kept for API parity with the
        # eager module, but not used by the compiled forward below.
        in_reshaped = input.unsqueeze(-3)
        w_reshaped = w.reshape(1, w.shape[0], w.shape[1], 1)
        output = (in_reshaped * w_reshaped).sum(-2)
        return output

    def forward(self, input_0):
        primals_2 = self.weight_fc1
        primals_3 = self.weight_fc2
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        # `call` also returns tensors retained for autograd; only the
        # attention gate is the module output.
        return output[0]
|
dwromero/att_gconvs
|
fChannelAttention
| false
| 15,287
|
[
"MIT"
] | 53
|
872259cad49763fdcfa3e96e80b6b5c331adf084
|
https://github.com/dwromero/att_gconvs/tree/872259cad49763fdcfa3e96e80b6b5c331adf084
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.