| Column | Type | Range |
|---|---|---|
| entry_point | string | length 1-65 |
| original_triton_python_code | string | length 208-619k |
| optimised_triton_code | string | length 1.15k-275k |
| repo_name | string | length 7-115 |
| module_name | string | length 1-65 |
| synthetic | bool | 1 class |
| uuid | int64 | 0-18.5k |
| licenses | list | length 1-6 |
| stars | int64 | 0-19.8k |
| sha | string | length 40 |
| repo_link | string | length 72-180 |
entry_point: FixupResidualChain

original_triton_python_code:
import torch
import numpy as np
import torch as th
import torch.utils.data
import torch.nn as nn
from collections import OrderedDict
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModule(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, zero pad the convolutions to maintain a constant size.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True, activation
=None, norm_layer=None):
super(ConvModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=padding, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
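# Illustrative only (not part of the original module): a minimal sketch of how
# ConvModule is meant to be used. With pad=True and ksize=3 the spatial size is
# preserved, and requesting a norm layer disables the conv bias.
def _example_conv_module():
    block = ConvModule(n_in=3, n_out=16, ksize=3, activation='relu',
        norm_layer='batch')
    y = block(torch.randn(2, 3, 32, 32))
    assert y.shape == (2, 16, 32, 32)
    return y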
class FixupBasicBlock(nn.Module):
expansion = 1
def __init__(self, n_features, ksize=3, pad=True, activation='relu'):
super(FixupBasicBlock, self).__init__()
self.bias1a = nn.Parameter(th.zeros(1))
self.conv1 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None)
self.bias1b = nn.Parameter(th.zeros(1))
self.activation = _get_activation(activation)
self.bias2a = nn.Parameter(th.zeros(1))
self.conv2 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None)
self.scale = nn.Parameter(th.ones(1))
self.bias2b = nn.Parameter(th.zeros(1))
self.activation2 = _get_activation(activation)
        self.ksize = ksize
self.pad = pad
def forward(self, x):
identity = x
out = self.conv1(x + self.bias1a)
out = self.activation(out + self.bias1b)
out = self.conv2(out + self.bias2a)
out = out * self.scale + self.bias2b
crop = (self.ksize - 1) // 2 * 2
if crop > 0 and not self.pad:
identity = identity[:, :, crop:-crop, crop:-crop]
out += identity
out = self.activation2(out)
return out
class FixupResidualChain(nn.Module):
"""Linear chain of residual blocks.
Args:
n_features(int): number of input channels.
depth(int): number of residual blocks
ksize(int): size of the convolution kernel (square).
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
pad(bool): if True, zero pad the convs to maintain a constant size.
"""
def __init__(self, n_features, depth=3, ksize=3, activation='relu',
norm_layer=None, pad=True):
super(FixupResidualChain, self).__init__()
assert isinstance(n_features, int
) and n_features > 0, 'Number of feature channels should be a positive integer'
assert isinstance(ksize, int) and ksize > 0 or isinstance(ksize, list
), 'Kernel size should be a positive integer or a list of integers'
assert isinstance(depth, int
) and depth > 0 and depth < 16, 'Depth should be a positive integer lower than 16'
self.depth = depth
layers = OrderedDict()
for lvl in range(depth):
blockname = 'resblock{}'.format(lvl)
layers[blockname] = FixupBasicBlock(n_features, ksize=ksize,
activation=activation, pad=pad)
self.net = nn.Sequential(layers)
self._reset_weights()
def _reset_weights(self):
for m in self.net.modules():
if isinstance(m, FixupBasicBlock):
nn.init.normal_(m.conv1.conv.weight, mean=0, std=np.sqrt(2 /
(m.conv1.conv.weight.shape[0] * np.prod(m.conv1.conv.
weight.shape[2:]))) * self.depth ** -0.5)
nn.init.constant_(m.conv2.conv.weight, 0)
def forward(self, x):
x = self.net(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4}]
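# Illustrative only: exercising FixupResidualChain with the shapes implied by
# get_inputs()/get_init_inputs() above. With n_features=4, ksize=3 and the
# default depth=3, _reset_weights draws conv1 weights from N(0, std) with
# std = sqrt(2 / (4 * 3 * 3)) * 3 ** -0.5 (approx. 0.136) and zeroes conv2,
# so at initialization each block reduces to a ReLU of its input.
def _example_fixup_chain():
    args, kwargs = get_init_inputs()
    chain = FixupResidualChain(*args, **kwargs)
    x, = get_inputs()
    y = chain(x)
    assert y.shape == x.shape
    return y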
optimised_triton_code:
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch as th
import torch.utils.data
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp8 = tl.load(in_ptr3 + 0)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 + tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp10 = tmp7 + tmp9
tmp11 = 0.0
tmp12 = tmp7 <= tmp11
tl.store(out_ptr0 + x3, tmp10, xmask)
tl.store(out_ptr1 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_relu_2(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + x3, xmask)
tmp13 = tl.load(in_ptr4 + 0)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 * tmp4
tmp8 = tmp5 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp15 = tmp12 + tmp14
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_relu_threshold_backward_3(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + x3, xmask)
tmp10 = tl.load(in_ptr4 + 0)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr5 + 0)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp16 = tl.load(in_ptr6 + x3, xmask)
tmp24 = tl.load(in_ptr7 + 0)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp5 = tmp2 * tmp4
tmp8 = tmp5 + tmp7
tmp12 = tmp9 * tmp11
tmp15 = tmp12 + tmp14
tmp17 = tmp15 + tmp16
tmp18 = tl.full([1], 0, tl.int32)
tmp19 = triton_helpers.maximum(tmp18, tmp17)
tmp20 = tmp8 + tmp19
tmp21 = triton_helpers.maximum(tmp18, tmp20)
tmp22 = 0.0
tmp23 = tmp19 <= tmp22
tmp26 = tmp21 + tmp25
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp21, xmask)
tl.store(out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr2 + x3, tmp26, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_relu_threshold_backward_4(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp2 * tmp4
tmp8 = tmp5 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp13 = 0.0
tmp14 = tmp12 <= tmp13
tmp15 = tmp9 <= tmp13
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
tl.store(out_ptr2 + x3, tmp15, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27, primals_28
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (1,), (1,))
assert_size_stride(primals_11, (1,), (1,))
assert_size_stride(primals_12, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (1,), (1,))
assert_size_stride(primals_15, (1,), (1,))
assert_size_stride(primals_16, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (1,), (1,))
assert_size_stride(primals_19, (1,), (1,))
assert_size_stride(primals_20, (1,), (1,))
assert_size_stride(primals_21, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_22, (4,), (1,))
assert_size_stride(primals_23, (1,), (1,))
assert_size_stride(primals_24, (1,), (1,))
assert_size_stride(primals_25, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_26, (4,), (1,))
assert_size_stride(primals_27, (1,), (1,))
assert_size_stride(primals_28, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf1, primals_4, primals_5, primals_6, buf2, buf22, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_4
del primals_5
del primals_6
buf3 = extern_kernels.convolution(buf2, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
buf5 = buf1
del buf1
triton_poi_fused_add_convolution_mul_relu_2[grid(256)](buf4,
primals_8, primals_9, primals_10, primals_1, primals_11, buf5,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
del primals_8
buf6 = extern_kernels.convolution(buf5, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf6, primals_13, primals_14, primals_15, buf7, buf20, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
del primals_14
del primals_15
buf8 = extern_kernels.convolution(buf7, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8
del buf8
buf10 = buf6
del buf6
buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_mul_relu_threshold_backward_3[grid
(256)](buf9, primals_17, primals_18, primals_19, buf4,
primals_9, primals_10, primals_1, primals_20, buf10, buf21,
buf11, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_10
del primals_17
del primals_19
del primals_20
buf12 = extern_kernels.convolution(buf11, primals_21, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf12, primals_22, primals_23, primals_24, buf13, buf18, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_22
del primals_23
del primals_24
buf14 = extern_kernels.convolution(buf13, primals_25, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 4, 4), (64, 16, 4, 1))
buf15 = buf14
del buf14
buf16 = buf12
del buf12
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_mul_relu_threshold_backward_4[grid
(256)](buf15, primals_26, primals_27, primals_28, buf10, buf16,
buf17, buf19, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf10
del primals_26
del primals_28
return (buf16, primals_3, primals_7, primals_9, primals_12, primals_16,
primals_18, primals_21, primals_25, primals_27, buf0, buf2, buf4,
buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf18, buf19, buf20,
buf21, buf22)
def _get_activation(activation):
valid = ['relu', 'leaky_relu', 'lrelu', 'tanh', 'sigmoid']
assert activation in valid, 'activation should be one of {}'.format(valid)
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu' or activation == 'lrelu':
return nn.LeakyReLU(inplace=True)
if activation == 'sigmoid':
return nn.Sigmoid()
if activation == 'tanh':
return nn.Tanh()
return None
def _init_fc_or_conv(fc_conv, activation):
gain = 1.0
if activation is not None:
gain = nn.init.calculate_gain(activation)
nn.init.xavier_uniform_(fc_conv.weight, gain)
if fc_conv.bias is not None:
nn.init.constant_(fc_conv.bias, 0.0)
def _get_norm_layer(norm_layer, channels):
valid = ['instance', 'batch']
assert norm_layer in valid, 'norm_layer should be one of {}'.format(valid)
if norm_layer == 'instance':
layer = nn.InstanceNorm2d(channels, affine=True)
elif norm_layer == 'batch':
layer = nn.BatchNorm2d(channels, affine=True)
nn.init.constant_(layer.bias, 0.0)
nn.init.constant_(layer.weight, 1.0)
return layer
class ConvModule(nn.Module):
"""Basic convolution module with conv + norm(optional) + activation(optional).
Args:
n_in(int): number of input channels.
n_out(int): number of output channels.
ksize(int): size of the convolution kernel (square).
stride(int): downsampling factor
pad(bool): if True, zero pad the convolutions to maintain a constant size.
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
"""
def __init__(self, n_in, n_out, ksize=3, stride=1, pad=True, activation
=None, norm_layer=None):
super(ConvModule, self).__init__()
assert isinstance(n_in, int
) and n_in > 0, 'Input channels should be a positive integer got {}'.format(
n_in)
assert isinstance(n_out, int
) and n_out > 0, 'Output channels should be a positive integer got {}'.format(
n_out)
assert isinstance(ksize, int
) and ksize > 0, 'Kernel size should be a positive integer got {}'.format(
ksize)
padding = (ksize - 1) // 2 if pad else 0
use_bias_in_conv = norm_layer is None
self.add_module('conv', nn.Conv2d(n_in, n_out, ksize, stride=stride,
padding=padding, bias=use_bias_in_conv))
if norm_layer is not None:
self.add_module('norm', _get_norm_layer(norm_layer, n_out))
if activation is not None:
self.add_module('activation', _get_activation(activation))
_init_fc_or_conv(self.conv, activation)
def forward(self, x):
for c in self.children():
x = c(x)
return x
class FixupBasicBlock(nn.Module):
expansion = 1
def __init__(self, n_features, ksize=3, pad=True, activation='relu'):
super(FixupBasicBlock, self).__init__()
self.bias1a = nn.Parameter(th.zeros(1))
self.conv1 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None)
self.bias1b = nn.Parameter(th.zeros(1))
self.activation = _get_activation(activation)
self.bias2a = nn.Parameter(th.zeros(1))
self.conv2 = ConvModule(n_features, n_features, ksize=ksize, stride
=1, pad=pad, activation=None, norm_layer=None)
self.scale = nn.Parameter(th.ones(1))
self.bias2b = nn.Parameter(th.zeros(1))
self.activation2 = _get_activation(activation)
        self.ksize = ksize
self.pad = pad
def forward(self, x):
identity = x
out = self.conv1(x + self.bias1a)
out = self.activation(out + self.bias1b)
out = self.conv2(out + self.bias2a)
out = out * self.scale + self.bias2b
crop = (self.ksize - 1) // 2 * 2
if crop > 0 and not self.pad:
identity = identity[:, :, crop:-crop, crop:-crop]
out += identity
out = self.activation2(out)
return out
class FixupResidualChainNew(nn.Module):
"""Linear chain of residual blocks.
Args:
n_features(int): number of input channels.
depth(int): number of residual blocks
ksize(int): size of the convolution kernel (square).
activation(str): nonlinear activation function between convolutions.
norm_layer(str): normalization to apply between the convolution modules.
pad(bool): if True, zero pad the convs to maintain a constant size.
"""
def __init__(self, n_features, depth=3, ksize=3, activation='relu',
norm_layer=None, pad=True):
super(FixupResidualChainNew, self).__init__()
assert isinstance(n_features, int
) and n_features > 0, 'Number of feature channels should be a positive integer'
assert isinstance(ksize, int) and ksize > 0 or isinstance(ksize, list
), 'Kernel size should be a positive integer or a list of integers'
assert isinstance(depth, int
) and depth > 0 and depth < 16, 'Depth should be a positive integer lower than 16'
self.depth = depth
layers = OrderedDict()
for lvl in range(depth):
blockname = 'resblock{}'.format(lvl)
layers[blockname] = FixupBasicBlock(n_features, ksize=ksize,
activation=activation, pad=pad)
self.net = nn.Sequential(layers)
self._reset_weights()
def _reset_weights(self):
for m in self.net.modules():
if isinstance(m, FixupBasicBlock):
nn.init.normal_(m.conv1.conv.weight, mean=0, std=np.sqrt(2 /
(m.conv1.conv.weight.shape[0] * np.prod(m.conv1.conv.
weight.shape[2:]))) * self.depth ** -0.5)
nn.init.constant_(m.conv2.conv.weight, 0)
def forward(self, input_0):
primals_2 = self.net.resblock0.bias1a
primals_5 = self.net.resblock0.bias1b
primals_6 = self.net.resblock0.bias2a
primals_9 = self.net.resblock0.scale
primals_10 = self.net.resblock0.bias2b
primals_3 = self.net.resblock0.conv1.conv.weight
primals_4 = self.net.resblock0.conv1.conv.bias
primals_7 = self.net.resblock0.conv2.conv.weight
primals_8 = self.net.resblock0.conv2.conv.bias
primals_11 = self.net.resblock1.bias1a
primals_14 = self.net.resblock1.bias1b
primals_15 = self.net.resblock1.bias2a
primals_18 = self.net.resblock1.scale
primals_19 = self.net.resblock1.bias2b
primals_12 = self.net.resblock1.conv1.conv.weight
primals_13 = self.net.resblock1.conv1.conv.bias
primals_16 = self.net.resblock1.conv2.conv.weight
primals_17 = self.net.resblock1.conv2.conv.bias
primals_20 = self.net.resblock2.bias1a
primals_23 = self.net.resblock2.bias1b
primals_24 = self.net.resblock2.bias2a
primals_27 = self.net.resblock2.scale
primals_28 = self.net.resblock2.bias2b
primals_21 = self.net.resblock2.conv1.conv.weight
primals_22 = self.net.resblock2.conv1.conv.bias
primals_25 = self.net.resblock2.conv2.conv.weight
primals_26 = self.net.resblock2.conv2.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28])
return output[0]
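# Illustrative only: a minimal sketch of how this Inductor-generated wrapper is
# intended to be driven. It assumes a CUDA device, since call() allocates CUDA
# buffers and launches the Triton kernels above, so both the module parameters
# and the input must live on the GPU. Given the same parameters it should match
# the eager FixupResidualChain from the original listing.
def _example_compiled_chain():
    model = FixupResidualChainNew(n_features=4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    y = model(x)
    assert y.shape == x.shape
    return y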
repo_name: sutkarsh/ttools
module_name: FixupResidualChain
synthetic: false
uuid: 10939
licenses: ["MIT"]
stars: 0
sha: a2e5fbf308566c0c54ab9d6ad1d9f8bc63f8fe99
repo_link: https://github.com/sutkarsh/ttools/tree/a2e5fbf308566c0c54ab9d6ad1d9f8bc63f8fe99

entry_point: TransformerLayer

original_triton_python_code:
import math
import torch
import uuid
from torch import Tensor
import torch.nn as nn
from typing import Tuple
import torch.nn.functional as F
from typing import Optional
from typing import Dict
from torch.nn import Parameter
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different
(and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
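# Illustrative only: the docstring above mentions OpenAI GPT's tanh-based
# approximation; this hypothetical helper sketches it for comparison with the
# exact erf form implemented by gelu(). As the docstring notes, the two agree
# closely but not exactly.
def _gelu_tanh_approx(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x +
        0.044715 * torch.pow(x, 3))))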
def utils_softmax(x, dim: 'int', onnx_trace: 'bool'=False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def with_incremental_state(cls):
cls.__bases__ = (FairseqIncrementalState,) + tuple(b for b in cls.
__bases__ if b != FairseqIncrementalState)
return cls
class ESM1LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12, affine=True):
"""Construct a layernorm layer in the TF style (eps inside the sqrt)."""
super().__init__()
self.hidden_size = (hidden_size,) if isinstance(hidden_size, int
) else tuple(hidden_size)
self.eps = eps
self.affine = bool(affine)
if self.affine:
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
else:
self.weight, self.bias = None, None
def forward(self, x):
dims = tuple(-(i + 1) for i in range(len(self.hidden_size)))
means = x.mean(dims, keepdim=True)
x_zeromean = x - means
variances = x_zeromean.pow(2).mean(dims, keepdim=True)
x = x_zeromean / torch.sqrt(variances + self.eps)
if self.affine:
x = self.weight * x + self.bias
return x
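# Illustrative only: ESM1LayerNorm above normalizes over the trailing
# hidden_size dims with eps inside the sqrt, which for a 1-D hidden size should
# match torch.nn.functional.layer_norm. A quick sanity check:
def _example_esm1_layer_norm():
    ln = ESM1LayerNorm(8, eps=1e-12)
    x = torch.randn(2, 3, 8)
    ref = F.layer_norm(x, (8,), ln.weight, ln.bias, eps=1e-12)
    assert torch.allclose(ln(x), ref, atol=1e-5)
    return ln(x)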
class FairseqIncrementalState(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_incremental_state()
def init_incremental_state(self):
self._incremental_state_id = str(uuid.uuid4())
def _get_full_incremental_state_key(self, key: 'str') ->str:
return '{}.{}'.format(self._incremental_state_id, key)
def get_incremental_state(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str'
) ->Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str',
value: 'Dict[str, Optional[Tensor]]') ->Optional[Dict[str, Dict[str,
Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
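# Illustrative only: the incremental-state helpers above key a shared dict by a
# per-module uuid, so several modules can cache decoding state in one dict
# without collisions. A minimal sketch (the key and tensor below are
# illustrative, not from the dataset row):
def _example_incremental_state():
    owner = FairseqIncrementalState()
    cache: 'Dict[str, Dict[str, Optional[Tensor]]]' = {}
    owner.set_incremental_state(cache, 'attn_state', {'prev_key': torch.ones(1, 2, 4)})
    saved = owner.get_incremental_state(cache, 'attn_state')
    assert saved is not None and saved['prev_key'].shape == (1, 2, 4)
    return saved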
@with_incremental_state
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=
0.0, bias=True, add_bias_kv=False, add_zero_attn=False,
self_attention=False, encoder_decoder_attention=False):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and value to be of the same size'
self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
self.enable_torch_version = False
if hasattr(F, 'multi_head_attention_forward'):
self.enable_torch_version = True
else:
self.enable_torch_version = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key: 'Optional[Tensor]', value:
'Optional[Tensor]', key_padding_mask: 'Optional[Tensor]'=None,
incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]'=None,
need_weights: 'bool'=True, static_kv: 'bool'=False, attn_mask:
'Optional[Tensor]'=None, before_softmax: 'bool'=False,
need_head_weights: 'bool'=False) ->Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if (self.enable_torch_version and not self.onnx_trace and
incremental_state is None and not static_kv and not torch.jit.
is_scripting() and not need_head_weights):
assert key is not None and value is not None
return F.multi_head_attention_forward(query, key, value, self.
embed_dim, self.num_heads, torch.empty([0]), torch.cat((
self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k, self.bias_v, self.add_zero_attn, self.dropout,
self.out_proj.weight, self.out_proj.bias, self.training,
key_padding_mask, need_weights, attn_mask,
use_separate_proj_weight=True, q_proj_weight=self.q_proj.
weight, k_proj_weight=self.k_proj.weight, v_proj_weight=
self.v_proj.weight)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and 'prev_key' in saved_state:
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1)
], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if saved_state is not None:
if 'prev_key' in saved_state:
_prev_key = saved_state['prev_key']
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.
head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if 'prev_value' in saved_state:
_prev_value = saved_state['prev_value']
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1,
self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: 'Optional[Tensor]' = None
if 'prev_key_padding_mask' in saved_state:
prev_key_padding_mask = saved_state['prev_key_padding_mask']
assert k is not None and v is not None
key_padding_mask = (MultiheadAttention.
_append_prev_key_padding_mask(key_padding_mask=
key_padding_mask, prev_key_padding_mask=
prev_key_padding_mask, batch_size=bsz, src_len=k.size(1),
static_kv=static_kv))
saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.
head_dim)
saved_state['prev_value'] = v.view(bsz, self.num_heads, -1,
self.head_dim)
saved_state['prev_key_padding_mask'] = key_padding_mask
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state,
saved_state)
assert k is not None
src_len = k.size(1)
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask, torch.zeros
(key_padding_mask.size(0), 1).type_as(key_padding_mask)
], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights,
tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
attn_weights = attn_weights.masked_fill(key_padding_mask.
unsqueeze(1).unsqueeze(2), float('-inf'))
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len,
src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = utils_softmax(attn_weights, dim=-1, onnx_trace
=self.onnx_trace)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=
self.dropout, training=self.training)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
head_dim]
if self.onnx_trace and attn.size(1) == 1:
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz,
embed_dim)
attn = self.out_proj(attn)
attn_weights: 'Optional[Tensor]' = None
if need_weights:
attn_weights = attn_weights_float.view(bsz, self.num_heads,
tgt_len, src_len).transpose(1, 0)
if not need_head_weights:
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(key_padding_mask: 'Optional[Tensor]',
prev_key_padding_mask: 'Optional[Tensor]', batch_size: 'int',
src_len: 'int', static_kv: 'bool') ->Optional[Tensor]:
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(),
key_padding_mask.float()], dim=1)
elif prev_key_padding_mask is not None:
filler = torch.zeros((batch_size, src_len -
prev_key_padding_mask.size(1)), device=
prev_key_padding_mask.device)
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(),
filler.float()], dim=1)
elif key_padding_mask is not None:
filler = torch.zeros((batch_size, src_len - key_padding_mask.
size(1)), device=key_padding_mask.device)
new_key_padding_mask = torch.cat([filler.float(),
key_padding_mask.float()], dim=1)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(self, incremental_state:
'Dict[str, Dict[str, Optional[Tensor]]]', new_order: 'Tensor'):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state,
input_buffer)
return incremental_state
def _get_input_buffer(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]') ->Dict[str,
Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, 'attn_state')
if result is not None:
return result
else:
empty_result: 'Dict[str, Optional[Tensor]]' = {}
return empty_result
def _set_input_buffer(self, incremental_state:
'Dict[str, Dict[str, Optional[Tensor]]]', buffer:
'Dict[str, Optional[Tensor]]'):
return self.set_incremental_state(incremental_state, 'attn_state',
buffer)
def apply_sparse_mask(attn_weights, tgt_len: 'int', src_len: 'int', bsz:
'int'):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + '.' if name != '' else ''
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + 'in_proj_weight'):
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + 'q_proj.weight'] = state_dict[k][:dim]
items_to_add[prefix + 'k_proj.weight'] = state_dict[k][dim:
2 * dim]
items_to_add[prefix + 'v_proj.weight'] = state_dict[k][2 * dim:
]
keys_to_remove.append(k)
k_bias = prefix + 'in_proj_bias'
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + 'q_proj.bias'] = state_dict[k_bias][:
dim]
items_to_add[prefix + 'k_proj.bias'] = state_dict[k_bias][
dim:2 * dim]
items_to_add[prefix + 'v_proj.bias'] = state_dict[k_bias][
2 * dim:]
keys_to_remove.append(prefix + 'in_proj_bias')
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
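# Illustrative only: MultiheadAttention.forward expects (time, batch, channel)
# inputs, per its docstring. A minimal self-attention sketch (sizes are
# illustrative; self_attention=True requires query, key and value dims to match):
def _example_multihead_attention():
    mha = MultiheadAttention(embed_dim=8, num_heads=2, self_attention=True)
    x = torch.randn(5, 3, 8)  # (tgt_len, bsz, embed_dim)
    out, attn = mha(query=x, key=x, value=x, need_weights=True)
    assert out.shape == (5, 3, 8)
    return out, attn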
class TransformerLayer(nn.Module):
"""Transformer layer block."""
def __init__(self, embed_dim, ffn_embed_dim, attention_heads,
add_bias_kv=True, use_esm1b_layer_norm=False):
super().__init__()
self.embed_dim = embed_dim
self.ffn_embed_dim = ffn_embed_dim
self.attention_heads = attention_heads
self._init_submodules(add_bias_kv, use_esm1b_layer_norm)
def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm):
BertLayerNorm = (ESM1bLayerNorm if use_esm1b_layer_norm else
ESM1LayerNorm)
self.self_attn = MultiheadAttention(self.embed_dim, self.
attention_heads, add_bias_kv=add_bias_kv, add_zero_attn=False)
self.self_attn_layer_norm = BertLayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, self.ffn_embed_dim)
self.fc2 = nn.Linear(self.ffn_embed_dim, self.embed_dim)
self.final_layer_norm = BertLayerNorm(self.embed_dim)
def forward(self, x, self_attn_mask=None, self_attn_padding_mask=None,
need_head_weights=False):
residual = x
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(query=x, key=x, value=x, key_padding_mask=
self_attn_padding_mask, need_weights=True, need_head_weights=
need_head_weights, attn_mask=self_attn_mask)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = gelu(self.fc1(x))
x = self.fc2(x)
x = residual + x
return x, attn
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'ffn_embed_dim': 4, 'attention_heads': 4}]
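# Illustrative only: exercising TransformerLayer with the shapes implied by
# get_inputs()/get_init_inputs() above (embed_dim=ffn_embed_dim=4, 4 heads,
# inputs laid out as (seq_len, batch, embed_dim)).
def _example_transformer_layer():
    args, kwargs = get_init_inputs()
    layer = TransformerLayer(*args, **kwargs)
    x, = get_inputs()
    y, attn = layer(x)
    assert y.shape == x.shape
    return y, attn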
optimised_triton_code:
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import uuid
from torch import Tensor
import torch.nn as nn
from typing import Tuple
import torch.nn.functional as F
from typing import Optional
from typing import Dict
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-12
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (-8 + x0), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = tl.load(in_ptr1 + x0, tmp6 & xmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_mul_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = x2 % 4
tl.full([1], 0, tl.int64)
tmp4 = tl.full([1], 4, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr0 + x0 % 4, tmp5 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1], 8, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.load(in_ptr1 + (-4 + x0 % 4), tmp10 & xmask, eviction_policy
='evict_last', other=0.0)
tmp12 = tmp1 >= tmp8
tl.full([1], 12, tl.int64)
tmp15 = tl.load(in_ptr2 + (-8 + x0 % 4), tmp12 & xmask, eviction_policy
='evict_last', other=0.0)
tmp16 = tl.where(tmp10, tmp11, tmp15)
tmp17 = tl.where(tmp5, tmp6, tmp16)
tmp18 = tmp0 + tmp17
tmp19 = 1.0
tmp20 = tmp18 * tmp19
tl.store(in_out_ptr0 + x2, tmp20, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 5 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 5 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 5 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 5 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp0 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp1 - tmp8
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp8
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 5
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mean_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = xindex // 20
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 80 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (20 + x0 + 80 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (40 + x0 + 80 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (60 + x0 + 80 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mean_pow_sub_9(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-12
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_div_erf_mul_11(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_8, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4, 4), (4, 1))
assert_size_stride(primals_19, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(64)](primals_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(64)](primals_2,
buf0, primals_3, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
del primals_3
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((12,), (1,), torch.float32)
triton_poi_fused_cat_2[grid(12)](primals_4, primals_5, primals_6,
buf3, 12, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(buf3, (4,), (1,), 4),
reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf4)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(buf3, (4,), (1,), 8),
reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf5)
del buf3
buf6 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_cat_3[grid(80)](buf5, primals_8, buf6, 80, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_8
buf7 = reinterpret_tensor(buf2, (16, 4, 1), (1, 16, 64), 0)
del buf2
triton_poi_fused_mul_4[grid(64)](buf7, primals_4, primals_5,
primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
del primals_5
del primals_6
buf8 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_cat_3[grid(80)](buf4, primals_7, buf8, 80, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
buf9 = empty_strided_cuda((16, 4, 5), (20, 5, 1), torch.float32)
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 1, 5), (1, 0,
16), 0), out=buf9)
buf10 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 64), 0)
del buf4
buf11 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 64), 0)
del buf5
triton_poi_fused__softmax_5[grid(64)](buf9, buf10, buf11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf12 = buf9
del buf9
triton_poi_fused__softmax_6[grid(320)](buf12, buf10, buf11, 320,
XBLOCK=256, num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 1), 0)
del buf11
extern_kernels.bmm(buf12, reinterpret_tensor(buf6, (16, 5, 1), (1,
16, 0), 0), out=buf13)
buf14 = reinterpret_tensor(buf10, (4, 16, 1), (16, 1, 1), 0)
del buf10
triton_poi_fused_clone_7[grid(4, 16)](buf13, buf14, 4, 16, XBLOCK=
16, YBLOCK=4, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0)
del buf13
extern_kernels.addmm(primals_10, reinterpret_tensor(buf14, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf15)
del primals_10
buf16 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_mean_8[grid(80)](buf12, buf16, 80, XBLOCK=128,
num_warps=4, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_mean_pow_sub_9[grid(16)](primals_1, buf15,
buf17, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_sub_10[grid(64)](primals_14,
primals_1, buf15, buf17, buf18, primals_15, buf19, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf17
del buf18
del primals_15
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_17, reinterpret_tensor(buf19, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf20)
del primals_17
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_erf_mul_11[grid(64)](buf20, buf21, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf22 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf21, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf22)
buf23 = reinterpret_tensor(buf22, (4, 4, 4), (16, 4, 1), 0)
del buf22
triton_poi_fused_add_12[grid(64)](buf23, primals_1, buf15,
primals_19, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_19
return buf23, buf16, primals_1, primals_14, reinterpret_tensor(buf1, (
16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf14, (16, 4), (4, 1), 0
), buf15, reinterpret_tensor(buf19, (16, 4), (4, 1), 0
), buf20, reinterpret_tensor(buf21, (16, 4), (4, 1), 0
), primals_18, primals_16, primals_9, reinterpret_tensor(buf6, (16,
1, 5), (1, 1, 16), 0), reinterpret_tensor(buf7, (16, 1, 4), (1, 1,
16), 0), reinterpret_tensor(buf8, (16, 5, 1), (1, 16, 1), 0
), primals_13, primals_12, primals_11
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different
(and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
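# A minimal sketch of the tanh-based approximation quoted in the docstring above,
# for comparison with the exact erf form (illustrative only; gelu_tanh_approx is
# not part of the original module):
def gelu_tanh_approx(x):
    """OpenAI-GPT-style GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))."""
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))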
def utils_softmax(x, dim: 'int', onnx_trace: 'bool'=False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def with_incremental_state(cls):
cls.__bases__ = (FairseqIncrementalState,) + tuple(b for b in cls.
__bases__ if b != FairseqIncrementalState)
return cls
class ESM1LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12, affine=True):
"""Construct a layernorm layer in the TF style (eps inside the sqrt)."""
super().__init__()
self.hidden_size = (hidden_size,) if isinstance(hidden_size, int
) else tuple(hidden_size)
self.eps = eps
self.affine = bool(affine)
if self.affine:
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
else:
self.weight, self.bias = None, None
def forward(self, x):
dims = tuple(-(i + 1) for i in range(len(self.hidden_size)))
means = x.mean(dims, keepdim=True)
x_zeromean = x - means
variances = x_zeromean.pow(2).mean(dims, keepdim=True)
x = x_zeromean / torch.sqrt(variances + self.eps)
if self.affine:
x = self.weight * x + self.bias
return x
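# Minimal usage sketch for ESM1LayerNorm (illustrative, not part of the original module):
#   ln = ESM1LayerNorm(hidden_size=4)
#   y = ln(torch.randn(2, 3, 4))  # normalizes the last dim: (x - mean) / sqrt(var + eps), then weight * x + bias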
class FairseqIncrementalState(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_incremental_state()
def init_incremental_state(self):
self._incremental_state_id = str(uuid.uuid4())
def _get_full_incremental_state_key(self, key: 'str') ->str:
return '{}.{}'.format(self._incremental_state_id, key)
def get_incremental_state(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str'
) ->Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]', key: 'str',
value: 'Dict[str, Optional[Tensor]]') ->Optional[Dict[str, Dict[str,
Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
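# Sketch of how the incremental-state helpers namespace their keys (illustrative):
#   state = {}
#   module.set_incremental_state(state, 'attn_state', {'prev_key': k})  # stored under '<uuid>.attn_state'
#   buf = module.get_incremental_state(state, 'attn_state')             # same dict back, or None if absent
# The per-instance uuid keeps two modules that share one incremental_state dict from colliding.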
@with_incremental_state
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=
0.0, bias=True, add_bias_kv=False, add_zero_attn=False,
self_attention=False, encoder_decoder_attention=False):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and value to be of the same size'
self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
self.enable_torch_version = False
if hasattr(F, 'multi_head_attention_forward'):
self.enable_torch_version = True
else:
self.enable_torch_version = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key: 'Optional[Tensor]', value:
'Optional[Tensor]', key_padding_mask: 'Optional[Tensor]'=None,
incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]'=None,
need_weights: 'bool'=True, static_kv: 'bool'=False, attn_mask:
'Optional[Tensor]'=None, before_softmax: 'bool'=False,
need_head_weights: 'bool'=False) ->Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if (self.enable_torch_version and not self.onnx_trace and
incremental_state is None and not static_kv and not torch.jit.
is_scripting() and not need_head_weights):
assert key is not None and value is not None
return F.multi_head_attention_forward(query, key, value, self.
embed_dim, self.num_heads, torch.empty([0]), torch.cat((
self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k, self.bias_v, self.add_zero_attn, self.dropout,
self.out_proj.weight, self.out_proj.bias, self.training,
key_padding_mask, need_weights, attn_mask,
use_separate_proj_weight=True, q_proj_weight=self.q_proj.
weight, k_proj_weight=self.k_proj.weight, v_proj_weight=
self.v_proj.weight)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and 'prev_key' in saved_state:
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1)
], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if saved_state is not None:
if 'prev_key' in saved_state:
_prev_key = saved_state['prev_key']
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.
head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if 'prev_value' in saved_state:
_prev_value = saved_state['prev_value']
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1,
self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: 'Optional[Tensor]' = None
if 'prev_key_padding_mask' in saved_state:
prev_key_padding_mask = saved_state['prev_key_padding_mask']
assert k is not None and v is not None
key_padding_mask = (MultiheadAttention.
_append_prev_key_padding_mask(key_padding_mask=
key_padding_mask, prev_key_padding_mask=
prev_key_padding_mask, batch_size=bsz, src_len=k.size(1),
static_kv=static_kv))
saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.
head_dim)
saved_state['prev_value'] = v.view(bsz, self.num_heads, -1,
self.head_dim)
saved_state['prev_key_padding_mask'] = key_padding_mask
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state,
saved_state)
assert k is not None
src_len = k.size(1)
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask, torch.zeros
(key_padding_mask.size(0), 1).type_as(key_padding_mask)
], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights,
tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
attn_weights = attn_weights.masked_fill(key_padding_mask.
unsqueeze(1).unsqueeze(2), float('-inf'))
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len,
src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = utils_softmax(attn_weights, dim=-1, onnx_trace
=self.onnx_trace)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=
self.dropout, training=self.training)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
head_dim]
if self.onnx_trace and attn.size(1) == 1:
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz,
embed_dim)
attn = self.out_proj(attn)
attn_weights: 'Optional[Tensor]' = None
if need_weights:
attn_weights = attn_weights_float.view(bsz, self.num_heads,
tgt_len, src_len).transpose(1, 0)
if not need_head_weights:
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(key_padding_mask: 'Optional[Tensor]',
prev_key_padding_mask: 'Optional[Tensor]', batch_size: 'int',
src_len: 'int', static_kv: 'bool') ->Optional[Tensor]:
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(),
key_padding_mask.float()], dim=1)
elif prev_key_padding_mask is not None:
filler = torch.zeros((batch_size, src_len -
prev_key_padding_mask.size(1)), device=
prev_key_padding_mask.device)
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(),
filler.float()], dim=1)
elif key_padding_mask is not None:
filler = torch.zeros((batch_size, src_len - key_padding_mask.
size(1)), device=key_padding_mask.device)
new_key_padding_mask = torch.cat([filler.float(),
key_padding_mask.float()], dim=1)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(self, incremental_state:
'Dict[str, Dict[str, Optional[Tensor]]]', new_order: 'Tensor'):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state,
input_buffer)
return incremental_state
def _get_input_buffer(self, incremental_state:
'Optional[Dict[str, Dict[str, Optional[Tensor]]]]') ->Dict[str,
Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, 'attn_state')
if result is not None:
return result
else:
empty_result: 'Dict[str, Optional[Tensor]]' = {}
return empty_result
def _set_input_buffer(self, incremental_state:
'Dict[str, Dict[str, Optional[Tensor]]]', buffer:
'Dict[str, Optional[Tensor]]'):
return self.set_incremental_state(incremental_state, 'attn_state',
buffer)
def apply_sparse_mask(attn_weights, tgt_len: 'int', src_len: 'int', bsz:
'int'):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + '.' if name != '' else ''
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + 'in_proj_weight'):
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + 'q_proj.weight'] = state_dict[k][:dim]
items_to_add[prefix + 'k_proj.weight'] = state_dict[k][dim:
2 * dim]
items_to_add[prefix + 'v_proj.weight'] = state_dict[k][2 * dim:
]
keys_to_remove.append(k)
k_bias = prefix + 'in_proj_bias'
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + 'q_proj.bias'] = state_dict[k_bias][:
dim]
items_to_add[prefix + 'k_proj.bias'] = state_dict[k_bias][
dim:2 * dim]
items_to_add[prefix + 'v_proj.bias'] = state_dict[k_bias][
2 * dim:]
keys_to_remove.append(prefix + 'in_proj_bias')
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
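# Minimal usage sketch for MultiheadAttention (illustrative; inputs follow the
# "Time x Batch x Channel" layout documented in forward()):
#   mha = MultiheadAttention(embed_dim=4, num_heads=2, self_attention=True)
#   x = torch.rand(5, 3, 4)            # (tgt_len, bsz, embed_dim)
#   attn, attn_weights = mha(x, x, x)  # attn: (5, 3, 4); attn_weights averaged over heads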
class TransformerLayerNew(nn.Module):
"""Transformer layer block."""
def __init__(self, embed_dim, ffn_embed_dim, attention_heads,
add_bias_kv=True, use_esm1b_layer_norm=False):
super().__init__()
self.embed_dim = embed_dim
self.ffn_embed_dim = ffn_embed_dim
self.attention_heads = attention_heads
self._init_submodules(add_bias_kv, use_esm1b_layer_norm)
def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm):
BertLayerNorm = (ESM1bLayerNorm if use_esm1b_layer_norm else
ESM1LayerNorm)
self.self_attn = MultiheadAttention(self.embed_dim, self.
attention_heads, add_bias_kv=add_bias_kv, add_zero_attn=False)
self.self_attn_layer_norm = BertLayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, self.ffn_embed_dim)
self.fc2 = nn.Linear(self.ffn_embed_dim, self.embed_dim)
self.final_layer_norm = BertLayerNorm(self.embed_dim)
def forward(self, input_0):
primals_7 = self.self_attn.bias_k
primals_8 = self.self_attn.bias_v
primals_9 = self.self_attn.k_proj.weight
primals_2 = self.self_attn.k_proj.bias
primals_11 = self.self_attn.v_proj.weight
primals_3 = self.self_attn.v_proj.bias
primals_12 = self.self_attn.q_proj.weight
primals_4 = self.self_attn.q_proj.bias
primals_13 = self.self_attn.out_proj.weight
primals_5 = self.self_attn.out_proj.bias
primals_6 = self.self_attn_layer_norm.weight
primals_10 = self.self_attn_layer_norm.bias
primals_16 = self.fc1.weight
primals_14 = self.fc1.bias
primals_18 = self.fc2.weight
primals_15 = self.fc2.bias
primals_17 = self.final_layer_norm.weight
primals_19 = self.final_layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0], output[1]
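        # Added note: output[0] is the transformed sequence; output[1] (buf16 in call())
        # appears to be the head-averaged attention weights produced by
        # triton_poi_fused_mean_8, mirroring the (x, attn) pair of the original layer.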
|
sohrabi1/esm
|
TransformerLayer
| false
| 10,940
|
[
"MIT"
] | 0
|
e1f60a66b5c351d9d0011926549890b6744903c1
|
https://github.com/sohrabi1/esm/tree/e1f60a66b5c351d9d0011926549890b6744903c1
|
Pooling
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class ReLUConvBN(nn.Module):
"""
Parameters
---
C_in: int
the number of input channels
C_out: int
the number of output channels
stride: int
stride of the convolution
padding: int
zero-padding added to both sides of the input
dilation: int
spacing between kernel elements
bn_affine: bool
If set to ``True``, ``torch.nn.BatchNorm2d`` will have learnable affine parameters. Default: True
    bn_momentum: float
the value used for the running_mean and running_var computation. Default: 0.1
bn_track_running_stats: bool
When set to ``True``, ``torch.nn.BatchNorm2d`` tracks the running mean and variance. Default: True
"""
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation,
bn_affine=True, bn_momentum=0.1, bn_track_running_stats=True):
super(ReLUConvBN, self).__init__()
self.op = nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(C_in,
C_out, kernel_size, stride=stride, padding=padding, dilation=
dilation, bias=False), nn.BatchNorm2d(C_out, affine=bn_affine,
momentum=bn_momentum, track_running_stats=bn_track_running_stats))
def forward(self, x):
"""
Parameters
---
x: torch.Tensor
input tensor
"""
return self.op(x)
class Pooling(nn.Module):
"""
Parameters
---
C_in: int
the number of input channels
C_out: int
the number of output channels
stride: int
stride of the convolution
bn_affine: bool
If set to ``True``, ``torch.nn.BatchNorm2d`` will have learnable affine parameters. Default: True
    bn_momentum: float
the value used for the running_mean and running_var computation. Default: 0.1
bn_track_running_stats: bool
When set to ``True``, ``torch.nn.BatchNorm2d`` tracks the running mean and variance. Default: True
"""
def __init__(self, C_in, C_out, stride, bn_affine=True, bn_momentum=0.1,
bn_track_running_stats=True):
super(Pooling, self).__init__()
if C_in == C_out:
self.preprocess = None
else:
self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0, 0, bn_affine,
bn_momentum, bn_track_running_stats)
self.op = nn.AvgPool2d(3, stride=stride, padding=1,
count_include_pad=False)
def forward(self, x):
"""
Parameters
---
x: torch.Tensor
input tensor
"""
if self.preprocess:
x = self.preprocess(x)
return self.op(x)
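# Usage sketch (illustrative): with C_in == C_out the preprocess branch is skipped and
# Pooling reduces to a 3x3 average pool with padding 1 and count_include_pad=False:
#   pool = Pooling(C_in=4, C_out=4, stride=1)
#   y = pool(torch.rand(4, 4, 4, 4))  # spatial size preserved since stride == 1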
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'C_in': 4, 'C_out': 4, 'stride': 1}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + x4), tmp10 & xmask, other=0.0)
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + x4), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + x4), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + x4), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x4, tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + x4), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + x4), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + x4), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -
1 + x1) + (-1 + x1) * (-1 + x1 > 0)) + (4 * (4 <= 2 + x0) + (2 + x0
) * (2 + x0 < 4)) * (4 * (4 <= 2 + x1) + (2 + x1) * (2 + x1 < 4)
) + -1 * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (4 * (4 <=
2 + x1) + (2 + x1) * (2 + x1 < 4)) + -1 * (0 * (0 >= -1 + x1) + (-1 +
x1) * (-1 + x1 > 0)) * (4 * (4 <= 2 + x0) + (2 + x0) * (2 + x0 < 4))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x4, tmp53, xmask)
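# Added note: the divisor tmp52 above counts only the valid (non-padded) elements of
# each 3x3 window, matching AvgPool2d(count_include_pad=False); corner windows divide
# by 4 and edge windows by 6 rather than 9.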
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ReLUConvBN(nn.Module):
"""
Parameters
---
C_in: int
the number of input channels
C_out: int
the number of output channels
stride: int
stride of the convolution
padding: int
zero-padding added to both sides of the input
dilation: int
spacing between kernel elements
bn_affine: bool
If set to ``True``, ``torch.nn.BatchNorm2d`` will have learnable affine parameters. Default: True
    bn_momentum: float
the value used for the running_mean and running_var computation. Default: 0.1
bn_track_running_stats: bool
When set to ``True``, ``torch.nn.BatchNorm2d`` tracks the running mean and variance. Default: True
"""
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation,
bn_affine=True, bn_momentum=0.1, bn_track_running_stats=True):
super(ReLUConvBN, self).__init__()
self.op = nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(C_in,
C_out, kernel_size, stride=stride, padding=padding, dilation=
dilation, bias=False), nn.BatchNorm2d(C_out, affine=bn_affine,
momentum=bn_momentum, track_running_stats=bn_track_running_stats))
def forward(self, x):
"""
Parameters
---
x: torch.Tensor
input tensor
"""
return self.op(x)
class PoolingNew(nn.Module):
"""
Parameters
---
C_in: int
the number of input channels
C_out: int
the number of output channels
stride: int
stride of the convolution
bn_affine: bool
If set to ``True``, ``torch.nn.BatchNorm2d`` will have learnable affine parameters. Default: True
    bn_momentum: float
the value used for the running_mean and running_var computation. Default: 0.1
bn_track_running_stats: bool
When set to ``True``, ``torch.nn.BatchNorm2d`` tracks the running mean and variance. Default: True
"""
def __init__(self, C_in, C_out, stride, bn_affine=True, bn_momentum=0.1,
bn_track_running_stats=True):
super(PoolingNew, self).__init__()
if C_in == C_out:
self.preprocess = None
else:
self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0, 0, bn_affine,
bn_momentum, bn_track_running_stats)
self.op = nn.AvgPool2d(3, stride=stride, padding=1,
count_include_pad=False)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
rmfan/nni
|
Pooling
| false
| 10,941
|
[
"MIT"
] | 0
|
727ee1ce47e070061fe3dab8a2da5d3cd5e55546
|
https://github.com/rmfan/nni/tree/727ee1ce47e070061fe3dab8a2da5d3cd5e55546
|
ResidualBlock
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.functional as F
class ResidualBlock(nn.Module):
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.channels = channels
self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
def forward(self, x):
y = F.relu(self.conv1(x))
y = self.conv2(y)
return F.relu(x + y)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr0 + x3, tmp8, xmask)
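# Added note: the kernel above fuses the residual add, the conv2 bias add and the final
# ReLU of ResidualBlock.forward, and also stores a boolean mask (output <= 0) that the
# ReLU backward pass ("threshold_backward") can reuse.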
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf3, primals_3, primals_5, buf4, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf4
class ResidualBlockNew(nn.Module):
def __init__(self, channels):
super(ResidualBlockNew, self).__init__()
self.channels = channels
self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
vanthq/EarRecognition
|
ResidualBlock
| false
| 10,942
|
[
"MIT"
] | 0
|
7decddc97c4b27cd8457308b3d3836388936e7a8
|
https://github.com/vanthq/EarRecognition/tree/7decddc97c4b27cd8457308b3d3836388936e7a8
|
ProdAttention
|
import math
import torch
import torch.nn as nn
import torch.optim
class ProdAttention(nn.Module):
def __init__(self, log_t=False):
super(ProdAttention, self).__init__()
self.log_t = log_t
def forward(self, eh, dhx, ax=None):
pax = eh * dhx
pax = torch.sum(pax, dim=2)
if self.log_t:
log_t = math.log(pax.size()[1])
pax = log_t * pax
ax = nn.functional.softmax(pax, dim=1)
sx = ax.unsqueeze(2)
sx = torch.sum(eh * sx, dim=1, keepdim=True)
return sx, ax
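# Shape sketch (illustrative), using (B, T, H, D) inputs as in get_inputs():
#   pax = sum(eh * dhx, dim=2)                      -> (B, T, D)
#   ax  = softmax(pax, dim=1)                       -> (B, T, D) attention weights over T
#   sx  = sum(eh * ax.unsqueeze(2), dim=1, keepdim) -> (B, 1, H, D) attended summary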
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
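# Added note: the two kernels above implement a numerically stable softmax over dim=1
# in two passes: _softmax_1 subtracts the per-position maximum and exponentiates,
# _softmax_2 divides by the sum of those exponentials.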
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_2[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf1
triton_poi_fused_mul_sum_3[grid(64)](arg0_1, buf2, buf3, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del arg0_1
return buf3, buf2
class ProdAttentionNew(nn.Module):
def __init__(self, log_t=False):
super(ProdAttentionNew, self).__init__()
self.log_t = log_t
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
|
wgfi110/speech
|
ProdAttention
| false
| 10,943
|
[
"Apache-2.0"
] | 0
|
59a3e2d8d2d99d31cf32e06c1a0751eb36a3c02b
|
https://github.com/wgfi110/speech/tree/59a3e2d8d2d99d31cf32e06c1a0751eb36a3c02b
|
BackboneModel1
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class BackboneModel1(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 1, 1, 1)
def forward(self, x):
return self.conv1(x)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16384)](buf1, primals_2, 16384,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class BackboneModel1New(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 1, 1, 1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
rmfan/nni
|
BackboneModel1
| false
| 10,944
|
[
"MIT"
] | 0
|
727ee1ce47e070061fe3dab8a2da5d3cd5e55546
|
https://github.com/rmfan/nni/tree/727ee1ce47e070061fe3dab8a2da5d3cd5e55546
|
BCE_LOSS
|
import math
import torch
from torch.nn.modules.loss import _Loss
import torch.optim
import torch.nn
class BCE_LOSS(_Loss):
def __init__(self):
super().__init__()
self.bce_loss = torch.nn.BCEWithLogitsLoss()
def forward(self, input, label):
one_hot = torch.zeros_like(input)
C = input.size(1)
label = label.reshape(one_hot.shape[0], 1)
one_hot.scatter_(1, label, 1)
loss = self.bce_loss(input - math.log(C), one_hot) * C
return loss
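# Worked example (illustrative): for the (4, 2) input from get_inputs(), C = 2, so the
# logits are shifted by log(2) ~= 0.6931 before BCEWithLogitsLoss and the mean loss is
# scaled back by C = 2; these correspond to the constant 0.6931471805599453, the /8.0
# mean over 4*2 elements, and the final *2.0 in the generated Triton kernel.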
def get_inputs():
return [torch.rand([4, 2]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
import torch.optim
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mul_scatter_sub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex // 2
r0 = rindex % 2
r2 = rindex
tmp0 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + r2, None)
tmp1 = r0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tmp3 - tmp5
tmp8 = 0.6931471805599453
tmp9 = tmp7 - tmp8
tmp10 = tmp6 * tmp9
tmp11 = triton_helpers.minimum(tmp4, tmp9)
tmp12 = tl_math.abs(tmp9)
tmp13 = -tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = libdevice.log1p(tmp14)
tmp16 = tmp11 - tmp15
tmp17 = tmp10 - tmp16
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.sum(tmp18, 1)[:, None]
tmp21 = 8.0
tmp22 = tmp20 / tmp21
tmp23 = 2.0
tmp24 = tmp22 * tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 2), (2, 1))
assert_size_stride(arg1_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mul_scatter_sub_0[
grid(1)](buf1, arg1_1, arg0_1, 1, 8, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BCE_LOSSNew(_Loss):
def __init__(self):
super().__init__()
self.bce_loss = torch.nn.BCEWithLogitsLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
www516717402/EOD
|
BCE_LOSS
| false
| 10,945
|
[
"Apache-2.0"
] | 0
|
89ee81a0cb5a5f64a8f788248e2bb3eccee7006d
|
https://github.com/www516717402/EOD/tree/89ee81a0cb5a5f64a8f788248e2bb3eccee7006d
|
Conv2dLocal
|
from torch.nn import Module
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _pair
from torch.nn.functional import unfold
from torch.nn import Parameter
def conv2d_local(input: 'torch.Tensor', weight: 'torch.Tensor', bias=None,
padding: 'Pairable'=0, stride: 'Pairable'=1, dilation: 'Pairable'=1,
data_format: 'str'='channels_first'):
"""Calculate the local convolution.
Args:
        input: 4D input tensor, (N, C_in, H, W) for channels_first or (N, H, W, C_in) for channels_last.
        weight: 6D weight tensor (out_height, out_width, out_channels, in_channels, kernel_height, kernel_width).
        bias: optional bias tensor, broadcast over the output.
        padding: zero-padding added to both sides of the input.
        stride: stride of the sliding window.
        dilation: spacing between kernel elements.
        data_format: For Keras compatibility
    Returns:
        The locally convolved output, (N, C_out, out_height, out_width) for channels_first
        or (N, out_height, out_width, C_out) for channels_last.
"""
if input.dim() != 4:
raise NotImplementedError(
'Input Error: Only 4D input Tensors supported (got {}D)'.format
(input.dim()))
if weight.dim() != 6:
raise NotImplementedError(
'Input Error: Only 6D weight Tensors supported (got {}D)'.
format(weight.dim()))
(out_height, out_width, out_channels, in_channels, kernel_height,
kernel_width) = weight.size()
kernel_size = kernel_height, kernel_width
if data_format == 'channels_first':
cols = unfold(input, kernel_size, dilation=dilation, padding=
padding, stride=stride)
reshaped_input = cols.view(cols.size(0), cols.size(1), cols.size(2), 1
).permute(0, 2, 3, 1)
else:
stride_y, stride_x = _pair(stride)
feature_dim = in_channels * kernel_height * kernel_width
xs = []
for i in range(out_height):
for j in range(out_width):
y = i * stride_y
slice_row = slice(y, y + kernel_size[0])
x = j * stride_x
slice_col = slice(x, x + kernel_size[1])
val = input[:, slice_row, slice_col, :].contiguous()
xs.append(val.view(input.shape[0], 1, -1, feature_dim))
concated = torch.cat(xs, dim=1)
reshaped_input = concated
output_size = out_height * out_width
input_size = in_channels * kernel_height * kernel_width
weights_view = weight.view(output_size, out_channels, input_size)
permuted_weights = weights_view.permute(0, 2, 1)
out = torch.matmul(reshaped_input, permuted_weights)
out = out.view(reshaped_input.shape[0], out_height, out_width, out_channels
).permute(0, 3, 1, 2)
if data_format == 'channels_last':
out = out.permute(0, 2, 3, 1)
if bias is not None:
final_bias = bias.expand_as(out)
out = out + final_bias
return out
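# Shape sketch for the channels_first path (illustrative):
#   cols = unfold(input, (kh, kw), ...)          -> (N, C_in*kh*kw, out_h*out_w)
#   reshaped_input                               -> (N, out_h*out_w, 1, C_in*kh*kw)
#   weights_view.permute(0, 2, 1)                -> (out_h*out_w, C_in*kh*kw, C_out)
#   matmul broadcasts over the location dim      -> (N, out_h*out_w, 1, C_out)
#   view + permute                               -> (N, C_out, out_h, out_w)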
class Conv2dLocal(Module):
"""A 2D locally connected layer.
Attributes:
weight (torch.Tensor): The weights. out_height x out_width x out_channels x in_channels x kernel_height x kernel_width
kernel_size (Tuple[int, int]): The height and width of the convolutional kernels.
stride (Tuple[int, int]): The stride height and width.
"""
def __init__(self, in_height: 'int', in_width: 'int', in_channels:
'int', out_channels: 'int', kernel_size: 'Pairable', stride:
'Pairable'=1, padding: 'Pairable'=0, bias: 'bool'=True, dilation:
'Pairable'=1, data_format='channels_first'):
super(Conv2dLocal, self).__init__()
self.data_format = data_format
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.in_height = in_height
self.in_width = in_width
self.out_height = int(math.floor((in_height + 2 * self.padding[0] -
self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride
[0] + 1))
self.out_width = int(math.floor((in_width + 2 * self.padding[1] -
self.dilation[1] * (self.kernel_size[1] - 1) - 1) / self.stride
[1] + 1))
self.out_channels = out_channels
self.weight = Parameter(torch.Tensor(self.out_height, self.
out_width, out_channels, in_channels, *self.kernel_size))
if bias:
if self.data_format == 'channels_first':
self.bias = Parameter(torch.Tensor(out_channels, self.
out_height, self.out_width))
else:
self.bias = Parameter(torch.Tensor(self.out_height, self.
out_width, out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
@property
def input_shape(self):
"""The expected input shape for this module."""
if self.data_format == 'channels_first':
shape = self.in_channels, self.in_height, self.in_width
else:
shape = self.in_height, self.in_width, self.in_channels
return torch.Tensor(shape)
def reset_parameters(self):
"""Reset the parameters of the layer."""
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def __repr__(self):
s = (
'{name}({in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
)
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.bias is None:
s += ', bias=False'
s += ')'
return s.format(name=self.__class__.__name__, **self.__dict__)
def forward(self, input: 'torch.Tensor'):
return conv2d_local(input, self.weight, self.bias, stride=self.
stride, padding=self.padding, dilation=self.dilation,
data_format=self.data_format)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_height': 4, 'in_width': 4, 'in_channels': 4,
'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import math
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _pair
from torch.nn.functional import unfold
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_im2col_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 4, 4, 4, 4), (256, 256, 64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1, 4, 1), (64, 16, 4, 4, 1, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_im2col_0[grid(256)](primals_3, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 64), (64, 0, 1),
0), reinterpret_tensor(primals_1, (4, 64, 4), (0, 1, 64), 0),
out=buf1)
del primals_1
buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 4, 4), 0)
del buf1
triton_poi_fused_add_1[grid(16)](buf2, primals_2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
return buf2, reinterpret_tensor(buf0, (4, 64, 1), (64, 1, 4), 0)
def conv2d_local(input: 'torch.Tensor', weight: 'torch.Tensor', bias=None,
padding: 'Pairable'=0, stride: 'Pairable'=1, dilation: 'Pairable'=1,
data_format: 'str'='channels_first'):
"""Calculate the local convolution.
Args:
        input: 4D input tensor, (N, C_in, H, W) for channels_first or (N, H, W, C_in) for channels_last.
        weight: 6D weight tensor (out_height, out_width, out_channels, in_channels, kernel_height, kernel_width).
        bias: optional bias tensor, broadcast over the output.
        padding: zero-padding added to both sides of the input.
        stride: stride of the sliding window.
        dilation: spacing between kernel elements.
        data_format: For Keras compatibility
    Returns:
        The locally convolved output, (N, C_out, out_height, out_width) for channels_first
        or (N, out_height, out_width, C_out) for channels_last.
"""
if input.dim() != 4:
raise NotImplementedError(
'Input Error: Only 4D input Tensors supported (got {}D)'.format
(input.dim()))
if weight.dim() != 6:
raise NotImplementedError(
'Input Error: Only 6D weight Tensors supported (got {}D)'.
format(weight.dim()))
(out_height, out_width, out_channels, in_channels, kernel_height,
kernel_width) = weight.size()
kernel_size = kernel_height, kernel_width
if data_format == 'channels_first':
cols = unfold(input, kernel_size, dilation=dilation, padding=
padding, stride=stride)
reshaped_input = cols.view(cols.size(0), cols.size(1), cols.size(2), 1
).permute(0, 2, 3, 1)
else:
stride_y, stride_x = _pair(stride)
feature_dim = in_channels * kernel_height * kernel_width
xs = []
for i in range(out_height):
for j in range(out_width):
y = i * stride_y
slice_row = slice(y, y + kernel_size[0])
x = j * stride_x
slice_col = slice(x, x + kernel_size[1])
val = input[:, slice_row, slice_col, :].contiguous()
xs.append(val.view(input.shape[0], 1, -1, feature_dim))
concated = torch.cat(xs, dim=1)
reshaped_input = concated
output_size = out_height * out_width
input_size = in_channels * kernel_height * kernel_width
weights_view = weight.view(output_size, out_channels, input_size)
permuted_weights = weights_view.permute(0, 2, 1)
out = torch.matmul(reshaped_input, permuted_weights)
out = out.view(reshaped_input.shape[0], out_height, out_width, out_channels
).permute(0, 3, 1, 2)
if data_format == 'channels_last':
out = out.permute(0, 2, 3, 1)
if bias is not None:
final_bias = bias.expand_as(out)
out = out + final_bias
return out
class Conv2dLocalNew(Module):
"""A 2D locally connected layer.
Attributes:
weight (torch.Tensor): The weights. out_height x out_width x out_channels x in_channels x kernel_height x kernel_width
kernel_size (Tuple[int, int]): The height and width of the convolutional kernels.
stride (Tuple[int, int]): The stride height and width.
"""
def __init__(self, in_height: 'int', in_width: 'int', in_channels:
'int', out_channels: 'int', kernel_size: 'Pairable', stride:
'Pairable'=1, padding: 'Pairable'=0, bias: 'bool'=True, dilation:
'Pairable'=1, data_format='channels_first'):
super(Conv2dLocalNew, self).__init__()
self.data_format = data_format
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.in_height = in_height
self.in_width = in_width
self.out_height = int(math.floor((in_height + 2 * self.padding[0] -
self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride
[0] + 1))
self.out_width = int(math.floor((in_width + 2 * self.padding[1] -
self.dilation[1] * (self.kernel_size[1] - 1) - 1) / self.stride
[1] + 1))
self.out_channels = out_channels
self.weight = Parameter(torch.Tensor(self.out_height, self.
out_width, out_channels, in_channels, *self.kernel_size))
if bias:
if self.data_format == 'channels_first':
self.bias = Parameter(torch.Tensor(out_channels, self.
out_height, self.out_width))
else:
self.bias = Parameter(torch.Tensor(self.out_height, self.
out_width, out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
@property
def input_shape(self):
"""The expected input shape for this module."""
if self.data_format == 'channels_first':
shape = self.in_channels, self.in_height, self.in_width
else:
shape = self.in_height, self.in_width, self.in_channels
return torch.Tensor(shape)
def reset_parameters(self):
"""Reset the parameters of the layer."""
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def __repr__(self):
s = (
'{name}({in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
)
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.bias is None:
s += ', bias=False'
s += ')'
return s.format(name=self.__class__.__name__, **self.__dict__)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
vluzko/keras_to_pytorch
|
Conv2dLocal
| false
| 10,946
|
[
"MIT"
] | 0
|
eefb3f77024b3a3b75e918b93316c12bb9338f1c
|
https://github.com/vluzko/keras_to_pytorch/tree/eefb3f77024b3a3b75e918b93316c12bb9338f1c
|
InceptionA
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.functional as F
class InceptionA(nn.Module):
def __init__(self, in_channels):
super(InceptionA, self).__init__()
self.branch1x1 = nn.Conv2d(in_channels, 16, kernel_size=1)
self.branch5x5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
self.branch5x5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)
self.branch3x3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
self.branch3x3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
self.branch3x3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)
self.branch_pool = nn.Conv2d(in_channels, 24, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch3x3 = self.branch3x3_3(branch3x3)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3, branch_pool]
return torch.cat(outputs, dim=1)
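# Channel bookkeeping (illustrative): the concatenation yields
# 16 (1x1) + 24 (5x5 branch) + 24 (double 3x3 branch) + 24 (pool branch) = 88 channels,
# matching the 16 / 40 / 64 / 88 boundaries used by the generated fused cat kernel.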
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 24
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + x4), tmp10 & xmask, other=0.0)
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + x4), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + x4), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + x4), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x4, tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + x4), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + x4), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + x4), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = 1 + -1 * x0 + -1 * x1 + x0 * x1 + (5 * (5 <= 2 + x0) + (2 + x0) *
(2 + x0 < 5)) * (5 * (5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5)
) + -1 * x0 * (5 * (5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5)
) + -1 * x1 * (5 * (5 <= 2 + x0) + (2 + x0) * (2 + x0 < 5)) + (5 *
(5 <= 2 + x0) + (2 + x0) * (2 + x0 < 5)) + (5 * (5 <= 2 + x1) + (2 +
x1) * (2 + x1 < 5))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x4, tmp53, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 5632
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 88
x0 = xindex % 16
x2 = xindex // 1408
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 256 * x2), tmp4 & xmask, other=0.0
)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 40, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (x0 + 16 * (-16 + x1) + 384 * x2), tmp13 &
xmask, other=0.0)
tmp15 = tl.load(in_ptr3 + (-16 + x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 64, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr4 + (x0 + 16 * (-40 + x1) + 384 * x2), tmp22 &
xmask, other=0.0)
tmp24 = tl.load(in_ptr5 + (-40 + x1), tmp22 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tl.full([1], 88, tl.int64)
tmp31 = tl.load(in_ptr6 + (x0 + 16 * (-64 + x1) + 384 * x2), tmp28 &
xmask, other=0.0)
tmp32 = tl.load(in_ptr7 + (-64 + x1), tmp28 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp28, tmp33, tmp34)
tmp36 = tl.where(tmp22, tmp27, tmp35)
tmp37 = tl.where(tmp13, tmp18, tmp36)
tmp38 = tl.where(tmp4, tmp9, tmp37)
tl.store(out_ptr0 + x3, tmp38, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (24, 16, 5, 5), (400, 25, 5, 1))
assert_size_stride(primals_7, (24,), (1,))
assert_size_stride(primals_8, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (24, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_11, (24,), (1,))
assert_size_stride(primals_12, (24, 24, 3, 3), (216, 9, 3, 1))
assert_size_stride(primals_13, (24,), (1,))
assert_size_stride(primals_14, (24, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_15, (24,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 16, 4, 4), (256, 16, 4, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(1024)](buf2, primals_5, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 24, 4, 4), (384, 16, 4, 1))
buf4 = extern_kernels.convolution(primals_3, primals_8, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 4, 4), (256, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_0[grid(1024)](buf5, primals_9, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf6 = extern_kernels.convolution(buf5, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 24, 4, 4), (384, 16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_1[grid(1536)](buf7, primals_11, 1536,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf8 = extern_kernels.convolution(buf7, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 24, 4, 4), (384, 16, 4, 1))
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_avg_pool2d_2[grid(256)](primals_3, buf9, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 24, 4, 4), (384, 16, 4, 1))
buf11 = empty_strided_cuda((4, 88, 4, 4), (1408, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_3[grid(5632)](buf0, primals_2, buf3, primals_7,
buf8, primals_13, buf10, primals_15, buf11, 5632, XBLOCK=256,
num_warps=4, num_stages=1)
del buf0
del buf10
del buf3
del buf8
del primals_13
del primals_15
del primals_2
del primals_7
return (buf11, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf2, buf5, buf7, buf9)
class InceptionANew(nn.Module):
def __init__(self, in_channels):
super(InceptionANew, self).__init__()
self.branch1x1 = nn.Conv2d(in_channels, 16, kernel_size=1)
self.branch5x5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
self.branch5x5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)
self.branch3x3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
self.branch3x3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
self.branch3x3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)
self.branch_pool = nn.Conv2d(in_channels, 24, kernel_size=1)
def forward(self, input_0):
primals_1 = self.branch1x1.weight
primals_2 = self.branch1x1.bias
primals_4 = self.branch5x5_1.weight
primals_5 = self.branch5x5_1.bias
primals_6 = self.branch5x5_2.weight
primals_7 = self.branch5x5_2.bias
primals_8 = self.branch3x3_1.weight
primals_9 = self.branch3x3_1.bias
primals_10 = self.branch3x3_2.weight
primals_11 = self.branch3x3_2.bias
primals_12 = self.branch3x3_3.weight
primals_13 = self.branch3x3_3.bias
primals_14 = self.branch_pool.weight
primals_15 = self.branch_pool.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
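# Minimal usage sketch, assuming a CUDA device (call() allocates CUDA buffers and
# launches the Triton kernels above). The four branches contribute 16 + 24 + 24 + 24
# = 88 output channels, so a (4, 4, 4, 4) input yields a (4, 88, 4, 4) output,
# matching torch.cat(outputs, dim=1) in the eager module.
if __name__ == '__main__':
    model = InceptionANew(in_channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(x)
    assert out.shape == (4, 88, 4, 4)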
|
vanthq/EarRecognition
|
InceptionA
| false
| 10,947
|
[
"MIT"
] | 0
|
7decddc97c4b27cd8457308b3d3836388936e7a8
|
https://github.com/vanthq/EarRecognition/tree/7decddc97c4b27cd8457308b3d3836388936e7a8
|
FreqEncoder
|
import torch
import torch.nn as nn
class FreqEncoder(nn.Module):
def __init__(self, input_dim, max_freq_log2, N_freqs, log_sampling=True,
include_input=True, periodic_fns=(torch.sin, torch.cos)):
super().__init__()
self.input_dim = input_dim
self.include_input = include_input
self.periodic_fns = periodic_fns
self.output_dim = 0
if self.include_input:
self.output_dim += self.input_dim
self.output_dim += self.input_dim * N_freqs * len(self.periodic_fns)
if log_sampling:
self.freq_bands = 2.0 ** torch.linspace(0.0, max_freq_log2, N_freqs
)
else:
self.freq_bands = torch.linspace(2.0 ** 0.0, 2.0 **
max_freq_log2, N_freqs)
self.freq_bands = self.freq_bands.numpy().tolist()
def forward(self, input, **kwargs):
out = []
if self.include_input:
out.append(input)
for i in range(len(self.freq_bands)):
freq = self.freq_bands[i]
for p_fn in self.periodic_fns:
out.append(p_fn(input * freq))
out = torch.cat(out, dim=-1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'max_freq_log2': 4, 'N_freqs': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_cos_mul_sin_0(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.sin(tmp2)
tmp4 = tl_math.cos(tmp2)
tmp5 = 2.5198421478271484
tmp6 = tmp0 * tmp5
tmp7 = tl_math.sin(tmp6)
tmp8 = tl_math.cos(tmp6)
tmp9 = 6.349603652954102
tmp10 = tmp0 * tmp9
tmp11 = tl_math.sin(tmp10)
tmp12 = tl_math.cos(tmp10)
tmp13 = 16.0
tmp14 = tmp0 * tmp13
tmp15 = tl_math.sin(tmp14)
tmp16 = tl_math.cos(tmp14)
tl.store(out_ptr0 + (x0 + 36 * x1), tmp0, xmask)
tl.store(out_ptr1 + (x0 + 36 * x1), tmp3, xmask)
tl.store(out_ptr2 + (x0 + 36 * x1), tmp4, xmask)
tl.store(out_ptr3 + (x0 + 36 * x1), tmp7, xmask)
tl.store(out_ptr4 + (x0 + 36 * x1), tmp8, xmask)
tl.store(out_ptr5 + (x0 + 36 * x1), tmp11, xmask)
tl.store(out_ptr6 + (x0 + 36 * x1), tmp12, xmask)
tl.store(out_ptr7 + (x0 + 36 * x1), tmp15, xmask)
tl.store(out_ptr8 + (x0 + 36 * x1), tmp16, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf9 = empty_strided_cuda((4, 4, 4, 36), (576, 144, 36, 1), torch.
float32)
buf0 = reinterpret_tensor(buf9, (4, 4, 4, 4), (576, 144, 36, 1), 0)
buf1 = reinterpret_tensor(buf9, (4, 4, 4, 4), (576, 144, 36, 1), 4)
buf2 = reinterpret_tensor(buf9, (4, 4, 4, 4), (576, 144, 36, 1), 8)
buf3 = reinterpret_tensor(buf9, (4, 4, 4, 4), (576, 144, 36, 1), 12)
buf4 = reinterpret_tensor(buf9, (4, 4, 4, 4), (576, 144, 36, 1), 16)
buf5 = reinterpret_tensor(buf9, (4, 4, 4, 4), (576, 144, 36, 1), 20)
buf6 = reinterpret_tensor(buf9, (4, 4, 4, 4), (576, 144, 36, 1), 24)
buf7 = reinterpret_tensor(buf9, (4, 4, 4, 4), (576, 144, 36, 1), 28)
buf8 = reinterpret_tensor(buf9, (4, 4, 4, 4), (576, 144, 36, 1), 32)
get_raw_stream(0)
triton_poi_fused_cat_cos_mul_sin_0[grid(256)](arg0_1, buf0, buf1,
buf2, buf3, buf4, buf5, buf6, buf7, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf9,
class FreqEncoderNew(nn.Module):
def __init__(self, input_dim, max_freq_log2, N_freqs, log_sampling=True,
include_input=True, periodic_fns=(torch.sin, torch.cos)):
super().__init__()
self.input_dim = input_dim
self.include_input = include_input
self.periodic_fns = periodic_fns
self.output_dim = 0
if self.include_input:
self.output_dim += self.input_dim
self.output_dim += self.input_dim * N_freqs * len(self.periodic_fns)
if log_sampling:
self.freq_bands = 2.0 ** torch.linspace(0.0, max_freq_log2, N_freqs
)
else:
self.freq_bands = torch.linspace(2.0 ** 0.0, 2.0 **
max_freq_log2, N_freqs)
self.freq_bands = self.freq_bands.numpy().tolist()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
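# Minimal usage sketch, assuming a CUDA device. The generated kernel hard-codes the
# four frequency bands 2 ** linspace(0, 4, 4) ~= [1.0, 2.52, 6.35, 16.0], so this
# lowering only covers input_dim=4, max_freq_log2=4, N_freqs=4 with the default
# sin/cos pair; the output packs [x, sin(f_i * x), cos(f_i * x)] into
# 4 * (1 + 2 * 4) = 36 channels on the last dimension.
if __name__ == '__main__':
    enc = FreqEncoderNew(input_dim=4, max_freq_log2=4, N_freqs=4)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = enc(x)
    assert out.shape == (4, 4, 4, 36) and out.shape[-1] == enc.output_dim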
|
wx-b/torch-ngp
|
FreqEncoder
| false
| 10,948
|
[
"MIT"
] | 0
|
b5799e90dca4e188b14f8c77abf0d420c0bac915
|
https://github.com/wx-b/torch-ngp/tree/b5799e90dca4e188b14f8c77abf0d420c0bac915
|
AsymmetricalFocalLoss
|
import torch
import torch.nn as nn
class AsymmetricalFocalLoss(nn.Module):
def __init__(self, gamma=0, zeta=0):
super(AsymmetricalFocalLoss, self).__init__()
self.gamma = gamma
self.zeta = zeta
def forward(self, pred, target):
losses = -((1 - pred) ** self.gamma * target * torch.clamp_min(
torch.log(pred), -100) + pred ** self.zeta * (1 - target) *
torch.clamp_min(torch.log(1 - pred), -100))
return torch.mean(losses)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_min_log_mean_mul_neg_pow_rsub_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp1 * tmp3
tmp5 = tl_math.log(tmp0)
tmp6 = -100.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp4 * tmp7
tmp9 = tmp1 - tmp3
tmp10 = tmp1 * tmp9
tmp11 = tl_math.log(tmp2)
tmp12 = triton_helpers.maximum(tmp11, tmp6)
tmp13 = tmp10 * tmp12
tmp14 = tmp8 + tmp13
tmp15 = -tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = 256.0
tmp20 = tmp18 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_min_log_mean_mul_neg_pow_rsub_0[grid(1)](
buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class AsymmetricalFocalLossNew(nn.Module):
def __init__(self, gamma=0, zeta=0):
super(AsymmetricalFocalLossNew, self).__init__()
self.gamma = gamma
self.zeta = zeta
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
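# Minimal usage sketch, assuming a CUDA device. The fused kernel corresponds to the
# default gamma=0, zeta=0 configuration (the (1 - pred) ** gamma and pred ** zeta
# factors are constant-folded to 1), so on probabilities away from 0 and 1 the
# result should agree with plain binary cross entropy up to floating-point tolerance.
if __name__ == '__main__':
    import torch.nn.functional as F
    pred = torch.rand(4, 4, 4, 4, device='cuda').clamp(0.001, 0.999)
    target = torch.rand(4, 4, 4, 4, device='cuda')
    loss = AsymmetricalFocalLossNew()(pred, target)
    assert torch.allclose(loss, F.binary_cross_entropy(pred, target), atol=1e-5)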
|
venisehannoyer/Hear-me-GirlsInAI-team1
|
AsymmetricalFocalLoss
| false
| 10,949
|
[
"Apache-2.0"
] | 0
|
664b3af4befe9b73c28d4362969699bc2254bdf9
|
https://github.com/venisehannoyer/Hear-me-GirlsInAI-team1/tree/664b3af4befe9b73c28d4362969699bc2254bdf9
|
ContextGating
|
import torch
import torch.nn as nn
class ContextGating(nn.Module):
def __init__(self, in_dim):
super(ContextGating, self).__init__()
        self.sigmoid = nn.Sigmoid()
self.linear = nn.Linear(in_dim, in_dim)
def forward(self, x):
lin = self.linear(x.permute(0, 2, 3, 1))
lin = lin.permute(0, 3, 1, 2)
sig = self.sigmoid(lin)
res = x * sig
return res
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp2 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp0 * tmp4
tl.store(out_ptr0 + (x2 + 16 * y3), tmp5, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(16, 16)](primals_1, buf1,
primals_3, buf2, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4,
num_stages=1)
return buf2, primals_1, primals_3, reinterpret_tensor(buf0, (64, 4), (4,
1), 0), buf1
class ContextGatingNew(nn.Module):
def __init__(self, in_dim):
super(ContextGatingNew, self).__init__()
        self.sigmoid = nn.Sigmoid()
self.linear = nn.Linear(in_dim, in_dim)
def forward(self, input_0):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
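# Minimal usage sketch, assuming a CUDA device. The gate applies the linear layer
# along the channel dimension (the permutes of the eager module are folded into the
# clone/matmul kernels above), squashes the result with a sigmoid and multiplies it
# back onto x, so the output keeps x's shape and every element is scaled by a factor
# in (0, 1).
if __name__ == '__main__':
    gate = ContextGatingNew(in_dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = gate(x)
    assert out.shape == x.shape
    assert torch.all(out.abs() <= x.abs() + 1e-6)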
|
venisehannoyer/Hear-me-GirlsInAI-team1
|
ContextGating
| false
| 10,950
|
[
"Apache-2.0"
] | 0
|
664b3af4befe9b73c28d4362969699bc2254bdf9
|
https://github.com/venisehannoyer/Hear-me-GirlsInAI-team1/tree/664b3af4befe9b73c28d4362969699bc2254bdf9
|
InterProbCrossEntropyLoss
|
import torch
import torch.utils.data
class InterProbCrossEntropyLoss(torch.nn.Module):
def __init__(self, in_features, num_classes):
super(InterProbCrossEntropyLoss, self).__init__()
self.in_features = in_features
self.num_classes = num_classes
self.fc = torch.nn.Linear(in_features, num_classes)
def forward(self, x, target, mask=None):
log_prob = self.fc(x).log_softmax(-1)
loss = -(target * log_prob).sum(-1)
if mask is not None:
loss = loss * mask.view(-1)
return loss
def pack_init_args(self):
args = {'in_features': self.in_features, 'num_classes': self.
num_classes}
return args
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'num_classes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp1 - tmp12
tmp14 = tmp0 * tmp13
tmp16 = tmp3 - tmp12
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp20 = tmp6 - tmp12
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp24 = tmp9 - tmp12
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tl.store(out_ptr0 + x0, tmp27, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_mul_neg_sum_1[grid(64)](primals_4,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
return buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0
class InterProbCrossEntropyLossNew(torch.nn.Module):
def __init__(self, in_features, num_classes):
super(InterProbCrossEntropyLossNew, self).__init__()
self.in_features = in_features
self.num_classes = num_classes
self.fc = torch.nn.Linear(in_features, num_classes)
def pack_init_args(self):
args = {'in_features': self.in_features, 'num_classes': self.
num_classes}
return args
def forward(self, input_0, input_1):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
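# Minimal usage sketch, assuming a CUDA device. The fused graph covers the mask=None
# path only: it returns the per-position loss -(target * log_softmax(fc(x), -1)).sum(-1),
# i.e. a (4, 4, 4) tensor for the (4, 4, 4, 4) sample shapes; any masking would have
# to be applied by the caller.
if __name__ == '__main__':
    crit = InterProbCrossEntropyLossNew(in_features=4, num_classes=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.softmax(torch.rand(4, 4, 4, 4, device='cuda'), dim=-1)
    loss = crit(x, target)
    assert loss.shape == (4, 4, 4) and torch.all(loss >= 0)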
|
tkc-morita/secl
|
InterProbCrossEntropyLoss
| false
| 10,951
|
[
"MIT"
] | 0
|
d0156cea4fd95ea5071126dbf076a6da69752a37
|
https://github.com/tkc-morita/secl/tree/d0156cea4fd95ea5071126dbf076a6da69752a37
|
_MCLSTMCell
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
from typing import Tuple
class _Gate(nn.Module):
"""Utility class to implement a standard sigmoid gate"""
def __init__(self, in_features: 'int', out_features: 'int'):
super(_Gate, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_features)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalised gate"""
return torch.sigmoid(self.fc(x))
class _NormalizedGate(nn.Module):
"""Utility class to implement a gate with normalised activation function"""
def __init__(self, in_features: 'int', out_shape: 'Tuple[int, int]',
normalizer: 'str'):
super(_NormalizedGate, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_shape
[0] * out_shape[1])
self.out_shape = out_shape
if normalizer == 'normalized_sigmoid':
self.activation = nn.Sigmoid()
elif normalizer == 'normalized_relu':
self.activation = nn.ReLU()
else:
raise ValueError(
f"Unknown normalizer {normalizer}. Must be one of {'normalized_sigmoid', 'normalized_relu'}"
)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalized gate"""
h = self.fc(x).view(-1, *self.out_shape)
return torch.nn.functional.normalize(self.activation(h), p=1, dim=-1)
class _MCLSTMCell(nn.Module):
"""The logic of the MC-LSTM cell"""
def __init__(self, mass_input_size: 'int', aux_input_size: 'int',
hidden_size: 'int', cfg: 'Config'):
super(_MCLSTMCell, self).__init__()
self.cfg = cfg
self._hidden_size = hidden_size
gate_inputs = aux_input_size + hidden_size + mass_input_size
self.output_gate = _Gate(in_features=gate_inputs, out_features=
hidden_size)
self.input_gate = _NormalizedGate(in_features=gate_inputs,
out_shape=(mass_input_size, hidden_size), normalizer=
'normalized_sigmoid')
self.redistribution = _NormalizedGate(in_features=gate_inputs,
out_shape=(hidden_size, hidden_size), normalizer='normalized_relu')
self._reset_parameters()
def _reset_parameters(self):
if self.cfg.initial_forget_bias is not None:
nn.init.constant_(self.output_gate.fc.bias, val=self.cfg.
initial_forget_bias)
def forward(self, x_m: 'torch.Tensor', x_a: 'torch.Tensor') ->Tuple[
torch.Tensor, torch.Tensor]:
"""Perform forward pass on the MC-LSTM cell.
Parameters
----------
x_m : torch.Tensor
Mass input that will be conserved by the network.
x_a : torch.Tensor
Auxiliary inputs that will be used to modulate the gates but whose information won't be stored internally
in the MC-LSTM cells.
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
Outgoing mass and memory cells per time step of shape [sequence length, batch size, hidden size]
"""
_, batch_size, _ = x_m.size()
ct = x_m.new_zeros((batch_size, self._hidden_size))
m_out, c = [], []
for xt_m, xt_a in zip(x_m, x_a):
mt_out, ct = self._step(xt_m, xt_a, ct)
m_out.append(mt_out)
c.append(ct)
m_out, c = torch.stack(m_out), torch.stack(c)
return m_out, c
def _step(self, xt_m, xt_a, c):
""" Make a single time step in the MCLSTM. """
features = torch.cat([xt_m, xt_a, c / (c.norm(1) + 1e-05)], dim=-1)
i = self.input_gate(features)
r = self.redistribution(features)
o = self.output_gate(features)
m_in = torch.matmul(xt_m.unsqueeze(-2), i).squeeze(-2)
m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2)
m_new = m_in + m_sys
return o * m_new, (1 - o) * m_new
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'mass_input_size': 4, 'aux_input_size': 4, 'hidden_size':
4, 'cfg': _mock_config(initial_forget_bias=4)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from typing import Tuple
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_new_zeros_0(out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 4
r2 = rindex // 4
tmp0 = 0.0
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp4 = 1e-05
tmp5 = tmp3 + tmp4
tmp6 = tmp0 / tmp5
tl.store(out_ptr1 + tl.broadcast_to(r1 + 12 * r2, [XBLOCK, RBLOCK]),
tmp6, None)
@triton.jit
def triton_for_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1):
pid = tl.program_id(0)
XBLOCK: tl.constexpr = 1024
num_xblocks_0 = tl.cdiv(16, XBLOCK)
num_xblocks_1 = num_xblocks_0 + tl.cdiv(16, XBLOCK)
if pid < num_xblocks_0:
pid_offset = pid
xnumel = 16
xoffset = pid_offset * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 12 * x1), tmp0, xmask)
elif pid < num_xblocks_1:
pid_offset = pid - num_xblocks_0
xnumel = 16
xoffset = pid_offset * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x3 = xindex % 4
x4 = xindex // 4
tmp1 = tl.load(in_ptr1 + x5, xmask)
tl.store(out_ptr1 + (x3 + 12 * x4), tmp1, xmask)
else:
pass
@triton.jit
def triton_poi_fused_div_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.sigmoid(tmp2)
tmp4 = tl_math.abs(tmp3)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp4 + tmp7
tmp10 = tl.sigmoid(tmp9)
tmp11 = tl_math.abs(tmp10)
tmp12 = tmp8 + tmp11
tmp14 = tl.sigmoid(tmp13)
tmp15 = tl_math.abs(tmp14)
tmp16 = tmp12 + tmp15
tmp17 = 1e-12
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp19 = tmp1 / tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
@triton.jit
def triton_poi_fused_new_zeros_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_div_relu_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tl_math.abs(tmp4)
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tl_math.abs(tmp7)
tmp9 = tmp5 + tmp8
tmp11 = triton_helpers.maximum(tmp1, tmp10)
tmp12 = tl_math.abs(tmp11)
tmp13 = tmp9 + tmp12
tmp15 = triton_helpers.maximum(tmp1, tmp14)
tmp16 = tl_math.abs(tmp15)
tmp17 = tmp13 + tmp16
tmp18 = 1e-12
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp2 / tmp19
tl.store(out_ptr0 + x2, tmp20, xmask)
@triton.jit
def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_out_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp6 * tmp2
tmp8 = tl_math.abs(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tl.store(in_out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp2, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp7, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp15 = tl.load(in_ptr3 + 0)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (16 + 4 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (16 + 4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp14 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp10, tmp19)
tmp21 = tl.where(tmp4, tmp5, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_out_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp6 * tmp2
tmp8 = tl_math.abs(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tl.store(in_out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp2, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp7, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp15 = tl.load(in_ptr3 + 0)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (32 + 4 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (32 + 4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp14 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp10, tmp19)
tmp21 = tl.where(tmp4, tmp5, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp15 = tl.load(in_ptr3 + 0)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (48 + 4 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (48 + 4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp14 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp10, tmp19)
tmp21 = tl.where(tmp4, tmp5, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_stack_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp8 = tmp6 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0)
tmp16 = tl.sigmoid(tmp15)
tmp17 = tl.load(in_ptr3 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp14, tmp18, tmp19)
tmp21 = tmp0 >= tmp12
tmp22 = tl.full([1], 12, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr4 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0)
tmp26 = tl.sigmoid(tmp25)
tmp27 = tl.load(in_ptr5 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0)
tmp28 = tmp26 * tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp0 >= tmp22
tl.full([1], 16, tl.int64)
tmp34 = tl.load(in_ptr6 + (x0 + 4 * (-12 + x1)), tmp31 & xmask, other=0.0)
tmp35 = tl.sigmoid(tmp34)
tmp36 = tl.load(in_ptr7 + (x0 + 4 * (-12 + x1)), tmp31 & xmask, other=0.0)
tmp37 = tmp35 * tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp31, tmp37, tmp38)
tmp40 = tl.where(tmp24, tmp30, tmp39)
tmp41 = tl.where(tmp14, tmp20, tmp40)
tmp42 = tl.where(tmp4, tmp10, tmp41)
tmp43 = tl.load(in_ptr8 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp44 = tl.load(in_ptr9 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0)
tmp45 = tl.load(in_ptr10 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0)
tmp46 = 1.0
tmp47 = tmp46 - tmp35
tmp48 = tmp47 * tmp36
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp31, tmp48, tmp49)
tmp51 = tl.where(tmp24, tmp45, tmp50)
tmp52 = tl.where(tmp14, tmp44, tmp51)
tmp53 = tl.where(tmp4, tmp43, tmp52)
tl.store(out_ptr0 + x2, tmp42, xmask)
tl.store(out_ptr1 + x2, tmp53, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (16, 12), (12, 1))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (16, 12), (12, 1))
assert_size_stride(primals_6, (16,), (1,))
assert_size_stride(primals_7, (4, 12), (12, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf3 = reinterpret_tensor(buf4, (4, 4), (12, 1), 8)
get_raw_stream(0)
triton_per_fused_add_div_linalg_vector_norm_new_zeros_0[grid(1)](buf3,
1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf1 = reinterpret_tensor(buf4, (4, 4), (12, 1), 0)
buf2 = reinterpret_tensor(buf4, (4, 4), (12, 1), 4)
triton_for_fused_1[2, 1, 1](primals_1, primals_2, buf1, buf2,
num_warps=8, num_stages=1)
buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_4, buf4, reinterpret_tensor(primals_3,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf5)
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_6, buf4, reinterpret_tensor(primals_5,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(12, 4), (1, 12), 0), alpha=1, beta=1, out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_sigmoid_2[grid(64)](buf5, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
1), 0), buf8, out=buf9)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_new_zeros_3[grid(16)](buf10, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf11 = buf8
del buf8
triton_poi_fused_div_relu_4[grid(64)](buf6, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf10, (4, 1, 4), (4, 0, 1),
0), buf11, out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 4), (4, 1), 0)
del buf12
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf15 = empty_strided_cuda((), (), torch.float32)
buf16 = buf15
del buf15
triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5[grid(1)](
buf13, buf16, buf9, buf7, buf14, 1, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf17 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
triton_poi_fused_cat_6[grid(48)](primals_1, primals_2, buf14, buf16,
buf17, 48, XBLOCK=64, num_warps=1, num_stages=1)
buf18 = reinterpret_tensor(buf11, (4, 16), (16, 1), 0)
del buf11
extern_kernels.addmm(primals_4, buf17, reinterpret_tensor(primals_3,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf18)
buf19 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_6, buf17, reinterpret_tensor(primals_5,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf19)
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_relu_4[grid(64)](buf19, buf20, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf21 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_8, buf17, reinterpret_tensor(primals_7,
(12, 4), (1, 12), 0), alpha=1, beta=1, out=buf21)
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_sigmoid_2[grid(64)](buf18, buf22, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
1), 16), buf22, out=buf23)
buf24 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf14, (4, 1, 4), (4, 4, 1),
0), buf20, out=buf24)
buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0)
del buf23
buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf27 = empty_strided_cuda((), (), torch.float32)
buf28 = buf27
del buf27
triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7[grid(1)](
buf25, buf28, buf24, buf21, buf26, 1, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf29 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
triton_poi_fused_cat_8[grid(48)](primals_1, primals_2, buf26, buf28,
buf29, 48, XBLOCK=64, num_warps=1, num_stages=1)
buf30 = reinterpret_tensor(buf22, (4, 16), (16, 1), 0)
del buf22
extern_kernels.addmm(primals_4, buf29, reinterpret_tensor(primals_3,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf30)
buf31 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_6, buf29, reinterpret_tensor(primals_5,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf31)
buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_relu_4[grid(64)](buf31, buf32, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf33 = reinterpret_tensor(buf24, (4, 4), (4, 1), 0)
del buf24
extern_kernels.addmm(primals_8, buf29, reinterpret_tensor(primals_7,
(12, 4), (1, 12), 0), alpha=1, beta=1, out=buf33)
buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_sigmoid_2[grid(64)](buf30, buf34, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf35 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
1), 32), buf34, out=buf35)
buf36 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf26, (4, 1, 4), (4, 4, 1),
0), buf32, out=buf36)
buf37 = reinterpret_tensor(buf35, (4, 4), (4, 1), 0)
del buf35
buf38 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf39 = empty_strided_cuda((), (), torch.float32)
buf40 = buf39
del buf39
triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7[grid(1)](
buf37, buf40, buf36, buf33, buf38, 1, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf41 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
triton_poi_fused_cat_9[grid(48)](primals_1, primals_2, buf38, buf40,
buf41, 48, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf42 = reinterpret_tensor(buf34, (4, 16), (16, 1), 0)
del buf34
extern_kernels.addmm(primals_4, buf41, reinterpret_tensor(primals_3,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf42)
del primals_4
buf43 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_6, buf41, reinterpret_tensor(primals_5,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf43)
del primals_6
buf44 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_relu_4[grid(64)](buf43, buf44, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf45 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0)
del buf36
extern_kernels.addmm(primals_8, buf41, reinterpret_tensor(primals_7,
(12, 4), (1, 12), 0), alpha=1, beta=1, out=buf45)
del primals_8
buf46 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_sigmoid_2[grid(64)](buf42, buf46, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf47 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
1), 48), buf46, out=buf47)
buf48 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf38, (4, 1, 4), (4, 4, 1),
0), buf44, out=buf48)
buf49 = reinterpret_tensor(buf47, (4, 4), (4, 1), 0)
del buf47
triton_poi_fused_add_10[grid(16)](buf49, buf48, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf48
buf50 = reinterpret_tensor(buf46, (16, 4), (4, 1), 0)
del buf46
buf51 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_stack_11[grid(64)](buf7, buf13, buf21, buf25,
buf33, buf37, buf45, buf49, buf14, buf26, buf38, buf50, buf51,
64, XBLOCK=64, num_warps=1, num_stages=1)
return (reinterpret_tensor(buf50, (4, 4, 4), (16, 4, 1), 0),
reinterpret_tensor(buf51, (4, 4, 4), (16, 4, 1), 0), buf4, buf5,
buf6, buf7, buf13, buf14, buf16, buf17, buf18, buf19, buf20, buf21,
buf25, buf26, buf28, buf29, buf30, buf31, buf32, buf33, buf37,
buf38, buf40, buf41, buf42, buf43, buf44, buf45, buf49,
reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 48), primals_7,
primals_5, primals_3, reinterpret_tensor(primals_1, (4, 4, 1), (4,
1, 4), 32), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 16),
reinterpret_tensor(buf10, (4, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 0))
class _Gate(nn.Module):
"""Utility class to implement a standard sigmoid gate"""
def __init__(self, in_features: 'int', out_features: 'int'):
super(_Gate, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_features)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalised gate"""
return torch.sigmoid(self.fc(x))
class _NormalizedGate(nn.Module):
"""Utility class to implement a gate with normalised activation function"""
def __init__(self, in_features: 'int', out_shape: 'Tuple[int, int]',
normalizer: 'str'):
super(_NormalizedGate, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_shape
[0] * out_shape[1])
self.out_shape = out_shape
if normalizer == 'normalized_sigmoid':
self.activation = nn.Sigmoid()
elif normalizer == 'normalized_relu':
self.activation = nn.ReLU()
else:
raise ValueError(
f"Unknown normalizer {normalizer}. Must be one of {'normalized_sigmoid', 'normalized_relu'}"
)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalized gate"""
h = self.fc(x).view(-1, *self.out_shape)
return torch.nn.functional.normalize(self.activation(h), p=1, dim=-1)
class _MCLSTMCellNew(nn.Module):
"""The logic of the MC-LSTM cell"""
def __init__(self, mass_input_size: 'int', aux_input_size: 'int',
hidden_size: 'int', cfg: 'Config'):
super(_MCLSTMCellNew, self).__init__()
self.cfg = cfg
self._hidden_size = hidden_size
gate_inputs = aux_input_size + hidden_size + mass_input_size
self.output_gate = _Gate(in_features=gate_inputs, out_features=
hidden_size)
self.input_gate = _NormalizedGate(in_features=gate_inputs,
out_shape=(mass_input_size, hidden_size), normalizer=
'normalized_sigmoid')
self.redistribution = _NormalizedGate(in_features=gate_inputs,
out_shape=(hidden_size, hidden_size), normalizer='normalized_relu')
self._reset_parameters()
def _reset_parameters(self):
if self.cfg.initial_forget_bias is not None:
nn.init.constant_(self.output_gate.fc.bias, val=self.cfg.
initial_forget_bias)
def _step(self, xt_m, xt_a, c):
""" Make a single time step in the MCLSTM. """
features = torch.cat([xt_m, xt_a, c / (c.norm(1) + 1e-05)], dim=-1)
i = self.input_gate(features)
r = self.redistribution(features)
o = self.output_gate(features)
m_in = torch.matmul(xt_m.unsqueeze(-2), i).squeeze(-2)
m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2)
m_new = m_in + m_sys
return o * m_new, (1 - o) * m_new
def forward(self, input_0, input_1):
primals_7 = self.output_gate.fc.weight
primals_8 = self.output_gate.fc.bias
primals_3 = self.input_gate.fc.weight
primals_4 = self.input_gate.fc.bias
primals_5 = self.redistribution.fc.weight
primals_6 = self.redistribution.fc.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
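# Minimal usage sketch, assuming a CUDA device; the cfg argument only needs an
# initial_forget_bias attribute, so a SimpleNamespace can stand in for the
# _paritybench_helpers mock. Inputs are (seq_len, batch, feature) tensors; the
# unrolled graph above handles the 4-step sequence and returns outgoing mass and
# memory states of shape (seq_len, batch, hidden_size). Because the input and
# redistribution gates are L1-normalised, outgoing plus stored mass tracks the
# incoming mass.
if __name__ == '__main__':
    from types import SimpleNamespace
    cell = _MCLSTMCellNew(mass_input_size=4, aux_input_size=4, hidden_size=4,
        cfg=SimpleNamespace(initial_forget_bias=4)).cuda()
    x_m = torch.rand(4, 4, 4, device='cuda')
    x_a = torch.rand(4, 4, 4, device='cuda')
    m_out, c = cell(x_m, x_a)
    assert m_out.shape == (4, 4, 4) and c.shape == (4, 4, 4)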
|
rro2q2/transfer-learning-aaai21
|
_MCLSTMCell
| false
| 10,952
|
[
"BSD-3-Clause"
] | 0
|
f1960540d0608ce1e4d1d64bb4abd29d953f250f
|
https://github.com/rro2q2/transfer-learning-aaai21/tree/f1960540d0608ce1e4d1d64bb4abd29d953f250f
|
SoftTargetCrossEntropy
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
class SoftTargetCrossEntropy(nn.Module):
"""
    The native cross-entropy loss with soft targets.
    input: x is the model output (logits); target is the ground-truth soft-label distribution
    return: the scalar mean loss
"""
def __init__(self):
super(SoftTargetCrossEntropy, self).__init__()
def forward(self, x, target):
N_rep = x.shape[0]
N = target.shape[0]
if not N == N_rep:
target = target.repeat(N_rep // N, 1)
loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1)
return loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = -tmp0
tmp3 = tl_math.exp(tmp2)
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp3 + tmp5
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tl_math.log(tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp1 * tmp14
tmp17 = -tmp16
tmp18 = tmp4 - tmp13
tmp19 = tmp17 * tmp18
tmp20 = tmp15 + tmp19
tmp22 = -tmp21
tmp23 = tmp7 - tmp13
tmp24 = tmp22 * tmp23
tmp25 = tmp20 + tmp24
tmp27 = -tmp26
tmp28 = tmp10 - tmp13
tmp29 = tmp27 * tmp28
tmp30 = tmp25 + tmp29
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.sum(tmp31, 1)[:, None]
tmp34 = 64.0
tmp35 = tmp33 / tmp34
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf3,
arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf3,
class SoftTargetCrossEntropyNew(nn.Module):
"""
    The native cross-entropy loss with soft targets.
    input: x is the model output (logits); target is the ground-truth soft-label distribution
    return: the scalar mean loss
"""
def __init__(self):
super(SoftTargetCrossEntropyNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
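# Minimal usage sketch, assuming a CUDA device. The two kernels above compute
# mean(sum(-target * log_softmax(x, dim=-1), dim=-1)) for the (4, 4, 4, 4) sample
# shapes, so the scalar result should match the eager formula up to floating-point
# tolerance.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.softmax(torch.rand(4, 4, 4, 4, device='cuda'), dim=-1)
    loss = SoftTargetCrossEntropyNew()(x, target)
    ref = (-target * torch.log_softmax(x, dim=-1)).sum(-1).mean()
    assert torch.allclose(loss, ref, atol=1e-5)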
|
xuewengeophysics/volo
|
SoftTargetCrossEntropy
| false
| 10,953
|
[
"Apache-2.0"
] | 0
|
411f367c617b556fd0df450e7844e57541695c4d
|
https://github.com/xuewengeophysics/volo/tree/411f367c617b556fd0df450e7844e57541695c4d
|
Discriminator
|
import torch
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self, n_h):
super().__init__()
self.f_k = nn.Bilinear(n_h, n_h, 1)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):
c_x = c
sc_1 = torch.squeeze(self.f_k(h_pl, c_x))
sc_2 = torch.squeeze(self.f_k(h_mi, c_x))
if s_bias1 is not None:
sc_1 += s_bias1
if s_bias2 is not None:
sc_2 += s_bias2
logits = torch.cat((sc_1, sc_2), 1).squeeze(-1)
v = logits.shape[1]
return logits, logits[:, :v // 2]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_h': 4}]
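# Illustrative usage sketch (assumed, not part of the original repo): shows the
# shapes produced when the bilinear discriminator scores positive and negative
# samples against a shared context; tensor sizes follow get_inputs() above.
def _discriminator_example():
    disc = Discriminator(n_h=4)
    c, h_pl, h_mi = get_inputs()
    logits, half = disc(c, h_pl, h_mi)
    # logits concatenates positive and negative scores along dim=1;
    # half is the positive-sample slice logits[:, :logits.shape[1] // 2].
    print(logits.shape, half.shape)  # torch.Size([4, 8, 4]) torch.Size([4, 4, 4])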
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x3 = xindex
tmp6 = tl.load(in_ptr1 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
tmp8 = tmp5 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp14 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp11 & xmask,
other=0.0)
tmp15 = tmp14 + tmp7
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp11, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp10, tmp17)
tl.store(out_ptr0 + x3, tmp18, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(
primals_4, (64, 4), (4, 1), 0), primals_2, reinterpret_tensor(
primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
buf1 = buf0
del buf0
buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(
primals_5, (64, 4), (4, 1), 0), primals_2, reinterpret_tensor(
primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_2
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](buf1, primals_3, buf3, buf4, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del buf3
del primals_3
return buf4, reinterpret_tensor(buf4, (4, 4, 4), (32, 4, 1), 0
), buf4, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0)
class DiscriminatorNew(nn.Module):
def __init__(self, n_h):
super().__init__()
self.f_k = nn.Bilinear(n_h, n_h, 1)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, nn.Bilinear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, input_0, input_1, input_2):
primals_2 = self.f_k.weight
primals_3 = self.f_k.bias
primals_1 = input_0
primals_4 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
|
usherbob/dgcnn.pytorch
|
Discriminator
| false
| 10,954
|
[
"MIT"
] | 0
|
fdf5f7a470123b292ac7642f65fd4f693d9b010d
|
https://github.com/usherbob/dgcnn.pytorch/tree/fdf5f7a470123b292ac7642f65fd4f693d9b010d
|
AttentionLayer
|
import torch
import numpy as np
import torch.nn as nn
def init_xavier_normal(tensor):
param = nn.Parameter(tensor)
nn.init.xavier_normal_(param)
return param
class AttentionLayer(nn.Module):
def __init__(self, input_dim, hidden_dim=64, n_heads=3, dropout=0.5):
super(AttentionLayer, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.n_heads = n_heads
self.weight = init_xavier_normal(torch.FloatTensor(n_heads,
input_dim, hidden_dim))
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
self.linear = nn.Linear(n_heads * hidden_dim, input_dim)
self.norm = nn.LayerNorm(input_dim)
self.output_dim = input_dim
def forward(self, input_):
input_size = input_.size(0)
logits = input_.repeat(self.n_heads, 1, 1).view(self.n_heads, -1,
self.input_dim)
logits = torch.bmm(logits, self.weight).view(input_size * self.
n_heads, -1, self.hidden_dim)
attn = torch.bmm(logits, logits.transpose(1, 2)) / np.sqrt(self.
hidden_dim)
attn = self.softmax(attn)
outputs = torch.bmm(attn, logits)
outputs = torch.split(outputs, input_size, dim=0)
outputs = torch.cat(outputs, dim=-1)
outputs = self.linear(outputs)
outputs = self.dropout(outputs)
return self.norm(input_ + outputs), attn
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
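# Illustrative usage sketch (assumed, not part of the original source): runs the
# multi-head attention layer on a random batch and reports the output shapes.
def _attention_layer_example():
    layer = AttentionLayer(input_dim=4, hidden_dim=64, n_heads=3)
    x = torch.rand(4, 4, 4)  # (batch, seq_len, input_dim)
    out, attn = layer(x)
    # out keeps the input shape thanks to the residual connection + LayerNorm;
    # attn stacks the per-head attention maps along the batch dimension.
    print(out.shape, attn.shape)  # torch.Size([4, 4, 4]) torch.Size([12, 4, 4])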
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * (x1 % 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_bmm_transpose_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 4) + 16 * ((x0 + 4 * x1) // 16
) + 64 * x2), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
tl.store(out_ptr1 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_sqrt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([1], 8.0, tl.float64)
tmp2 = tl.full([1], 0.0, tl.float64)
tmp3 = tmp1 >= tmp2
tmp4 = 1.0
tmp5 = -1.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp8 * tmp6
tmp11 = tmp10 * tmp6
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp6
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp6
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp7 - tmp18
tmp20 = tmp6.to(tl.float64)
tmp21 = tmp20 * tmp1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp19 / tmp22
tmp24 = tl_math.exp(tmp23)
tl.store(out_ptr0 + x2, tmp24, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 192
x1 = xindex // 192
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 128, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (1024 + 64 * x1 + (-64 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 192, tl.int64)
tmp14 = tl.load(in_ptr0 + (2048 + 64 * x1 + (-128 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (3, 4, 64), (256, 64, 1))
assert_size_stride(primals_3, (4, 192), (192, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((12, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_repeat_0[grid(192)](primals_1, buf0, 192, XBLOCK=
128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((3, 16, 4), (64, 4, 1), torch.float32)
buf13 = empty_strided_cuda((3, 4, 16), (64, 1, 4), torch.float32)
triton_poi_fused_bmm_transpose_1[grid(192)](buf0, buf1, buf13, 192,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((3, 16, 64), (1024, 64, 1), torch.float32)
extern_kernels.bmm(buf1, primals_2, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf1, (12, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (12, 4, 64), (256, 64,
1), 0), reinterpret_tensor(buf2, (12, 64, 4), (256, 1, 64), 0),
out=buf3)
buf4 = buf0
del buf0
triton_poi_fused__softmax_sqrt_2[grid(192)](buf3, buf4, 192, XBLOCK
=128, num_warps=4, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_3[grid(192)](buf4, buf5, 192, XBLOCK=256,
num_warps=4, num_stages=1)
del buf4
buf6 = empty_strided_cuda((12, 4, 64), (256, 64, 1), torch.float32)
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (12, 4, 64), (256,
64, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 192), (768, 192, 1), torch.float32)
triton_poi_fused_cat_4[grid(3072)](buf6, buf7, 3072, XBLOCK=128,
num_warps=4, num_stages=1)
del buf6
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (16, 192), (192, 1), 0),
reinterpret_tensor(primals_3, (192, 4), (1, 192), 0), out=buf8)
buf9 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0)
del buf8
triton_poi_fused_add_5[grid(64)](buf9, primals_1, primals_4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
del primals_4
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_6[grid(16)](buf9, buf10, buf11,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_7[grid(64)](buf9, buf10, buf11,
primals_5, primals_6, buf12, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf10
del buf11
del primals_6
return buf12, buf5, primals_5, reinterpret_tensor(buf2, (12, 64, 4), (
256, 1, 64), 0), buf5, reinterpret_tensor(buf7, (16, 192), (192, 1), 0
), buf9, primals_3, buf13
def init_xavier_normal(tensor):
param = nn.Parameter(tensor)
nn.init.xavier_normal_(param)
return param
class AttentionLayerNew(nn.Module):
def __init__(self, input_dim, hidden_dim=64, n_heads=3, dropout=0.5):
super(AttentionLayerNew, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.n_heads = n_heads
self.weight = init_xavier_normal(torch.FloatTensor(n_heads,
input_dim, hidden_dim))
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
self.linear = nn.Linear(n_heads * hidden_dim, input_dim)
self.norm = nn.LayerNorm(input_dim)
self.output_dim = input_dim
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.linear.weight
primals_4 = self.linear.bias
primals_5 = self.norm.weight
primals_6 = self.norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
vietbt/ViTextnormASR
|
AttentionLayer
| false
| 10,955
|
[
"Apache-2.0"
] | 0
|
57444aa7247c67b2628d1802e9ed53dae4857ee4
|
https://github.com/vietbt/ViTextnormASR/tree/57444aa7247c67b2628d1802e9ed53dae4857ee4
|
DiscrimNet
|
import torch
import torch.nn as nn
from torch.nn.init import kaiming_uniform_
def weight_init(m):
if m.__class__.__name__ == 'Linear':
m.weight.data.copy_(kaiming_uniform_(m.weight.data))
m.bias.data.fill_(0)
class DiscrimNet(nn.Module):
def __init__(self, ob_space, ac_space, h1=32, h2=32):
nn.Module.__init__(self)
self.fc1 = nn.Linear(ob_space.shape[0] + ac_space.shape[0], h1)
self.fc2 = nn.Linear(h1, h2)
self.output_layer = nn.Linear(h2, 1)
self.apply(weight_init)
def forward(self, ob, ac):
h = torch.tanh(self.fc1(torch.cat([ob, ac], dim=1)))
h = torch.tanh(self.fc2(h))
return self.output_layer(h)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'ob_space': torch.rand([4, 4]), 'ac_space': torch.rand([4,
4])}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.init import kaiming_uniform_
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (32, 8), (8, 1))
assert_size_stride(primals_4, (32,), (1,))
assert_size_stride(primals_5, (32, 32), (32, 1))
assert_size_stride(primals_6, (32,), (1,))
assert_size_stride(primals_7, (1, 32), (32, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 32), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_tanh_1[grid(128)](buf2, primals_4, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (32, 32), (1,
32), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_tanh_1[grid(128)](buf4, primals_6, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(32, 1), (1, 32), 0), alpha=1, beta=1, out=buf6)
del primals_8
return buf6, buf0, buf2, buf4, primals_7, primals_5
def weight_init(m):
if m.__class__.__name__ == 'Linear':
m.weight.data.copy_(kaiming_uniform_(m.weight.data))
m.bias.data.fill_(0)
class DiscrimNetNew(nn.Module):
def __init__(self, ob_space, ac_space, h1=32, h2=32):
nn.Module.__init__(self)
self.fc1 = nn.Linear(ob_space.shape[0] + ac_space.shape[0], h1)
self.fc2 = nn.Linear(h1, h2)
self.output_layer = nn.Linear(h2, 1)
self.apply(weight_init)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_7 = self.output_layer.weight
primals_8 = self.output_layer.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
ven-kyoshiro/PILCO-1
|
DiscrimNet
| false
| 10,956
|
[
"MIT"
] | 0
|
61c4ef18a6bbecbeb6a10784a7925d31f46dd23b
|
https://github.com/ven-kyoshiro/PILCO-1/tree/61c4ef18a6bbecbeb6a10784a7925d31f46dd23b
|
Transformer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Transformer(nn.Module):
def __init__(self, input_size):
super(Transformer, self).__init__()
self.fc1 = nn.Linear(input_size, 256)
self.fc2 = nn.Linear(256, 512)
self.parametrized_layers = [self.fc1, self.fc2]
def forward(self, x):
out = F.relu(self.fc1(x))
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (512, 256), (256, 1))
assert_size_stride(primals_5, (512,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf3, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_4, (256, 512), (1, 256
), 0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), primals_4, buf3
class TransformerNew(nn.Module):
def __init__(self, input_size):
super(TransformerNew, self).__init__()
self.fc1 = nn.Linear(input_size, 256)
self.fc2 = nn.Linear(256, 512)
self.parametrized_layers = [self.fc1, self.fc2]
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
xuewanqi/RestoreNet
|
Transformer
| false
| 10,957
|
[
"Apache-2.0"
] | 0
|
fc313dc36965c2fab2c4cea9bf1227de75319439
|
https://github.com/xuewanqi/RestoreNet/tree/fc313dc36965c2fab2c4cea9bf1227de75319439
|
LinearAdd
|
import torch
from torch import nn
import torch.cuda
import torch.backends.cudnn
import torch.backends.mkl
import torch.backends.cuda
import torch.backends.quantized
class LinearAdd(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(LinearAdd, self).__init__()
seed = 2018
torch.manual_seed(seed)
self.linear = nn.Linear(in_channels, out_channels, **kwargs)
def forward(self, x):
return torch.add(self.linear(x), self.linear(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
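# Illustrative note (assumed, not from the original repo): both operands share the
# same Linear module, so torch.add(self.linear(x), self.linear(x)) equals
# 2 * self.linear(x); the fused Triton kernel in the optimised code below exploits
# exactly this (tmp3 = tmp2 + tmp2). A minimal check:
def _linear_add_equivalence():
    mod = LinearAdd(in_channels=4, out_channels=4)
    x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(mod(x), 2 * mod.linear(x), atol=1e-6)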
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.cuda
import torch.backends.cudnn
import torch.backends.mkl
import torch.backends.cuda
import torch.backends.quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 + tmp2
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class LinearAddNew(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(LinearAddNew, self).__init__()
seed = 2018
torch.manual_seed(seed)
self.linear = nn.Linear(in_channels, out_channels, **kwargs)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
yangw1234/intel-extension-for-pytorch
|
LinearAdd
| false
| 10,958
|
[
"Apache-2.0"
] | 0
|
571e31578605ab3999dcebbb4d66a0ee2253a464
|
https://github.com/yangw1234/intel-extension-for-pytorch/tree/571e31578605ab3999dcebbb4d66a0ee2253a464
|
KnowledgeDistillationKLDivLoss
|
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
    Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
``loss_func(pred, target, **kwargs)``. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like ``loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)``.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def knowledge_distillation_kl_div_loss(pred, soft_label, T, detach_target=True
):
"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
        T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic differentiation.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert pred.size() == soft_label.size()
target = F.softmax(soft_label / T, dim=1)
if detach_target:
target = target.detach()
kd_loss = F.kl_div(F.log_softmax(pred / T, dim=1), target, reduction='none'
).mean(1) * (T * T)
return kd_loss
class KnowledgeDistillationKLDivLoss(nn.Module):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
"""
def __init__(self, reduction='mean', loss_weight=1.0, T=10):
super(KnowledgeDistillationKLDivLoss, self).__init__()
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def forward(self, pred, soft_label, weight=None, avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
soft_label (Tensor): Target logits with shape (N, N + 1).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(pred,
soft_label, weight, reduction=reduction, avg_factor=avg_factor,
T=self.T)
return loss_kd
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
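# Hedged sanity check (illustrative, not from the original repo): verifies the
# decorated loss against a direct computation for the default settings
# (T=10, reduction='mean', no weight, no avg_factor).
def _kd_loss_check():
    loss_fn = KnowledgeDistillationKLDivLoss(T=10)
    pred, soft = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    manual = (F.kl_div(F.log_softmax(pred / 10, dim=1),
                       F.softmax(soft / 10, dim=1),
                       reduction='none').mean(1) * 100).mean()
    assert torch.allclose(loss_fn(pred, soft), manual, atol=1e-6)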
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import functools
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.1
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.1
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_sub_xlogy_3(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp11 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp17 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp24 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp35 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp46 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp1 = libdevice.isnan(tmp0).to(tl.int1)
tmp2 = 0.0
tmp3 = tmp0 == tmp2
tmp4 = tl_math.log(tmp0)
tmp5 = tmp0 * tmp4
tmp6 = tl.where(tmp3, tmp2, tmp5)
tmp7 = float('nan')
tmp8 = tl.where(tmp1, tmp7, tmp6)
tmp10 = tl_math.exp(tmp9)
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tl_math.log(tmp19)
tmp21 = tmp9 - tmp20
tmp22 = tmp0 * tmp21
tmp23 = tmp8 - tmp22
tmp25 = libdevice.isnan(tmp24).to(tl.int1)
tmp26 = tmp24 == tmp2
tmp27 = tl_math.log(tmp24)
tmp28 = tmp24 * tmp27
tmp29 = tl.where(tmp26, tmp2, tmp28)
tmp30 = tl.where(tmp25, tmp7, tmp29)
tmp31 = tmp11 - tmp20
tmp32 = tmp24 * tmp31
tmp33 = tmp30 - tmp32
tmp34 = tmp23 + tmp33
tmp36 = libdevice.isnan(tmp35).to(tl.int1)
tmp37 = tmp35 == tmp2
tmp38 = tl_math.log(tmp35)
tmp39 = tmp35 * tmp38
tmp40 = tl.where(tmp37, tmp2, tmp39)
tmp41 = tl.where(tmp36, tmp7, tmp40)
tmp42 = tmp14 - tmp20
tmp43 = tmp35 * tmp42
tmp44 = tmp41 - tmp43
tmp45 = tmp34 + tmp44
tmp47 = libdevice.isnan(tmp46).to(tl.int1)
tmp48 = tmp46 == tmp2
tmp49 = tl_math.log(tmp46)
tmp50 = tmp46 * tmp49
tmp51 = tl.where(tmp48, tmp2, tmp50)
tmp52 = tl.where(tmp47, tmp7, tmp51)
tmp53 = tmp17 - tmp20
tmp54 = tmp46 * tmp53
tmp55 = tmp52 - tmp54
tmp56 = tmp45 + tmp55
tmp57 = 4.0
tmp58 = tmp56 / tmp57
tmp59 = 100.0
tmp60 = tmp58 * tmp59
tmp61 = tl.broadcast_to(tmp60, [XBLOCK, RBLOCK])
tmp63 = tl.sum(tmp61, 1)[:, None]
tmp64 = 64.0
tmp65 = tmp63 / tmp64
tmp66 = 1.0
tmp67 = tmp65 * tmp66
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp67, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused_2[grid(256)](arg0_1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
triton_per_fused__log_softmax_mean_mul_sub_xlogy_3[grid(1)](buf5,
buf1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf1
del buf2
return buf5,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
    Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
``loss_func(pred, target, **kwargs)``. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like ``loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)``.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def knowledge_distillation_kl_div_loss(pred, soft_label, T, detach_target=True
):
"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
        T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic differentiation.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert pred.size() == soft_label.size()
target = F.softmax(soft_label / T, dim=1)
if detach_target:
target = target.detach()
kd_loss = F.kl_div(F.log_softmax(pred / T, dim=1), target, reduction='none'
).mean(1) * (T * T)
return kd_loss
class KnowledgeDistillationKLDivLossNew(nn.Module):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
"""
def __init__(self, reduction='mean', loss_weight=1.0, T=10):
super(KnowledgeDistillationKLDivLossNew, self).__init__()
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
xiangn95/mmclassification
|
KnowledgeDistillationKLDivLoss
| false
| 10,959
|
[
"Apache-2.0"
] | 0
|
3a3307cd222fe5156a703cf5573e54dbb6692b10
|
https://github.com/xiangn95/mmclassification/tree/3a3307cd222fe5156a703cf5573e54dbb6692b10
|
VNet
|
import torch
import torch.nn as nn
from torch.nn.init import kaiming_uniform_
import torch.nn.functional as F
def weight_init(m):
if m.__class__.__name__ == 'Linear':
m.weight.data.copy_(kaiming_uniform_(m.weight.data))
m.bias.data.fill_(0)
class VNet(nn.Module):
def __init__(self, ob_space, h1=200, h2=100):
super(VNet, self).__init__()
self.fc1 = nn.Linear(ob_space.shape[0], h1)
self.fc2 = nn.Linear(h1, h2)
self.output_layer = nn.Linear(h2, 1)
self.apply(weight_init)
def forward(self, ob):
h = F.relu(self.fc1(ob))
h = F.relu(self.fc2(h))
return self.output_layer(h)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ob_space': torch.rand([4, 4])}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn.init import kaiming_uniform_
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (200, 4), (4, 1))
assert_size_stride(primals_2, (200,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (100, 200), (200, 1))
assert_size_stride(primals_5, (100,), (1,))
assert_size_stride(primals_6, (1, 100), (100, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 200), (3200, 800, 200, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(12800)](buf1,
primals_2, buf7, 12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 200), (200, 1), 0),
reinterpret_tensor(primals_4, (200, 100), (1, 200), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(6400)](buf3,
primals_5, buf6, 6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 100),
(100, 1), 0), reinterpret_tensor(primals_6, (100, 1), (1, 100),
0), alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 200), (200, 1), 0
), reinterpret_tensor(buf3, (64, 100), (100, 1), 0
), primals_6, buf6, primals_4, buf7
def weight_init(m):
if m.__class__.__name__ == 'Linear':
m.weight.data.copy_(kaiming_uniform_(m.weight.data))
m.bias.data.fill_(0)
class VNetNew(nn.Module):
def __init__(self, ob_space, h1=200, h2=100):
super(VNetNew, self).__init__()
self.fc1 = nn.Linear(ob_space.shape[0], h1)
self.fc2 = nn.Linear(h1, h2)
self.output_layer = nn.Linear(h2, 1)
self.apply(weight_init)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.output_layer.weight
primals_7 = self.output_layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
ven-kyoshiro/PILCO-1
|
VNet
| false
| 10,960
|
[
"MIT"
] | 0
|
61c4ef18a6bbecbeb6a10784a7925d31f46dd23b
|
https://github.com/ven-kyoshiro/PILCO-1/tree/61c4ef18a6bbecbeb6a10784a7925d31f46dd23b
|
BinaryLinear
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class LearnableBias(nn.Module):
def __init__(self, out_chn):
super(LearnableBias, self).__init__()
self.bias = nn.Parameter(torch.zeros(out_chn), requires_grad=True)
def forward(self, x):
out = x + self.bias.expand_as(x)
return out
class BinaryLinear(nn.Module):
def __init__(self, in_chn, out_chn, bias=False):
super(BinaryLinear, self).__init__()
self.shape = out_chn, in_chn
self.weight = nn.Parameter(torch.rand(self.shape) * 0.001,
requires_grad=True)
self.bias = None
if bias:
self.bias = LearnableBias(out_chn)
def forward(self, x):
real_weights = self.weight
scaling_factor = torch.mean(abs(real_weights), dim=1, keepdim=True)
scaling_factor = scaling_factor.detach()
binary_weights_no_grad = scaling_factor * torch.sign(real_weights)
cliped_weights = torch.clamp(real_weights, -1.0, 1.0)
binary_weights = binary_weights_no_grad.detach(
) - cliped_weights.detach() + cliped_weights
y = F.linear(x, binary_weights)
if self.bias:
y = self.bias(y)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_chn': 4, 'out_chn': 4}]
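# Illustrative sketch (assumed, not from the original repo): shows the
# straight-through estimator at work: the forward pass uses sign-binarised,
# per-row-scaled weights, while gradients flow through the clamped real weights.
def _binary_linear_example():
    layer = BinaryLinear(in_chn=4, out_chn=4)
    x = torch.rand(4, 4, 4, 4)
    y = layer(x)
    y.sum().backward()
    # The gradient w.r.t. the real weights is nonzero wherever |weight| <= 1
    # (always true here, since the weights are initialised in [0, 0.001)).
    print(y.shape, layer.weight.grad.abs().sum() > 0)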
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_abs_add_clamp_ge_le_logical_and_mean_mul_sign_sub_0(
in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl_math.abs(tmp0)
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.abs(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.abs(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = 4.0
tmp12 = tmp10 / tmp11
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = tmp14 < tmp13
tmp16 = tmp15.to(tl.int8)
tmp17 = tmp13 < tmp14
tmp18 = tmp17.to(tl.int8)
tmp19 = tmp16 - tmp18
tmp20 = tmp19.to(tmp13.dtype)
tmp21 = tmp12 * tmp20
tmp22 = -1.0
tmp23 = triton_helpers.maximum(tmp13, tmp22)
tmp24 = 1.0
tmp25 = triton_helpers.minimum(tmp23, tmp24)
tmp26 = tmp21 - tmp25
tmp27 = tmp26 + tmp25
tmp28 = tmp13 >= tmp22
tmp29 = tmp13 <= tmp24
tmp30 = tmp28 & tmp29
tl.store(out_ptr0 + x2, tmp27, xmask)
tl.store(out_ptr1 + x2, tmp30, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_abs_add_clamp_ge_le_logical_and_mean_mul_sign_sub_0[
grid(16)](primals_1, buf0, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1)
del buf0
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf2
class LearnableBias(nn.Module):
def __init__(self, out_chn):
super(LearnableBias, self).__init__()
self.bias = nn.Parameter(torch.zeros(out_chn), requires_grad=True)
def forward(self, x):
out = x + self.bias.expand_as(x)
return out
class BinaryLinearNew(nn.Module):
def __init__(self, in_chn, out_chn, bias=False):
super(BinaryLinearNew, self).__init__()
self.shape = out_chn, in_chn
self.weight = nn.Parameter(torch.rand(self.shape) * 0.001,
requires_grad=True)
self.bias = None
if bias:
self.bias = LearnableBias(out_chn)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
uzair789/pytorch-retinanet
|
BinaryLinear
| false
| 10,961
|
[
"Apache-2.0"
] | 0
|
cabac159a9877825ef04ab06d3b9a63bdfa4f306
|
https://github.com/uzair789/pytorch-retinanet/tree/cabac159a9877825ef04ab06d3b9a63bdfa4f306
|
ModelNet
|
import torch
import torch.nn as nn
from torch.nn.init import kaiming_uniform_
import torch.nn.functional as F
def weight_init(m):
if m.__class__.__name__ == 'Linear':
m.weight.data.copy_(kaiming_uniform_(m.weight.data))
m.bias.data.fill_(0)
class ModelNet(nn.Module):
def __init__(self, ob_space, ac_space, h1=500, h2=500):
super(ModelNet, self).__init__()
self.fc1 = nn.Linear(ob_space.shape[0] + ac_space.shape[0], h1)
self.fc2 = nn.Linear(h1, h2)
self.output_layer = nn.Linear(h2, ob_space.shape[0])
self.fc1.apply(weight_init)
self.fc2.apply(weight_init)
self.output_layer.apply(weight_init)
def forward(self, ob, ac):
h = torch.cat([ob, ac], dim=-1)
h = F.relu(self.fc1(h))
h = F.relu(self.fc2(h))
return self.output_layer(h)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ob_space': torch.rand([4, 4]), 'ac_space': torch.rand([4,
4])}]
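# Illustrative usage sketch (assumed, not part of the original source): the
# dynamics model concatenates observation and action on the last dimension and
# predicts a tensor of the observation size; only ob_space.shape[0] and
# ac_space.shape[0] are read from the "spaces" passed to the constructor.
def _model_net_example():
    ob_space, ac_space = torch.rand(4, 4), torch.rand(4, 4)
    net = ModelNet(ob_space, ac_space)
    ob, ac = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    print(net(ob, ac).shape)  # torch.Size([4, 4, 4, 4])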
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn.init import kaiming_uniform_
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 32000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 500
x2 = xindex // 2000
x3 = xindex % 2000
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 2016 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 2048 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 500
x1 = xindex // 500
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 500 * (x1 % 4) + 2016 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (500, 8), (8, 1))
assert_size_stride(primals_4, (500,), (1,))
assert_size_stride(primals_5, (500, 500), (500, 1))
assert_size_stride(primals_6, (500,), (1,))
assert_size_stride(primals_7, (4, 500), (500, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 500), (500, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 500), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 500), (8064, 2016, 500, 1),
torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 500), (8192, 2048, 500, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(32000)](buf1,
primals_4, buf2, buf9, 32000, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_4
buf3 = buf1
del buf1
triton_poi_fused_relu_view_2[grid(32000)](buf2, buf3, 32000, XBLOCK
=128, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 500), (500, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (500, 500), (
1, 500), 0), out=buf4)
buf5 = buf2
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 500), (8192, 2048, 500, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(32000)](buf4,
primals_6, buf5, buf8, 32000, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_6
buf6 = buf4
del buf4
triton_poi_fused_relu_view_2[grid(32000)](buf5, buf6, 32000, XBLOCK
=128, num_warps=4, num_stages=1)
del buf5
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf6, reinterpret_tensor(primals_7,
(500, 4), (1, 500), 0), alpha=1, beta=1, out=buf7)
del primals_8
return reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 8), (8, 1), 0
), buf3, buf6, primals_7, buf8, primals_5, buf9
def weight_init(m):
if m.__class__.__name__ == 'Linear':
m.weight.data.copy_(kaiming_uniform_(m.weight.data))
m.bias.data.fill_(0)
class ModelNetNew(nn.Module):
def __init__(self, ob_space, ac_space, h1=500, h2=500):
super(ModelNetNew, self).__init__()
self.fc1 = nn.Linear(ob_space.shape[0] + ac_space.shape[0], h1)
self.fc2 = nn.Linear(h1, h2)
self.output_layer = nn.Linear(h2, ob_space.shape[0])
self.fc1.apply(weight_init)
self.fc2.apply(weight_init)
self.output_layer.apply(weight_init)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_7 = self.output_layer.weight
primals_8 = self.output_layer.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
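# Hypothetical smoke test, not part of the original entry: ModelNetNew routes
# through call(), which launches the compiled Triton kernels, so it needs a CUDA
# device. SimpleNamespace stands in for gym-style ob_space/ac_space objects,
# whose only requirement here is a .shape attribute (4 + 4 = 8 input features).
from types import SimpleNamespace
if torch.cuda.is_available():
    space = SimpleNamespace(shape=(4,))
    model = ModelNetNew(space, space).cuda()
    ob = torch.rand(4, 4, 4, 4, device='cuda')
    ac = torch.rand(4, 4, 4, 4, device='cuda')
    print(model(ob, ac).shape)  # torch.Size([4, 4, 4, 4])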
|
ven-kyoshiro/PILCO-1
|
ModelNet
| false
| 10,962
|
[
"MIT"
] | 0
|
61c4ef18a6bbecbeb6a10784a7925d31f46dd23b
|
https://github.com/ven-kyoshiro/PILCO-1/tree/61c4ef18a6bbecbeb6a10784a7925d31f46dd23b
|
CausalSelfAttention
|
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
class CausalSelfAttention(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
    It is possible to use torch.nn.MultiheadAttention instead, but an explicit
    implementation is included here to show that there is nothing too scary about it.
"""
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
self.proj = nn.Linear(config.n_embd, config.n_embd)
self.n_head = config.n_head
def forward(self, x, layer_past=None):
B, T, C = x.size()
k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(
1, 2)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(
1, 2)
v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(
1, 2)
att = q @ k.transpose(-2, -1) * (1.0 / math.sqrt(k.size(-1)))
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
y = att @ v
y = y.transpose(1, 2).contiguous().view(B, T, C)
y = self.resid_drop(self.proj(y))
return y
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(n_embd=4, n_head=4, attn_pdrop=0.5,
resid_pdrop=0.5)}]
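# Reference sketch, not part of the benchmark entry: the core per-head
# computation that CausalSelfAttention.forward performs (and that the fused
# kernels below implement), without the nn.Linear projections or dropout.
def scaled_dot_product_reference(q, k, v):
    # q, k, v: (B, n_head, T, head_dim); for the test input above, n_head = 4
    # and head_dim = 1.
    att = q @ k.transpose(-2, -1) * (1.0 / math.sqrt(k.size(-1)))
    att = F.softmax(att, dim=-1)
    return att @ v  # (B, n_head, T, head_dim)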
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf4 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_9
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0
), primals_8, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class CausalSelfAttentionNew(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
    It is possible to use torch.nn.MultiheadAttention instead, but an explicit
    implementation is included here to show that there is nothing too scary about it.
"""
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
self.proj = nn.Linear(config.n_embd, config.n_embd)
self.n_head = config.n_head
def forward(self, input_0):
primals_2 = self.key.weight
primals_3 = self.key.bias
primals_4 = self.query.weight
primals_5 = self.query.bias
primals_6 = self.value.weight
primals_7 = self.value.bias
primals_8 = self.proj.weight
primals_9 = self.proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
wangyanqing7590/DeepLayout
|
CausalSelfAttention
| false
| 10,963
|
[
"Apache-2.0"
] | 0
|
cb181c725007e4e6c9710c4f6a15d246ee3e4f61
|
https://github.com/wangyanqing7590/DeepLayout/tree/cb181c725007e4e6c9710c4f6a15d246ee3e4f61
|
HardBinaryConv
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class HardBinaryConv(nn.Module):
def __init__(self, in_chn, out_chn, kernel_size=3, stride=1, padding=1):
super(HardBinaryConv, self).__init__()
self.stride = stride
self.padding = padding
self.number_of_weights = in_chn * out_chn * kernel_size * kernel_size
self.shape = out_chn, in_chn, kernel_size, kernel_size
self.weights = nn.Parameter(torch.rand((self.number_of_weights, 1)) *
0.001, requires_grad=True)
def forward(self, x):
real_weights = self.weights.view(self.shape)
scaling_factor = torch.mean(torch.mean(torch.mean(abs(real_weights),
dim=3, keepdim=True), dim=2, keepdim=True), dim=1, keepdim=True)
scaling_factor = scaling_factor.detach()
binary_weights_no_grad = scaling_factor * torch.sign(real_weights)
cliped_weights = torch.clamp(real_weights, -1.0, 1.0)
binary_weights = binary_weights_no_grad.detach(
) - cliped_weights.detach() + cliped_weights
y = F.conv2d(x, binary_weights, stride=self.stride, padding=self.
padding)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_chn': 4, 'out_chn': 4}]
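# Illustrative sketch, not part of the original entry: in the forward pass the
# effective weights are scaling_factor * sign(w); the clamp term only routes
# gradients (a straight-through estimator), so the latent weights keep training.
conv = HardBinaryConv(in_chn=4, out_chn=4)
w = conv.weights.view(conv.shape)
scale = w.abs().mean(dim=(1, 2, 3), keepdim=True)  # same as the nested means above
x = torch.rand(4, 4, 4, 4)
print(conv(x).shape)  # torch.Size([4, 4, 4, 4]); 3x3 kernel with padding=1 keeps spatial size
print(torch.allclose(conv(x), F.conv2d(x, (scale * torch.sign(w)).detach(),
                                       stride=1, padding=1)))  # expected: True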
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 9 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 9 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 9 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + 9 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr0 + (4 + 9 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (5 + 9 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (6 + 9 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (7 + 9 * x0), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr0 + (8 + 9 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tl_math.abs(tmp0)
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.abs(tmp5)
tmp7 = tmp4 + tmp6
tmp8 = 3.0
tmp9 = tmp7 / tmp8
tmp11 = tl_math.abs(tmp10)
tmp13 = tl_math.abs(tmp12)
tmp14 = tmp11 + tmp13
tmp16 = tl_math.abs(tmp15)
tmp17 = tmp14 + tmp16
tmp18 = tmp17 / tmp8
tmp19 = tmp9 + tmp18
tmp21 = tl_math.abs(tmp20)
tmp23 = tl_math.abs(tmp22)
tmp24 = tmp21 + tmp23
tmp26 = tl_math.abs(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp8
tmp29 = tmp19 + tmp28
tmp30 = tmp29 / tmp8
tl.store(out_ptr0 + x0, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_clamp_ge_le_logical_and_mean_mul_sign_sub_1(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 36
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = tmp10 < tmp9
tmp12 = tmp11.to(tl.int8)
tmp13 = tmp9 < tmp10
tmp14 = tmp13.to(tl.int8)
tmp15 = tmp12 - tmp14
tmp16 = tmp15.to(tmp9.dtype)
tmp17 = tmp8 * tmp16
tmp18 = -1.0
tmp19 = triton_helpers.maximum(tmp9, tmp18)
tmp20 = 1.0
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tmp17 - tmp21
tmp23 = tmp22 + tmp21
tmp24 = tmp9 >= tmp18
tmp25 = tmp9 <= tmp20
tmp26 = tmp24 & tmp25
tl.store(out_ptr0 + x2, tmp23, xmask)
tl.store(out_ptr1 + x2, tmp26, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (144, 1), (1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool)
triton_poi_fused_add_clamp_ge_le_logical_and_mean_mul_sign_sub_1[grid
(144)](buf0, primals_1, buf1, buf3, 144, XBLOCK=128, num_warps=
4, num_stages=1)
del buf0
del primals_1
buf2 = extern_kernels.convolution(primals_2, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
return buf2, primals_2, buf1, buf3
class HardBinaryConvNew(nn.Module):
def __init__(self, in_chn, out_chn, kernel_size=3, stride=1, padding=1):
super(HardBinaryConvNew, self).__init__()
self.stride = stride
self.padding = padding
self.number_of_weights = in_chn * out_chn * kernel_size * kernel_size
self.shape = out_chn, in_chn, kernel_size, kernel_size
self.weights = nn.Parameter(torch.rand((self.number_of_weights, 1)) *
0.001, requires_grad=True)
def forward(self, input_0):
primals_1 = self.weights
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
uzair789/pytorch-retinanet
|
HardBinaryConv
| false
| 10,964
|
[
"Apache-2.0"
] | 0
|
cabac159a9877825ef04ab06d3b9a63bdfa4f306
|
https://github.com/uzair789/pytorch-retinanet/tree/cabac159a9877825ef04ab06d3b9a63bdfa4f306
|
BinaryActivation
|
import torch
import torch.nn as nn
class BinaryActivation(nn.Module):
def __init__(self):
super(BinaryActivation, self).__init__()
def forward(self, x):
out_forward = torch.sign(x)
mask1 = x < -1
mask2 = x < 0
mask3 = x < 1
out1 = -1 * mask1.type(torch.float32) + (x * x + 2 * x) * (1 -
mask1.type(torch.float32))
out2 = out1 * mask2.type(torch.float32) + (-x * x + 2 * x) * (1 -
mask2.type(torch.float32))
out3 = out2 * mask3.type(torch.float32) + 1 * (1 - mask3.type(torch
.float32))
out = out_forward.detach() - out3.detach() + out3
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
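# Sanity sketch, not part of the original entry: the forward value matches
# torch.sign(x) (up to float rounding from the detach trick); the piecewise
# polynomial only shapes the backward gradient, which is bounded by 2 and
# vanishes outside [-1, 1].
act = BinaryActivation()
x = torch.randn(4, 4, 4, 4, requires_grad=True)
out = act(x)
print(torch.allclose(out, torch.sign(x)))  # expected: True
out.sum().backward()
print(float(x.grad.abs().max()))           # at most 2, from the clipped polynomial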
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_add_lt_mul_neg_rsub_sign_sub_0(in_out_ptr0,
in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tmp8 = -1.0
tmp9 = tmp0 < tmp8
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp10 * tmp8
tmp12 = tmp0 * tmp0
tmp13 = 2.0
tmp14 = tmp0 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = 1.0
tmp17 = tmp16 - tmp10
tmp18 = tmp15 * tmp17
tmp19 = tmp11 + tmp18
tmp20 = 0.0
tmp21 = tmp0 < tmp20
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp19 * tmp22
tmp24 = -tmp0
tmp25 = tmp24 * tmp0
tmp26 = tmp25 + tmp14
tmp27 = tmp16 - tmp22
tmp28 = tmp26 * tmp27
tmp29 = tmp23 + tmp28
tmp30 = tmp0 < tmp16
tmp31 = tmp30.to(tl.float32)
tmp32 = tmp29 * tmp31
tmp33 = tmp16 - tmp31
tmp34 = tmp33 * tmp16
tmp35 = tmp32 + tmp34
tmp36 = tmp7 - tmp35
tmp37 = tmp36 + tmp35
tl.store(in_out_ptr0 + x0, tmp37, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused__to_copy_add_lt_mul_neg_rsub_sign_sub_0[grid(256)](
buf1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf1,
class BinaryActivationNew(nn.Module):
def __init__(self):
super(BinaryActivationNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
uzair789/pytorch-retinanet
|
BinaryActivation
| false
| 10,965
|
[
"Apache-2.0"
] | 0
|
cabac159a9877825ef04ab06d3b9a63bdfa4f306
|
https://github.com/uzair789/pytorch-retinanet/tree/cabac159a9877825ef04ab06d3b9a63bdfa4f306
|
QNet
|
import torch
import torch.nn as nn
from torch.nn.init import kaiming_uniform_
from torch.nn.init import uniform_
import torch.nn.functional as F
def mini_weight_init(m):
if m.__class__.__name__ == 'Linear':
m.weight.data.copy_(uniform_(m.weight.data, -0.003, 0.003))
m.bias.data.fill_(0)
def weight_init(m):
if m.__class__.__name__ == 'Linear':
m.weight.data.copy_(kaiming_uniform_(m.weight.data))
m.bias.data.fill_(0)
class QNet(nn.Module):
def __init__(self, ob_space, ac_space, h1=300, h2=400):
super(QNet, self).__init__()
self.fc1 = nn.Linear(ob_space.shape[0], h1)
self.fc2 = nn.Linear(ac_space.shape[0] + h1, h2)
self.output_layer = nn.Linear(h2, 1)
self.fc1.apply(weight_init)
self.fc2.apply(weight_init)
self.output_layer.apply(mini_weight_init)
def forward(self, ob, ac):
h = F.relu(self.fc1(ob))
h = torch.cat([h, ac], dim=-1)
h = F.relu(self.fc2(h))
return self.output_layer(h)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ob_space': torch.rand([4, 4]), 'ac_space': torch.rand([4,
4])}]
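# Hypothetical smoke test, not part of the original entry: ob_space/ac_space
# only need a .shape attribute here, so a SimpleNamespace stands in for a gym
# space.
from types import SimpleNamespace
space = SimpleNamespace(shape=(4,))
qnet = QNet(space, space)
q_value = qnet(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
print(q_value.shape)  # torch.Size([4, 4, 4, 1])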
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn.init import kaiming_uniform_
from torch.nn.init import uniform_
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 19456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 304
x1 = xindex // 304
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 300, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (300 * x1 + x0), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 304, tl.int64)
tmp15 = tl.load(in_ptr2 + (4 * x1 + (-300 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x4 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x4 + 1280 * x2), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (300, 4), (4, 1))
assert_size_stride(primals_2, (300,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (400, 304), (304, 1))
assert_size_stride(primals_6, (400,), (1,))
assert_size_stride(primals_7, (1, 400), (400, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 300), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 304), (4864, 1216, 304, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(19456)](buf0, primals_2, primals_4,
buf1, 19456, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 304), (304, 1), 0),
reinterpret_tensor(primals_5, (304, 400), (1, 304), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf3,
primals_6, buf6, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf3, (64, 400),
(400, 1), 0), reinterpret_tensor(primals_7, (400, 1), (1, 400),
0), alpha=1, beta=1, out=buf5)
del primals_8
buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf0,
primals_2, buf7, 19200, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 304), (304, 1), 0
), reinterpret_tensor(buf3, (64, 400), (400, 1), 0
), primals_7, buf6, primals_5, buf7
def mini_weight_init(m):
if m.__class__.__name__ == 'Linear':
m.weight.data.copy_(uniform_(m.weight.data, -0.003, 0.003))
m.bias.data.fill_(0)
def weight_init(m):
if m.__class__.__name__ == 'Linear':
m.weight.data.copy_(kaiming_uniform_(m.weight.data))
m.bias.data.fill_(0)
class QNetNew(nn.Module):
def __init__(self, ob_space, ac_space, h1=300, h2=400):
super(QNetNew, self).__init__()
self.fc1 = nn.Linear(ob_space.shape[0], h1)
self.fc2 = nn.Linear(ac_space.shape[0] + h1, h2)
self.output_layer = nn.Linear(h2, 1)
self.fc1.apply(weight_init)
self.fc2.apply(weight_init)
self.output_layer.apply(mini_weight_init)
def forward(self, input_0, input_1):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_7 = self.output_layer.weight
primals_8 = self.output_layer.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
ven-kyoshiro/PILCO-1
|
QNet
| false
| 10,966
|
[
"MIT"
] | 0
|
61c4ef18a6bbecbeb6a10784a7925d31f46dd23b
|
https://github.com/ven-kyoshiro/PILCO-1/tree/61c4ef18a6bbecbeb6a10784a7925d31f46dd23b
|
SEModule
|
import torch
from torch import nn
import torch.utils.data
class SEModule(nn.Module):
def __init__(self, channel, reduction=4):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(in_channels=channel, out_channels=channel //
reduction, kernel_size=1, stride=1, padding=0)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=channel // reduction,
out_channels=channel, kernel_size=1, stride=1, padding=0)
self.hardsigmoid = nn.Hardsigmoid()
def forward(self, x):
identity = x
x = self.avg_pool(x)
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.hardsigmoid(x)
out = identity * x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
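# Functional sketch, not part of the original entry: the module rescales each
# channel of the identity branch by a hard-sigmoid gate computed from the
# globally pooled features; the explicit form below should match forward().
se = SEModule(channel=4)
x = torch.rand(4, 4, 4, 4)
gate = se.hardsigmoid(se.conv2(se.relu(se.conv1(x.mean(dim=(2, 3), keepdim=True)))))
print(torch.allclose(se(x), x * gate))  # expected: True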
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_mul_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -3.0
tmp4 = tmp2 > tmp3
tmp5 = 3.0
tmp6 = tmp2 < tmp5
tmp7 = tmp4 & tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(4)](buf3, primals_3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_hardsigmoid_mul_2[grid(256)](primals_1,
buf4, primals_5, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_convolution_hardsigmoid_backward_3[grid(16)](buf4,
primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del primals_5
return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6
class SEModuleNew(nn.Module):
def __init__(self, channel, reduction=4):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(in_channels=channel, out_channels=channel //
reduction, kernel_size=1, stride=1, padding=0)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=channel // reduction,
out_channels=channel, kernel_size=1, stride=1, padding=0)
self.hardsigmoid = nn.Hardsigmoid()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
wangjian123799/L-DETR
|
SEModule
| false
| 10,967
|
[
"Apache-2.0"
] | 0
|
5c21117666d31b45e94019f0a206f82a5cdefafc
|
https://github.com/wangjian123799/L-DETR/tree/5c21117666d31b45e94019f0a206f82a5cdefafc
|
GlobalAvgPool2d
|
import torch
import torch.nn as nn
class GlobalAvgPool2d(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super(GlobalAvgPool2d, self).__init__()
def forward(self, inputs):
in_size = inputs.size()
inputs = inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)
inputs = inputs.view(in_size[0], in_size[1], 1, 1)
return inputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
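# Equivalence sketch, not part of the original entry: the view/mean pattern
# matches adaptive average pooling to a 1x1 output.
gap = GlobalAvgPool2d()
x = torch.rand(4, 4, 4, 4)
print(torch.allclose(gap(x), nn.functional.adaptive_avg_pool2d(x, 1)))  # expected: True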
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
return reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0),
class GlobalAvgPool2dNew(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super(GlobalAvgPool2dNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tim885/DeepDepthRefiner
|
GlobalAvgPool2d
| false
| 10,968
|
[
"MIT"
] | 0
|
a59f376b5b0ff01b0d166ec8d946a20c81a6b190
|
https://github.com/tim885/DeepDepthRefiner/tree/a59f376b5b0ff01b0d166ec8d946a20c81a6b190
|
ActorCritic
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class ActorCritic(nn.Module):
def __init__(self, num_states, num_actions, hidden_size):
super(ActorCritic, self).__init__()
self.num_actions = num_actions
self.fc = nn.Linear(num_states, hidden_size)
self.critic_linear2 = nn.Linear(hidden_size, 1)
self.actor_linear2 = nn.Linear(hidden_size, num_actions)
def forward(self, state):
x = F.relu(self.fc(state))
value = self.critic_linear2(x)
        policy_dist = F.softmax(self.actor_linear2(x), dim=1)  # explicit dim; matches the implicit choice for 4-D input
return value, policy_dist
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_states': 4, 'num_actions': 4, 'hidden_size': 4}]
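# Hypothetical usage sketch, not part of the original entry: the critic head
# returns a scalar value per state and the actor head a softmax policy, taken
# over dim 1 for this 4-D input.
agent = ActorCritic(num_states=4, num_actions=4, hidden_size=4)
value, policy = agent(torch.rand(4, 4, 4, 4))
print(value.shape, policy.shape)  # torch.Size([4, 4, 4, 1]) torch.Size([4, 4, 4, 4])
print(torch.allclose(policy.sum(dim=1), torch.ones(4, 4, 4)))  # expected: True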
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf5
return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0
), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf6, primals_6, primals_4, buf7
class ActorCriticNew(nn.Module):
def __init__(self, num_states, num_actions, hidden_size):
super(ActorCriticNew, self).__init__()
self.num_actions = num_actions
self.fc = nn.Linear(num_states, hidden_size)
self.critic_linear2 = nn.Linear(hidden_size, 1)
self.actor_linear2 = nn.Linear(hidden_size, num_actions)
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_4 = self.critic_linear2.weight
primals_5 = self.critic_linear2.bias
primals_6 = self.actor_linear2.weight
primals_7 = self.actor_linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
|
rmfan/nni
|
ActorCritic
| false
| 10,969
|
[
"MIT"
] | 0
|
727ee1ce47e070061fe3dab8a2da5d3cd5e55546
|
https://github.com/rmfan/nni/tree/727ee1ce47e070061fe3dab8a2da5d3cd5e55546
|
BasicResidualBlock
|
import torch
import torch.nn as nn
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=1, bias=True, normalization=None, activation='prelu'):
super(ConvBlock, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, bias=bias)
if normalization == 'batch':
self.norm = nn.BatchNorm2d(out_channels)
elif normalization == 'instance':
self.norm = nn.InstanceNorm2d(out_channels)
else:
self.norm = None
if activation == 'relu':
self.act = nn.ReLU(inplace=True)
elif activation == 'prelu':
self.act = nn.PReLU()
elif activation == 'lrelu':
self.act = nn.LeakyReLU(negative_slope=0.2, inplace=True)
elif activation == 'tanh':
self.act = nn.Tanh()
elif activation == 'sigmoid':
self.act = nn.Sigmoid()
else:
self.act = None
def forward(self, x):
out = self.conv(x)
if self.norm is not None:
out = self.norm(out)
if self.act is not None:
out = self.act(out)
return out
class BasicResidualBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, channels, stride=1, bias=True,
normalization=None, activation='prelu', downsample=None):
super(BasicResidualBlock, self).__init__()
self.conv1 = ConvBlock(in_channels, channels, stride=stride, bias=
bias, normalization=normalization, activation=activation)
self.conv2 = ConvBlock(channels, channels, bias=bias, normalization
=normalization, activation=None)
self.downsample = downsample
self.prelu = nn.PReLU()
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out += x if self.downsample is None else self.downsample(x)
out = self.prelu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'channels': 4}]
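# Hypothetical smoke test, not part of the original entry: with stride=1 and no
# downsample module the skip connection is the identity, so channel count and
# spatial size are preserved.
block = BasicResidualBlock(in_channels=4, channels=4)
x = torch.rand(4, 4, 4, 4)
print(block(x).shape)  # torch.Size([4, 4, 4, 4])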
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__prelu_kernel_add_convolution_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp9 = tmp8 * tmp4
tmp10 = tl.where(tmp6, tmp4, tmp9)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1,
primals_2, primals_4, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__prelu_kernel_add_convolution_1[grid(256)](buf4,
primals_6, primals_3, primals_7, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_6
return (buf5, primals_1, primals_3, primals_4, primals_5, primals_7,
buf1, buf2, buf4)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=1, bias=True, normalization=None, activation='prelu'):
super(ConvBlock, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, bias=bias)
if normalization == 'batch':
self.norm = nn.BatchNorm2d(out_channels)
elif normalization == 'instance':
self.norm = nn.InstanceNorm2d(out_channels)
else:
self.norm = None
if activation == 'relu':
self.act = nn.ReLU(inplace=True)
elif activation == 'prelu':
self.act = nn.PReLU()
elif activation == 'lrelu':
self.act = nn.LeakyReLU(negative_slope=0.2, inplace=True)
elif activation == 'tanh':
self.act = nn.Tanh()
elif activation == 'sigmoid':
self.act = nn.Sigmoid()
else:
self.act = None
def forward(self, x):
out = self.conv(x)
if self.norm is not None:
out = self.norm(out)
if self.act is not None:
out = self.act(out)
return out
class BasicResidualBlockNew(nn.Module):
expansion = 1
def __init__(self, in_channels, channels, stride=1, bias=True,
normalization=None, activation='prelu', downsample=None):
super(BasicResidualBlockNew, self).__init__()
self.conv1 = ConvBlock(in_channels, channels, stride=stride, bias=
bias, normalization=normalization, activation=activation)
self.conv2 = ConvBlock(channels, channels, bias=bias, normalization
=normalization, activation=None)
self.downsample = downsample
self.prelu = nn.PReLU()
def forward(self, input_0):
primals_1 = self.conv1.conv.weight
primals_2 = self.conv1.conv.bias
primals_4 = self.conv1.act.weight
primals_5 = self.conv2.conv.weight
primals_6 = self.conv2.conv.bias
primals_7 = self.prelu.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
xiqi98/HRDN
|
BasicResidualBlock
| false
| 10,970
|
[
"MIT"
] | 0
|
2140700ab5f3ab2e66678e808203cda68a137207
|
https://github.com/xiqi98/HRDN/tree/2140700ab5f3ab2e66678e808203cda68a137207
|
linear_module
|
import torch
import torch.nn as nn
class linear_module(nn.Module):
"""Module of the linear model. Inherited from nn.Module"""
def __init__(self):
"""linear module init"""
super(linear_module, self).__init__()
self.a = nn.Parameter(torch.tensor(10.0))
self.b = nn.Parameter(torch.tensor(20.0))
def forward(self, x, y):
"""linear module forward"""
return torch.abs(self.a * x + self.b - y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
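# Illustrative check, not part of the original entry: the module returns the
# element-wise absolute residual |a*x + b - y|, which is what one would minimise
# to fit a and b (initialised to 10.0 and 20.0).
model = linear_module()
x = torch.rand(4, 4, 4, 4)
y = torch.rand(4, 4, 4, 4)
print(torch.allclose(model(x, y), (10.0 * x + 20.0 - y).abs()))  # expected: True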
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_add_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 - tmp7
tmp9 = tl_math.abs(tmp8)
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (), ())
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_add_mul_sub_0[grid(256)](primals_1, primals_2,
primals_3, primals_4, buf0, 256, XBLOCK=256, num_warps=4,
num_stages=1)
return buf0, primals_1, primals_2, primals_3, primals_4
class linear_moduleNew(nn.Module):
"""Module of the linear model. Inherited from nn.Module"""
def __init__(self):
"""linear module init"""
super(linear_moduleNew, self).__init__()
self.a = nn.Parameter(torch.tensor(10.0))
self.b = nn.Parameter(torch.tensor(20.0))
def forward(self, input_0, input_1):
primals_1 = self.a
primals_3 = self.b
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
yelingqun/toolkit_demos
|
linear_module
| false
| 10,971
|
[
"MIT"
] | 0
|
12dd9431b2e306312c3b6059356be9a91b68409a
|
https://github.com/yelingqun/toolkit_demos/tree/12dd9431b2e306312c3b6059356be9a91b68409a
|
PositionalEmbedding
|
import math
import torch
class PositionalEmbedding(torch.nn.Module):
def __init__(self):
super(PositionalEmbedding, self).__init__()
def forward(self, inputs):
if inputs.dim() != 3:
raise ValueError('The rank of input must be 3.')
length = inputs.shape[1]
channels = inputs.shape[2]
half_dim = channels // 2
positions = torch.arange(length, dtype=inputs.dtype, device=inputs.
device)
dimensions = torch.arange(half_dim, dtype=inputs.dtype, device=
inputs.device)
scale = math.log(10000.0) / float(half_dim - 1)
dimensions.mul_(-scale).exp_()
scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],
dim=1)
if channels % 2 == 1:
pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,
device=inputs.device)
            signal = torch.cat([signal, pad], dim=1)
return inputs + torch.reshape(signal, [1, -1, channels])
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
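# Reference sketch, not part of the original entry: for channels = 4 the module
# adds the standard sinusoidal encoding sin/cos(pos * exp(-i * ln(10000) /
# (half_dim - 1))) with half_dim = 2; the literal -9.210340371976184 in the
# fused kernel below is exactly -ln(10000).
emb = PositionalEmbedding()
zeros = torch.zeros(1, 4, 4)
signal = emb(zeros)[0]  # bare positional signal, shape (4, 4)
pos = torch.arange(4, dtype=torch.float32).unsqueeze(1)
inv_timescales = torch.exp(-torch.arange(2, dtype=torch.float32) * math.log(10000.0))
reference = torch.cat([torch.sin(pos * inv_timescales),
                       torch.cos(pos * inv_timescales)], dim=1)
print(torch.allclose(signal, reference))  # expected: True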
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp0.to(tl.float32)
tmp6 = -9.210340371976184
tmp7 = tmp5 * tmp6
tmp8 = tl_math.exp(tmp7)
tmp9 = x1
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp10 * tmp8
tmp12 = tl_math.sin(tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp18 = -2 + x0
tmp19 = tmp18.to(tl.float32)
tmp20 = tmp19 * tmp6
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp10 * tmp21
tmp23 = tl_math.cos(tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp14, tmp25)
tl.store(out_ptr0 + x2, tmp26, xmask)
@triton.jit
def triton_poi_fused_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_1[grid(64)](arg0_1, buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
del buf0
return buf1,
class PositionalEmbeddingNew(torch.nn.Module):
def __init__(self):
super(PositionalEmbeddingNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
yafuly/PromptNMT
|
PositionalEmbedding
| false
| 10,972
|
[
"BSD-3-Clause"
] | 0
|
07b1daa7c7609d6f9035b4ac71b962c3c07b2f96
|
https://github.com/yafuly/PromptNMT/tree/07b1daa7c7609d6f9035b4ac71b962c3c07b2f96
|
RGBDiff
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class RGBDiff(nn.Module):
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, image):
"""
Args:
image (torch.Tensor): (N x T x C x H x W)
"""
diffs = []
for i in range(1, image.size(self.dim)):
prev = image.index_select(self.dim, image.new_tensor(i - 1,
dtype=torch.long))
current = image.index_select(self.dim, image.new_tensor(i,
dtype=torch.long))
diffs.append(current - prev)
return torch.cat(diffs, dim=self.dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
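# Equivalence sketch, not part of the original entry: along the chosen dimension
# the stacked frame differences are exactly image[:, 1:] - image[:, :-1]
# (i.e. torch.diff along dim=1), dropping the first frame.
rgb_diff = RGBDiff(dim=1)
image = torch.rand(4, 4, 4, 4)
print(torch.allclose(rgb_diff(image), image[:, 1:] - image[:, :-1]))  # expected: True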
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 3
x0 = xindex % 16
x2 = xindex // 48
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 2, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp13 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp13 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 - tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tl.full([1], 3, tl.int64)
tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tmp22 - tmp23
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp19, tmp24, tmp25)
tmp27 = tl.where(tmp13, tmp18, tmp26)
tmp28 = tl.where(tmp4, tmp9, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(192)](arg0_1, buf0, 192, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class RGBDiffNew(nn.Module):
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
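# Minimal smoke-test sketch (illustrative only; assumes a CUDA device). The fused cat
# kernel emits the T-1 frame differences along dim=1, so (4, 4, 4, 4) -> (4, 3, 4, 4).
if __name__ == '__main__':
    video = torch.rand([4, 4, 4, 4], device='cuda')
    diff = RGBDiffNew()(video)
    print(diff.shape)  # expected: torch.Size([4, 3, 4, 4])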
|
krodyush/training_extensions
|
RGBDiff
| false
| 10,973
|
[
"Apache-2.0"
] | 0
|
542f4004dfbc6fc62a622065367ba4f85a703dd3
|
https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3
|
ESA
|
import torch
from torch import nn
import torch.nn.functional as F
class ESA(nn.Module):
def __init__(self, channel=64, reduction=4, bias=True):
super(ESA, self).__init__()
self.r_nc = channel // reduction
self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1)
self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1)
self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride=
2, padding=0)
self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x1 = self.conv1(x)
x2 = F.max_pool2d(self.conv2(x1), kernel_size=7, stride=3)
x2 = self.relu(self.conv3(x2))
x2 = self.relu(self.conv4(x2))
x2 = F.interpolate(self.conv5(x2), (x.size(2), x.size(3)), mode=
'bilinear', align_corners=False)
x2 = self.conv6(x2 + self.conv21(x1))
return x.mul(self.sigmoid(x2))
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 961 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 5184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused__to_copy_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_clamp_4(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_6(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x5 = xindex // 4096
x2 = xindex // 4096 % 16
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr8 + x6, None)
tmp38 = tl.load(in_ptr9 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 9 * tmp4 + 81 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 9 * tmp4 + 81 * x5), None,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 9 * tmp25 + 81 * x5), None,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 9 * tmp25 + 81 * x5), None,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp36 = tmp21 + tmp35
tmp39 = tmp37 + tmp38
tmp40 = tmp36 + tmp39
tl.store(in_out_ptr0 + x6, tmp40, None)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_7(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp5, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (16, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (16, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_13, (16,), (1,))
assert_size_stride(primals_14, (64, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_15, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 31, 31), (15376, 961, 31, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(61504)](buf3, primals_5, 61504,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = torch.ops.aten.max_pool2d_with_indices.default(buf3, [7, 7],
[3, 3])
buf5 = buf4[0]
buf6 = buf4[1]
del buf4
buf7 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 16, 9, 9), (1296, 81, 9, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_2[grid(5184)](buf8, primals_7,
5184, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 16, 9, 9), (1296, 81, 9, 1))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_2[grid(5184)](buf10, primals_9,
5184, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 16, 9, 9), (1296, 81, 9, 1))
buf12 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_3[grid(64)](buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_4[grid(64)](buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_3[grid(64)](buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_4[grid(64)](buf15, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5[grid(64)](buf16,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5[grid(64)](buf18,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf20 = extern_kernels.convolution(buf1, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf19 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1),
torch.float32)
buf21 = buf19
del buf19
triton_poi_fused__unsafe_index_add_convolution_mul_sub_6[grid(262144)](
buf21, buf12, buf14, buf11, primals_11, buf15, buf16, buf13,
buf18, buf20, primals_13, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del buf11
del buf20
del primals_11
del primals_13
buf22 = extern_kernels.convolution(buf21, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf23 = buf22
del buf22
buf24 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_mul_sigmoid_7[grid(1048576)](buf23,
primals_15, primals_3, buf24, 1048576, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_15
return (buf24, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf1, buf3, buf5, buf6, buf8,
buf10, buf12, buf13, buf14, buf15, buf16, buf18, buf21, buf23)
class ESANew(nn.Module):
def __init__(self, channel=64, reduction=4, bias=True):
super(ESANew, self).__init__()
self.r_nc = channel // reduction
self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1)
self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1)
self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride=
2, padding=0)
self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_12 = self.conv21.weight
primals_5 = self.conv21.bias
primals_4 = self.conv2.weight
primals_7 = self.conv2.bias
primals_6 = self.conv3.weight
primals_9 = self.conv3.bias
primals_8 = self.conv4.weight
primals_11 = self.conv4.bias
primals_10 = self.conv5.weight
primals_13 = self.conv5.bias
primals_14 = self.conv6.weight
primals_15 = self.conv6.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
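# Minimal smoke-test sketch (illustrative only; assumes a CUDA device and the
# default channel=64 configuration the kernels were traced with).
if __name__ == '__main__':
    x = torch.rand([4, 64, 64, 64], device='cuda')
    attn = ESANew(channel=64, reduction=4).cuda()
    print(attn(x).shape)  # expected: torch.Size([4, 64, 64, 64])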
|
samuro95/Prox-PnP
|
ESA
| false
| 10,974
|
[
"MIT"
] | 0
|
c05a48a586f0ef27c8ddc14e0a4c2c3d6814f8c9
|
https://github.com/samuro95/Prox-PnP/tree/c05a48a586f0ef27c8ddc14e0a4c2c3d6814f8c9
|
GatedLinearUnit
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class GatedLinearUnit(nn.Module):
def __init__(self, input_size, output_size, dropout=0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.w4 = nn.Linear(input_size, output_size)
self.w5 = nn.Linear(input_size, output_size)
self.act = nn.Sigmoid()
def forward(self, x):
x = self.dropout(x)
x = self.act(self.w4(x)) * self.w5(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, buf1, buf2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return buf2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, buf1
class GatedLinearUnitNew(nn.Module):
def __init__(self, input_size, output_size, dropout=0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.w4 = nn.Linear(input_size, output_size)
self.w5 = nn.Linear(input_size, output_size)
self.act = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.w4.weight
primals_3 = self.w4.bias
primals_4 = self.w5.weight
primals_5 = self.w5.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
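# Minimal smoke-test sketch (illustrative only; assumes a CUDA device and the
# 4-dimensional sizes the kernels were traced with).
if __name__ == '__main__':
    x = torch.rand([4, 4, 4, 4], device='cuda')
    glu = GatedLinearUnitNew(input_size=4, output_size=4).cuda()
    print(glu(x).shape)  # expected: torch.Size([4, 4, 4, 4])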
|
krodyush/training_extensions
|
GatedLinearUnit
| false
| 10,975
|
[
"Apache-2.0"
] | 0
|
542f4004dfbc6fc62a622065367ba4f85a703dd3
|
https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3
|
PositionwiseFeedForward
|
import torch
import torch.nn as nn
import torch.cuda
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
return out.contiguous().view(size[0], size[1], -1)
class BottleLinear(Bottle, nn.Linear):
pass
class LayerNorm(nn.Module):
""" Layer normalization module """
def __init__(self, d_hid, eps=0.001):
super(LayerNorm, self).__init__()
self.eps = eps
self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True)
self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True)
def forward(self, z):
if z.size(1) == 1:
return z
mu = torch.mean(z, dim=1)
sigma = torch.std(z, dim=1)
if mu.dim() == 1:
mu = mu.unsqueeze(1)
sigma = sigma.unsqueeze(1)
ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
ln_out = ln_out.mul(self.a_2.expand_as(ln_out)) + self.b_2.expand_as(
ln_out)
return ln_out
class BottleLayerNorm(Bottle, LayerNorm):
pass
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network."""
def __init__(self, size, hidden_size, dropout=0.1):
"""
Args:
size(int): the size of input for the first-layer of the FFN.
hidden_size(int): the hidden layer size of the second-layer
                of the FFN.
            dropout(float): dropout probability (0-1.0).
"""
super(PositionwiseFeedForward, self).__init__()
self.w_1 = BottleLinear(size, hidden_size)
self.w_2 = BottleLinear(hidden_size, size)
self.layer_norm = BottleLayerNorm(size)
self.dropout = nn.Dropout(dropout)
self.relu = nn.ReLU()
def forward(self, x):
residual = x
output = self.dropout(self.w_2(self.relu(self.w_1(x))))
return self.layer_norm(output + residual)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'size': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.cuda
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_mean_std_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(in_out_ptr0 + x0, tmp29, xmask)
tl.store(out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = libdevice.sqrt(tmp5)
tmp7 = 0.001
tmp8 = tmp6 + tmp7
tmp9 = tmp4 / tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf4 = reinterpret_tensor(buf3, (4,), (1,), 0)
del buf3
buf5 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_add_mean_std_1[grid(4)](buf4, buf2, primals_1,
buf5, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_div_mul_sub_2[grid(16)](buf2, primals_1, buf5,
buf4, primals_6, primals_7, buf6, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf4
del buf5
del primals_7
return buf6, primals_1, primals_6, buf1, buf2, primals_4
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
return out.contiguous().view(size[0], size[1], -1)
class BottleLinear(Bottle, nn.Linear):
pass
class LayerNorm(nn.Module):
""" Layer normalization module """
def __init__(self, d_hid, eps=0.001):
super(LayerNorm, self).__init__()
self.eps = eps
self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True)
self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True)
def forward(self, z):
if z.size(1) == 1:
return z
mu = torch.mean(z, dim=1)
sigma = torch.std(z, dim=1)
if mu.dim() == 1:
mu = mu.unsqueeze(1)
sigma = sigma.unsqueeze(1)
ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
ln_out = ln_out.mul(self.a_2.expand_as(ln_out)) + self.b_2.expand_as(
ln_out)
return ln_out
class BottleLayerNorm(Bottle, LayerNorm):
pass
class PositionwiseFeedForwardNew(nn.Module):
""" A two-layer Feed-Forward-Network."""
def __init__(self, size, hidden_size, dropout=0.1):
"""
Args:
size(int): the size of input for the first-layer of the FFN.
hidden_size(int): the hidden layer size of the second-layer
                of the FFN.
            dropout(float): dropout probability (0-1.0).
"""
super(PositionwiseFeedForwardNew, self).__init__()
self.w_1 = BottleLinear(size, hidden_size)
self.w_2 = BottleLinear(hidden_size, size)
self.layer_norm = BottleLayerNorm(size)
self.dropout = nn.Dropout(dropout)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_1 = self.w_1.weight
primals_3 = self.w_1.bias
primals_2 = self.w_2.weight
primals_5 = self.w_2.bias
primals_6 = self.layer_norm.a_2
primals_7 = self.layer_norm.b_2
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
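# Minimal smoke-test sketch (illustrative only; assumes a CUDA device and the
# size=4, hidden_size=4 configuration the kernels were traced with).
if __name__ == '__main__':
    x = torch.rand([4, 4], device='cuda')
    ffn = PositionwiseFeedForwardNew(size=4, hidden_size=4).cuda()
    print(ffn(x).shape)  # expected: torch.Size([4, 4])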
|
wenh06/OpenAttack
|
PositionwiseFeedForward
| false
| 10,976
|
[
"MIT"
] | 0
|
412d1b2777dea5009fe97ac264044bfda65dfa5d
|
https://github.com/wenh06/OpenAttack/tree/412d1b2777dea5009fe97ac264044bfda65dfa5d
|
ScaledDotProductAttention
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class ScaledDotProductAttention(nn.Module):
def __init__(self, dropout=0, scale=True):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.softmax = nn.Softmax(dim=2)
self.scale = scale
def forward(self, q, k, v, mask=None):
attn = torch.bmm(q, k.permute(0, 2, 1))
if self.scale:
            dimension = torch.sqrt(torch.tensor(k.shape[-1]))
            attn = attn / dimension
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 2.0
tmp2 = 0.0
tmp3 = tmp1 >= tmp2
tmp4 = 1.0
tmp5 = -1.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp8 * tmp6
tmp11 = tmp10 * tmp6
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp6
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp6
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp7 - tmp18
tmp20 = tmp6 * tmp1
tmp21 = tmp19 / tmp20
tmp22 = tl_math.exp(tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_sqrt_0[grid(64)](buf0, buf1, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return buf3, buf2
class ScaledDotProductAttentionNew(nn.Module):
def __init__(self, dropout=0, scale=True):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.softmax = nn.Softmax(dim=2)
self.scale = scale
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
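# Minimal smoke-test sketch (illustrative only; assumes a CUDA device). The call
# returns both the attended values and the attention weights, mirroring the eager module.
if __name__ == '__main__':
    q = torch.rand([4, 4, 4], device='cuda')
    k = torch.rand([4, 4, 4], device='cuda')
    v = torch.rand([4, 4, 4], device='cuda')
    out, attn = ScaledDotProductAttentionNew()(q, k, v)
    print(out.shape, attn.shape)  # expected: torch.Size([4, 4, 4]) for both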
|
krodyush/training_extensions
|
ScaledDotProductAttention
| false
| 10,977
|
[
"Apache-2.0"
] | 0
|
542f4004dfbc6fc62a622065367ba4f85a703dd3
|
https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3
|
GLU
|
import torch
import torch.nn as nn
class GLU(nn.Module):
def __init__(self, in_dim):
super(GLU, self).__init__()
self.sigmoid = nn.Sigmoid()
self.linear = nn.Linear(in_dim, in_dim)
def forward(self, x):
lin = self.linear(x.permute(0, 2, 3, 1))
lin = lin.permute(0, 3, 1, 2)
sig = self.sigmoid(x)
res = lin * sig
return res
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_out_ptr0, in_ptr0, in_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_out_ptr0 + (x2 + 4 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp2 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 4 * y3), tmp5, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 1, 16, 4), 0)
del buf1
triton_poi_fused_mul_sigmoid_1[grid(64, 4)](buf2, primals_3,
primals_1, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class GLUNew(nn.Module):
def __init__(self, in_dim):
super(GLUNew, self).__init__()
self.sigmoid = nn.Sigmoid()
self.linear = nn.Linear(in_dim, in_dim)
def forward(self, input_0):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
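# Minimal smoke-test sketch (illustrative only; assumes a CUDA device and in_dim=4,
# matching the shapes the kernels were traced with).
if __name__ == '__main__':
    x = torch.rand([4, 4, 4, 4], device='cuda')
    glu = GLUNew(in_dim=4).cuda()
    print(glu(x).shape)  # expected: torch.Size([4, 4, 4, 4])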
|
venisehannoyer/Hear-me-GirlsInAI-team1
|
GLU
| false
| 10,978
|
[
"Apache-2.0"
] | 0
|
664b3af4befe9b73c28d4362969699bc2254bdf9
|
https://github.com/venisehannoyer/Hear-me-GirlsInAI-team1/tree/664b3af4befe9b73c28d4362969699bc2254bdf9
|
LengthPredictor
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class LengthPredictionLoss(nn.Module):
def __init__(self, max_delta=50):
super().__init__()
self.max_delta = max_delta
def forward(self, logits, src_mask, tgt_mask):
src_lens, tgt_lens = src_mask.sum(1), tgt_mask.sum(1)
delta = (tgt_lens - src_lens + self.max_delta).clamp(0, self.
max_delta * 2 - 1).long()
loss = F.cross_entropy(logits, delta, reduction='mean')
return {'length_prediction_loss': loss}
class LengthPredictor(nn.Module):
def __init__(self, hidden_size, max_delta=50):
super().__init__()
self.hidden_size = hidden_size
self.max_delta = max_delta
self._init_modules()
self._init_loss()
def forward(self, src, src_mask, tgt_len=None):
src_mean = self._compute_mean_emb(src, src_mask)
logits, delta = self._predict_delta(src_mean)
return logits, delta
def _predict_delta(self, src):
logits = self.length_predictor(src)
delta = logits.argmax(-1) - float(self.max_delta)
return logits, delta
def _compute_mean_emb(self, src, src_mask):
mean_emb = (src * src_mask[:, :, None]).sum(1) / src_mask.sum(1)[:,
None]
return mean_emb
def _init_modules(self):
self.length_predictor = nn.Linear(self.hidden_size, self.max_delta * 2)
def _init_loss(self):
self.loss = LengthPredictionLoss(self.max_delta)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x0 = xindex % 16
x2 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tmp1 + tmp4
tmp16 = tmp15 + tmp8
tmp17 = tmp16 + tmp12
tmp18 = tmp14 / tmp17
tl.store(out_ptr0 + x4, tmp18, xmask)
@triton.jit
def triton_per_fused_argmax_sub_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
rnumel = 100
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 100 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = tl.broadcast_to(rindex, tmp3.shape)
_, tmp2_tmp = triton_helpers.max_with_index(tmp3, tmp4, 1)
tmp2 = tmp2_tmp[:, None]
tmp5 = tmp2.to(tl.float32)
tmp6 = 50.0
tmp7 = tmp5 - tmp6
tl.store(out_ptr1 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (100, 4), (4, 1))
assert_size_stride(primals_4, (100,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_sum_0[grid(256)](primals_2, primals_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_3, (4, 100), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused_argmax_sub_1[grid(64)](buf1, buf3, 64, 100, XBLOCK
=8, num_warps=8, num_stages=1)
return reinterpret_tensor(buf1, (4, 4, 4, 100), (1600, 400, 100, 1), 0
), buf3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class LengthPredictionLoss(nn.Module):
def __init__(self, max_delta=50):
super().__init__()
self.max_delta = max_delta
def forward(self, logits, src_mask, tgt_mask):
src_lens, tgt_lens = src_mask.sum(1), tgt_mask.sum(1)
delta = (tgt_lens - src_lens + self.max_delta).clamp(0, self.
max_delta * 2 - 1).long()
loss = F.cross_entropy(logits, delta, reduction='mean')
return {'length_prediction_loss': loss}
class LengthPredictorNew(nn.Module):
def __init__(self, hidden_size, max_delta=50):
super().__init__()
self.hidden_size = hidden_size
self.max_delta = max_delta
self._init_modules()
self._init_loss()
def _predict_delta(self, src):
logits = self.length_predictor(src)
delta = logits.argmax(-1) - float(self.max_delta)
return logits, delta
def _compute_mean_emb(self, src, src_mask):
mean_emb = (src * src_mask[:, :, None]).sum(1) / src_mask.sum(1)[:,
None]
return mean_emb
def _init_modules(self):
self.length_predictor = nn.Linear(self.hidden_size, self.max_delta * 2)
def _init_loss(self):
self.loss = LengthPredictionLoss(self.max_delta)
def forward(self, input_0, input_1):
primals_3 = self.length_predictor.weight
primals_4 = self.length_predictor.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
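# Minimal smoke-test sketch (illustrative only; assumes a CUDA device and
# hidden_size=4 with the default max_delta=50, as traced). Returns (logits, delta).
if __name__ == '__main__':
    src = torch.rand([4, 4, 4, 4], device='cuda')
    src_mask = torch.rand([4, 4, 4, 4], device='cuda')
    lp = LengthPredictorNew(hidden_size=4).cuda()
    logits, delta = lp(src, src_mask)
    print(logits.shape, delta.shape)  # expected: (4, 4, 4, 100) and (4, 4, 4)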
|
krodyush/training_extensions
|
LengthPredictor
| false
| 10,979
|
[
"Apache-2.0"
] | 0
|
542f4004dfbc6fc62a622065367ba4f85a703dd3
|
https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3
|
K1TemporalBlock
|
import torch
from torch import nn
from torch.nn.utils import weight_norm
class K1TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, dropout=0.2):
super(K1TemporalBlock, self).__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, 1))
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, 1))
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.relu1, self.dropout1,
self.conv2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1
) if n_inputs != n_outputs else None
self.relu = nn.ReLU()
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_inputs': 4, 'n_outputs': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torch.nn.utils import weight_norm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = 0.0
tmp9 = tmp4 <= tmp8
tmp10 = tmp7 <= tmp8
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp9, xmask)
tl.store(out_ptr2 + x2, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_6, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0[grid(4)](primals_2, buf0,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(16)](primals_2,
primals_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1,
4, 4), (16, 4, 1), 0), buf1, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf2, (1, 4, 4), (16, 4, 1))
buf3 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_0[grid(4)](primals_6, buf3,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(16)](primals_6,
primals_5, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0)
del buf2
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(16)](buf5,
primals_3, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf6 = extern_kernels.convolution(reinterpret_tensor(buf5, (1, 4, 4
), (0, 4, 1), 0), buf4, stride=(1,), padding=(0,), dilation=(1,
), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf6, (1, 4, 4), (16, 4, 1))
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_3[grid(16)](buf6,
primals_7, primals_4, buf7, buf9, buf8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf6
del primals_7
return (buf7, buf1, buf4, primals_1, primals_2, primals_5, primals_6,
buf0, buf1, reinterpret_tensor(primals_4, (1, 4, 4), (16, 4, 1), 0),
buf3, buf4, reinterpret_tensor(buf5, (1, 4, 4), (16, 4, 1), 0),
buf8, buf9, buf10)
class K1TemporalBlockNew(nn.Module):
def __init__(self, n_inputs, n_outputs, dropout=0.2):
super(K1TemporalBlockNew, self).__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, 1))
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, 1))
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.relu1, self.dropout1,
self.conv2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1
) if n_inputs != n_outputs else None
self.relu = nn.ReLU()
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, input_0):
primals_3 = self.conv1.bias
primals_1 = self.conv1.weight_g
primals_2 = self.conv1.weight_v
primals_7 = self.conv2.bias
primals_5 = self.conv2.weight_g
primals_6 = self.conv2.weight_v
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
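# Minimal smoke-test sketch (illustrative only; assumes a CUDA device and the
# n_inputs=4, n_outputs=4 configuration the kernels were traced with).
if __name__ == '__main__':
    x = torch.rand([4, 4], device='cuda')
    block = K1TemporalBlockNew(n_inputs=4, n_outputs=4).cuda()
    print(block(x).shape)  # expected: torch.Size([4, 4])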
|
whdc/TCN
|
K1TemporalBlock
| false
| 10,980
|
[
"MIT"
] | 0
|
182a57da7790a8ddb3a94cc3c33e1476551e0b54
|
https://github.com/whdc/TCN/tree/182a57da7790a8ddb3a94cc3c33e1476551e0b54
|
PositionwiseFeedForward
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class Identity(nn.Module):
def forward(self, input_):
return input_
class LayerNormalization(nn.Module):
""" Layer normalization module """
def __init__(self, d_hid, eps=0.001):
super(LayerNormalization, self).__init__()
self.eps = eps
self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True)
self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True)
def forward(self, z):
if z.size(1) == 1:
return z
mu = torch.mean(z, keepdim=True, dim=-1)
sigma = torch.std(z, keepdim=True, dim=-1)
ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(
ln_out)
return ln_out
class PositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_hid, d_inner_hid, dropout=0.1, layer_norm=True):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Conv1d(d_hid, d_inner_hid, 1)
self.w_2 = nn.Conv1d(d_inner_hid, d_hid, 1)
self.layer_norm = LayerNormalization(d_hid
) if layer_norm else Identity()
self.dropout = nn.Dropout(dropout)
self.relu = nn.ReLU()
def forward(self, x):
residual = x
output = self.relu(self.w_1(x.transpose(1, 2)))
output = self.w_2(output).transpose(2, 1)
output = self.dropout(output)
return self.layer_norm(output + residual)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_hid': 4, 'd_inner_hid': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(in_out_ptr0 + x2, tmp29, xmask)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2 + 4 * y1), xmask & ymask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr3 + (x2 + 4 * y1), xmask & ymask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = libdevice.sqrt(tmp5)
tmp7 = 0.001
tmp8 = tmp6 + tmp7
tmp9 = tmp4 / tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(64)](buf2, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = buf5
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_mean_std_3[grid(16)](buf6, buf4, primals_1,
buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0)
del buf0
triton_poi_fused_add_div_mul_sub_4[grid(16, 4)](buf4, primals_1,
buf7, buf6, primals_6, primals_7, buf8, 16, 4, XBLOCK=4, YBLOCK
=16, num_warps=1, num_stages=1)
del buf6
del buf7
del primals_7
return buf8, primals_1, primals_2, primals_4, primals_6, buf2, buf4
class Identity(nn.Module):
def forward(self, input_):
return input_
class LayerNormalization(nn.Module):
""" Layer normalization module """
def __init__(self, d_hid, eps=0.001):
super(LayerNormalization, self).__init__()
self.eps = eps
self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True)
self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True)
def forward(self, z):
if z.size(1) == 1:
return z
mu = torch.mean(z, keepdim=True, dim=-1)
sigma = torch.std(z, keepdim=True, dim=-1)
ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(
ln_out)
return ln_out
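# Editor's note (hedged): unlike nn.LayerNorm, this module adds eps to the standard
# deviation rather than to the variance, and torch.std here is the unbiased (n-1)
# estimator over the last dimension; the fused kernel above divides the summed
# squared deviations by 3.0 for a size-4 axis, which matches that choice.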
class PositionwiseFeedForwardNew(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_hid, d_inner_hid, dropout=0.1, layer_norm=True):
super(PositionwiseFeedForwardNew, self).__init__()
self.w_1 = nn.Conv1d(d_hid, d_inner_hid, 1)
self.w_2 = nn.Conv1d(d_inner_hid, d_hid, 1)
self.layer_norm = LayerNormalization(d_hid
) if layer_norm else Identity()
self.dropout = nn.Dropout(dropout)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_2 = self.w_1.weight
primals_3 = self.w_1.bias
primals_4 = self.w_2.weight
primals_5 = self.w_2.bias
primals_6 = self.layer_norm.a_2
primals_7 = self.layer_norm.b_2
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| krodyush/training_extensions | PositionwiseFeedForward | false | 10,981 | ["Apache-2.0"] | 0 | 542f4004dfbc6fc62a622065367ba4f85a703dd3 | https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3 |
StateInitZero
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class StateInitZero(nn.Module):
def __init__(self, hidden_size, num_layers=1, batch_first=False):
super(StateInitZero, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.batch_first = batch_first
def forward(self, input: 'torch.Tensor'):
h0 = input.new_zeros((self.num_layers, input.size(0 if self.
batch_first else 1), self.hidden_size))
c0 = input.new_zeros((self.num_layers, input.size(0 if self.
batch_first else 1), self.hidden_size))
return h0, c0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
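# --- hedged usage sketch (editor's addition, not part of the original repo) ---
# StateInitZero only reads the input's dtype/device and its batch dimension (dim 1
# when batch_first=False), so both returned states have shape
# (num_layers, batch, hidden_size); the underscore names below are illustrative only.
_init = StateInitZero(hidden_size=4, num_layers=1, batch_first=False)
_h0, _c0 = _init(torch.rand(5, 3, 8))
assert _h0.shape == (1, 3, 4) and _c0.shape == (1, 3, 4)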
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_new_zeros_0[grid(16)](buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf0, buf1
class StateInitZeroNew(nn.Module):
def __init__(self, hidden_size, num_layers=1, batch_first=False):
super(StateInitZeroNew, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.batch_first = batch_first
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
| krodyush/training_extensions | StateInitZero | false | 10,982 | ["Apache-2.0"] | 0 | 542f4004dfbc6fc62a622065367ba4f85a703dd3 | https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3 |
CustomLSTMCell
|
import torch
import torch.nn as nn
class CustomLSTMCell(nn.Module):
def __init__(self, input_size, hidden_size):
super().__init__()
self.lstm = nn.LSTMCell(input_size, hidden_size)
def forward(self, x):
output = self.lstm(x)
return output[0]
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
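# --- hedged usage sketch (editor's addition, not part of the original repo) ---
# nn.LSTMCell called without an explicit (h, c) pair starts from zero states; this
# wrapper returns only the updated hidden state, so the result is (batch, hidden_size).
_cell = CustomLSTMCell(input_size=4, hidden_size=4)
_h = _cell(torch.rand(3, 4))
assert _h.shape == (3, 4)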
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 16),
(1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 16), (1,
4), 0), out=buf2)
del primals_3
buf3 = torch.ops.aten._thnn_fused_lstm_cell.default(buf1, buf2,
buf0, primals_4, primals_5)
del buf1
del buf2
del primals_4
del primals_5
buf4 = buf3[0]
buf5 = buf3[1]
buf6 = buf3[2]
del buf3
return buf4, primals_1, buf0, buf5, buf6
class CustomLSTMCellNew(nn.Module):
def __init__(self, input_size, hidden_size):
super().__init__()
self.lstm = nn.LSTMCell(input_size, hidden_size)
def forward(self, input_0):
primals_2 = self.lstm.weight_ih
primals_3 = self.lstm.weight_hh
primals_4 = self.lstm.bias_ih
primals_5 = self.lstm.bias_hh
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| vr100/rl-trading | CustomLSTMCell | false | 10,983 | ["MIT"] | 0 | 0e3383e383bdfd46c40df65f3c709ba88169153c | https://github.com/vr100/rl-trading/tree/0e3383e383bdfd46c40df65f3c709ba88169153c |
GateAddNorm
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class GatedLinearUnit(nn.Module):
def __init__(self, input_size, output_size, dropout=0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.w4 = nn.Linear(input_size, output_size)
self.w5 = nn.Linear(input_size, output_size)
self.act = nn.Sigmoid()
def forward(self, x):
x = self.dropout(x)
x = self.act(self.w4(x)) * self.w5(x)
return x
class GateAddNorm(nn.Module):
def __init__(self, input_size, output_size, dropout):
super().__init__()
self.glu = GatedLinearUnit(input_size, output_size, dropout)
self.norm = nn.LayerNorm(output_size)
def forward(self, x, skip):
return self.norm(self.glu(x) + skip)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4, 'dropout': 0.5}]
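# --- hedged sketch (editor's addition, not part of the original repo) ---
# GateAddNorm computes LayerNorm(sigmoid(W4 x) * (W5 x) + skip). Dropout makes the
# forward pass stochastic, so the manual comparison below is done in eval mode.
_m = GateAddNorm(input_size=4, output_size=4, dropout=0.5).eval()
_x, _skip = torch.rand(2, 4), torch.rand(2, 4)
_manual = _m.norm(torch.sigmoid(_m.glu.w4(_x)) * _m.glu.w5(_x) + _skip)
assert torch.allclose(_m(_x, _skip), _manual)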
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tmp7 = tl.sigmoid(tmp6)
tmp9 = tmp7 * tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp14 = tl.sigmoid(tmp13)
tmp16 = tmp14 * tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp21 = tl.sigmoid(tmp20)
tmp23 = tmp21 * tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 - tmp6
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp12 = tmp7 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_native_layer_norm_sigmoid_0[grid(64)](buf0,
buf1, primals_6, buf2, buf3, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(256)](buf0,
buf1, primals_6, buf2, buf3, primals_7, primals_8, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf2
del buf3
del primals_8
return buf4, primals_6, primals_7, reinterpret_tensor(primals_1, (64, 4
), (4, 1), 0), buf0, buf1
class GatedLinearUnit(nn.Module):
def __init__(self, input_size, output_size, dropout=0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.w4 = nn.Linear(input_size, output_size)
self.w5 = nn.Linear(input_size, output_size)
self.act = nn.Sigmoid()
def forward(self, x):
x = self.dropout(x)
x = self.act(self.w4(x)) * self.w5(x)
return x
class GateAddNormNew(nn.Module):
def __init__(self, input_size, output_size, dropout):
super().__init__()
self.glu = GatedLinearUnit(input_size, output_size, dropout)
self.norm = nn.LayerNorm(output_size)
def forward(self, input_0, input_1):
primals_2 = self.glu.w4.weight
primals_3 = self.glu.w4.bias
primals_4 = self.glu.w5.weight
primals_5 = self.glu.w5.bias
primals_7 = self.norm.weight
primals_8 = self.norm.bias
primals_1 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| krodyush/training_extensions | GateAddNorm | false | 10,984 | ["Apache-2.0"] | 0 | 542f4004dfbc6fc62a622065367ba4f85a703dd3 | https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3 |
SpatialAttention
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class SpatialAttention(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.activation = nn.Sigmoid()
self.maxpool = nn.MaxPool2d((1, in_channels))
self.avgpool = nn.AvgPool2d((1, in_channels))
self.conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=7,
padding=3)
def forward(self, x):
maxpool = self.maxpool(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
avgpool = self.avgpool(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
convolved = self.conv(maxpool + avgpool)
out = self.activation(convolved)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
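# --- hedged sketch (editor's addition, not part of the original repo) ---
# The permute + MaxPool2d((1, C)) / AvgPool2d((1, C)) pair reduces over the channel
# axis, giving per-pixel channel max and mean maps of shape (N, 1, H, W); the padded
# 7x7 convolution plus sigmoid then yields an attention map with values in (0, 1).
_att = SpatialAttention(in_channels=4)
_out = _att(torch.rand(2, 4, 8, 8))
assert _out.shape == (2, 1, 8, 8) and (_out > 0).all() and (_out < 1).all()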
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 + tmp0
tmp8 = tmp3 + tmp7
tmp9 = tmp5 + tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp12 = tmp6 + tmp11
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 7, 7), (49, 49, 7, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 1, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_sigmoid_1[grid(64)](buf2, primals_3,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return buf2, primals_2, buf0, buf2
class SpatialAttentionNew(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.activation = nn.Sigmoid()
self.maxpool = nn.MaxPool2d((1, in_channels))
self.avgpool = nn.AvgPool2d((1, in_channels))
self.conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=7,
padding=3)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| krodyush/training_extensions | SpatialAttention | false | 10,985 | ["Apache-2.0"] | 0 | 542f4004dfbc6fc62a622065367ba4f85a703dd3 | https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3 |
LogitKLDivLoss
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class LogitKLDivLoss(nn.Module):
"""Kullback–Leibler divergence loss. Inputs predicted and ground truth logits.
Args:
T (float): Softmax temperature.
"""
def __init__(self, T=1):
super().__init__()
self.T = T
def forward(self, p_logits, q_logits, **kwargs):
log_p = F.log_softmax(p_logits / self.T, dim=1)
q = F.softmax(q_logits / self.T, dim=1)
return F.kl_div(log_p, q, reduction='batchmean') * self.T ** 2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
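# --- hedged sketch (editor's addition, not part of the original repo) ---
# When the predicted and target logits coincide, the two softened distributions are
# identical, so the batchmean-reduced KL divergence is zero for any temperature T.
_loss_fn = LogitKLDivLoss(T=2)
_logits = torch.randn(8, 10)
assert torch.allclose(_loss_fn(_logits, _logits), torch.tensor(0.0), atol=1e-6)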
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr1 + r3, None)
tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp36 = 0.25
tmp37 = tmp35 * tmp36
tmp38 = 1.0
tmp39 = tmp37 * tmp38
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp39, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1)
](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf2
return buf4,
class LogitKLDivLossNew(nn.Module):
"""Kullback–Leibler divergence loss. Inputs predicted and ground truth logits.
Args:
T (float): Softmax temperature.
"""
def __init__(self, T=1):
super().__init__()
self.T = T
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| krodyush/training_extensions | LogitKLDivLoss | false | 10,986 | ["Apache-2.0"] | 0 | 542f4004dfbc6fc62a622065367ba4f85a703dd3 | https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3 |
ResBlock
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class ResBlock(nn.Module):
def __init__(self, num_of_channels):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=num_of_channels, out_channels=
num_of_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.in1 = nn.InstanceNorm2d(num_of_channels, affine=True)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(in_channels=num_of_channels, out_channels=
num_of_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.in2 = nn.InstanceNorm2d(num_of_channels, affine=True)
def forward(self, x):
orig = x
output = self.relu(self.in1(self.conv1(x)))
output = self.in2(self.conv2(output))
output = torch.add(output, orig)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_of_channels': 4}]
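# --- hedged sketch (editor's addition, not part of the original repo) ---
# The 3x3 convolutions use stride 1 and padding 1, so spatial size is preserved and
# the residual addition torch.add(output, orig) is shape-compatible for any H and W.
_block = ResBlock(num_of_channels=4)
_x = torch.rand(2, 4, 7, 5)
assert _block(_x).shape == _x.shape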
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_repeat_0(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 16.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask)
tl.store(out_ptr4 + x0, tmp23, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_repeat_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 16.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask)
tl.store(out_ptr4 + x0, tmp23, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((16,), (1,), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_relu_repeat_0[grid(16)](
primals_3, buf0, primals_4, buf1, buf2, buf6, buf5, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del primals_3
del primals_4
buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = empty_strided_cuda((16,), (1,), torch.float32)
buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_add_repeat_1[grid(16)](
primals_6, buf7, primals_7, primals_1, buf8, buf9, buf13, buf12,
16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_6
del primals_7
return (buf13, primals_1, primals_2, primals_5, buf0, buf1,
reinterpret_tensor(buf5, (16,), (1,), 0), buf6, buf7, buf8,
reinterpret_tensor(buf12, (16,), (1,), 0), reinterpret_tensor(buf9,
(1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 16,
1, 1), (16, 1, 1, 1), 0))
class ResBlockNew(nn.Module):
def __init__(self, num_of_channels):
super(ResBlockNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=num_of_channels, out_channels=
num_of_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.in1 = nn.InstanceNorm2d(num_of_channels, affine=True)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(in_channels=num_of_channels, out_channels=
num_of_channels, kernel_size=3, stride=1, padding=1, bias=False)
self.in2 = nn.InstanceNorm2d(num_of_channels, affine=True)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.in1.weight
primals_4 = self.in1.bias
primals_5 = self.conv2.weight
primals_6 = self.in2.weight
primals_7 = self.in2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| krodyush/training_extensions | ResBlock | false | 10,987 | ["Apache-2.0"] | 0 | 542f4004dfbc6fc62a622065367ba4f85a703dd3 | https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3 |
DQN_RAM
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DQN_RAM(nn.Module):
def __init__(self, in_features=4, num_actions=18):
"""
        Initialize a deep Q-learning network for testing the algorithm.
        in_features: number of input features.
        num_actions: number of action values to output, one per action in the game.
"""
super(DQN_RAM, self).__init__()
self.fc1 = nn.Linear(in_features, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, num_actions)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
return self.fc4(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
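# --- hedged usage sketch (editor's addition, not part of the original repo) ---
# The network maps a state feature vector to one Q-value per action; a greedy policy
# simply takes the argmax over the last dimension.
_qnet = DQN_RAM(in_features=4, num_actions=18)
_q = _qnet(torch.rand(32, 4))
assert _q.shape == (32, 18) and _q.argmax(dim=1).shape == (32,)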
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (64, 128), (128, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (18, 64), (64, 1))
assert_size_stride(primals_9, (18,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf9, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3,
primals_5, buf8, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_6, (128, 64), (1, 128), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_2[grid(4096)](buf5,
primals_7, buf7, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 18), (18, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_8, (64, 18), (1, 64), 0
), alpha=1, beta=1, out=buf6)
del primals_9
return reinterpret_tensor(buf6, (4, 4, 4, 18), (288, 72, 18, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 128), (128, 1), 0
), reinterpret_tensor(buf5, (64, 64), (64, 1), 0
), primals_8, buf7, primals_6, buf8, primals_4, buf9
class DQN_RAMNew(nn.Module):
def __init__(self, in_features=4, num_actions=18):
"""
        Initialize a deep Q-learning network for testing the algorithm.
        in_features: number of input features.
        num_actions: number of action values to output, one per action in the game.
"""
super(DQN_RAMNew, self).__init__()
self.fc1 = nn.Linear(in_features, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, num_actions)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| yepw/DQN-Atari | DQN_RAM | false | 10,988 | ["MIT"] | 0 | 4ea9f687cbfdbc25a241e9b8f26b86d56291278b | https://github.com/yepw/DQN-Atari/tree/4ea9f687cbfdbc25a241e9b8f26b86d56291278b |
CategoricalPolicyTwoLayer
|
import torch
import torch.nn.functional as F
import torch.distributions as td
import torch.nn as nn
class PolicyNetwork(nn.Module):
"""Base class for stochastic policy networks."""
def __init__(self):
super().__init__()
def forward(self, state):
"""Take state as input, then output the parameters of the policy."""
raise NotImplementedError('forward not implemented.')
def sample(self, state):
"""
Sample an action based on the model parameters given the current state.
"""
raise NotImplementedError('sample not implemented.')
class CategoricalPolicy(PolicyNetwork):
"""
Base class for categorical policy.
Desired network needs to be implemented.
"""
def __init__(self, state_dim, num_actions):
super().__init__()
self.state_dim = state_dim
self.num_actions = num_actions
def sample(self, state, no_log_prob=False):
probs = self.forward(state)
dist = td.Categorical(probs)
action = dist.sample(sample_shape=torch.tensor([1]))
return action if no_log_prob else (action, dist.log_prob(action))
class CategoricalPolicyTwoLayer(CategoricalPolicy):
"""
Categorical policy using a fully connected two-layer network with ReLU
activation to generate the parameters of the categorical distribution.
"""
def __init__(self, state_dim, num_actions, hidden_layer1_size=256,
hidden_layer2_size=256, init_std=0.01):
super().__init__(state_dim, num_actions)
self.init_std = init_std
self.linear1 = nn.Linear(state_dim, hidden_layer1_size)
self.linear2 = nn.Linear(hidden_layer1_size, hidden_layer2_size)
self.linear3 = nn.Linear(hidden_layer2_size, num_actions)
nn.init.normal_(self.linear1.weight, std=self.init_std)
nn.init.normal_(self.linear2.weight, std=self.init_std)
nn.init.normal_(self.linear3.weight, std=self.init_std)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
output = F.relu(self.linear3(x))
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'num_actions': 4}]
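# --- hedged sketch (editor's addition, not part of the original repo) ---
# forward() returns one non-negative (ReLU-clipped) score per action; sample() then
# feeds these scores to torch.distributions.Categorical as unnormalized probabilities.
_policy = CategoricalPolicyTwoLayer(state_dim=4, num_actions=4)
_scores = _policy(torch.rand(8, 4))
assert _scores.shape == (8, 4) and (_scores >= 0).all()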
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.distributions as td
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 256), (256, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf8, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
primals_5, buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_6, (256, 4), (1, 256), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf5,
primals_7, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
), buf6, primals_6, buf7, primals_4, buf8
class PolicyNetwork(nn.Module):
"""Base class for stochastic policy networks."""
def __init__(self):
super().__init__()
def forward(self, state):
"""Take state as input, then output the parameters of the policy."""
raise NotImplementedError('forward not implemented.')
def sample(self, state):
"""
Sample an action based on the model parameters given the current state.
"""
raise NotImplementedError('sample not implemented.')
class CategoricalPolicy(PolicyNetwork):
"""
Base class for categorical policy.
Desired network needs to be implemented.
"""
def __init__(self, state_dim, num_actions):
super().__init__()
self.state_dim = state_dim
self.num_actions = num_actions
def sample(self, state, no_log_prob=False):
probs = self.forward(state)
dist = td.Categorical(probs)
action = dist.sample(sample_shape=torch.tensor([1]))
return action if no_log_prob else (action, dist.log_prob(action))
class CategoricalPolicyTwoLayerNew(CategoricalPolicy):
"""
Categorical policy using a fully connected two-layer network with ReLU
activation to generate the parameters of the categorical distribution.
"""
def __init__(self, state_dim, num_actions, hidden_layer1_size=256,
hidden_layer2_size=256, init_std=0.01):
super().__init__(state_dim, num_actions)
self.init_std = init_std
self.linear1 = nn.Linear(state_dim, hidden_layer1_size)
self.linear2 = nn.Linear(hidden_layer1_size, hidden_layer2_size)
self.linear3 = nn.Linear(hidden_layer2_size, num_actions)
nn.init.normal_(self.linear1.weight, std=self.init_std)
nn.init.normal_(self.linear2.weight, std=self.init_std)
nn.init.normal_(self.linear3.weight, std=self.init_std)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| wessle/costaware | CategoricalPolicyTwoLayer | false | 10,989 | ["MIT"] | 0 | 151502308411528eaa703d353d138fc809e59d8e | https://github.com/wessle/costaware/tree/151502308411528eaa703d353d138fc809e59d8e |
Mask
|
import torch
import torch.nn as nn
import torch.utils.data
class Mask(nn.Module):
def forward(self, seq, mask):
seq_mask = torch.unsqueeze(mask, 2)
seq_mask = torch.transpose(seq_mask.repeat(1, 1, seq.size()[1]), 1, 2)
return seq.where(torch.eq(seq_mask, 1), torch.zeros_like(seq))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_eq_where_zeros_like_0(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y1 = yindex // 4
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y1), xmask & ymask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr1 + (x2 + 4 * y0), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 == tmp1
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp5, xmask & ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_eq_where_zeros_like_0[grid(16, 4)](arg0_1, arg1_1,
buf0, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class MaskNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| pkuyym/nni | Mask | false | 10,990 | ["MIT"] | 0 | fe533e3bc65ea27997e16250adb503638548d500 | https://github.com/pkuyym/nni/tree/fe533e3bc65ea27997e16250adb503638548d500 |
LinearARD
|
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Parameter
class LinearARD(nn.Module):
"""
    Dense layer implementation with an ARD prior on the weights (arxiv:1701.05369)
"""
def __init__(self, in_features, out_features, bias=True, thresh=3,
ard_init=-10):
super(LinearARD, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
self.thresh = thresh
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.ard_init = ard_init
self.log_sigma2 = Parameter(torch.Tensor(out_features, in_features))
self.reset_parameters()
def forward(self, input):
if self.training:
W_mu = F.linear(input, self.weight)
std_w = torch.exp(self.log_alpha).permute(1, 0)
W_std = torch.sqrt(input.pow(2).matmul(std_w * self.weight.
permute(1, 0) ** 2) + 1e-15)
epsilon = W_std.new(W_std.shape).normal_()
output = W_mu + W_std * epsilon
output += self.bias
else:
W = self.weights_clipped
output = F.linear(input, W) + self.bias
return output
@property
def weights_clipped(self):
clip_mask = self.get_clip_mask()
return torch.where(clip_mask, torch.zeros_like(self.weight), self.
weight)
def reset_parameters(self):
self.weight.data.normal_(0, 0.02)
if self.bias is not None:
self.bias.data.zero_()
self.log_sigma2.data.fill_(self.ard_init)
def get_clip_mask(self):
log_alpha = self.log_alpha
return torch.ge(log_alpha, self.thresh)
def get_reg(self, **kwargs):
"""
        Get the weight regularization term (an approximation of KL(q(w)||p(w)))
"""
k1, k2, k3 = 0.63576, 1.8732, 1.48695
C = -k1
mdkl = k1 * torch.sigmoid(k2 + k3 * self.log_alpha
) - 0.5 * torch.log1p(torch.exp(-self.log_alpha)) + C
return -torch.sum(mdkl)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def get_dropped_params_cnt(self):
"""
        Get the number of dropped weights (those whose log alpha reaches the "thresh" parameter)
        :returns: the number of dropped weights
"""
return self.get_clip_mask().sum().cpu().numpy()
@property
def log_alpha(self):
log_alpha = self.log_sigma2 - 2 * torch.log(torch.abs(self.weight) +
1e-15)
return torch.clamp(log_alpha, -10, 10)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
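# --- hedged sketch (editor's addition, not part of the original repo) ---
# In eval mode the layer is a plain linear map with the ARD-clipped weight matrix:
# weights whose log_alpha = clamp(log_sigma2 - 2*log(|w| + 1e-15), -10, 10) reaches
# thresh are zeroed before F.linear is applied.
_layer = LinearARD(in_features=4, out_features=3).eval()
_x = torch.rand(5, 4)
assert torch.allclose(_layer(_x), F.linear(_x, _layer.weights_clipped) + _layer.bias)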
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_abs_add_clamp_ge_log_mul_sub_where_zeros_like_0(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl_math.abs(tmp1)
tmp3 = 1e-15
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp6 = 2.0
tmp7 = tmp5 * tmp6
tmp8 = tmp0 - tmp7
tmp9 = -10.0
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = 10.0
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tmp13 = 3.0
tmp14 = tmp12 >= tmp13
tmp15 = 0.0
tmp16 = tl.where(tmp14, tmp15, tmp1)
tl.store(out_ptr0 + x0, tmp14, xmask)
tl.store(out_ptr1 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_add_clamp_ge_log_mul_sub_where_zeros_like_0[grid
(16)](primals_1, primals_2, buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_1[grid(256)](buf3, primals_4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_4
return buf3, buf0, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class LinearARDNew(nn.Module):
"""
Dense layer implementation with weights ARD-prior (arxiv:1701.05369)
"""
def __init__(self, in_features, out_features, bias=True, thresh=3,
ard_init=-10):
super(LinearARDNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
self.thresh = thresh
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.ard_init = ard_init
self.log_sigma2 = Parameter(torch.Tensor(out_features, in_features))
self.reset_parameters()
@property
def weights_clipped(self):
clip_mask = self.get_clip_mask()
return torch.where(clip_mask, torch.zeros_like(self.weight), self.
weight)
def reset_parameters(self):
self.weight.data.normal_(0, 0.02)
if self.bias is not None:
self.bias.data.zero_()
self.log_sigma2.data.fill_(self.ard_init)
def get_clip_mask(self):
log_alpha = self.log_alpha
return torch.ge(log_alpha, self.thresh)
def get_reg(self, **kwargs):
"""
Get weights regularization (KL(q(w)||p(w)) approximation)
"""
k1, k2, k3 = 0.63576, 1.8732, 1.48695
C = -k1
mdkl = k1 * torch.sigmoid(k2 + k3 * self.log_alpha
) - 0.5 * torch.log1p(torch.exp(-self.log_alpha)) + C
return -torch.sum(mdkl)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def get_dropped_params_cnt(self):
"""
        Get the number of dropped weights (those with log alpha greater than or equal to the "thresh" parameter)
        :returns: number of dropped weights
"""
return self.get_clip_mask().sum().cpu().numpy()
@property
def log_alpha(self):
log_alpha = self.log_sigma2 - 2 * torch.log(torch.abs(self.weight) +
1e-15)
return torch.clamp(log_alpha, -10, 10)
def forward(self, input_0):
primals_1 = self.weight
primals_4 = self.bias
primals_2 = self.log_sigma2
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
x-zho14/pytorch_ard
|
LinearARD
| false
| 10,991
|
[
"MIT"
] | 0
|
5a9b790f33bf0340b2b3a2108c45d97786a2be86
|
https://github.com/x-zho14/pytorch_ard/tree/5a9b790f33bf0340b2b3a2108c45d97786a2be86
|
Net
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
self.conv2 = nn.Conv2d(10, 20, kernel_size=3)
self.conv3 = nn.Conv2d(20, 50, kernel_size=3)
self.conv4 = nn.Conv2d(50, 2, kernel_size=1, bias=False, padding=0,
stride=1)
self.max_pool2d = nn.MaxPool2d((4, 4))
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
x = self.conv3(x)
x = self.conv4(x)
x = self.max_pool2d(x)
x = self.softmax(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
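# Hedged shape walk-through, not part of the original source, assuming the Net
# defined above and the 64x64 input from get_inputs(): conv1 (3x3, no padding)
# gives 62x62, max_pool2d(2) gives 31x31, conv2 gives 29x29, the next pooling
# gives 14x14, conv3 gives 12x12, the 1x1 conv4 keeps 12x12, and MaxPool2d((4, 4))
# gives 3x3, so the softmax output has shape (4, 2, 3, 3).
def _net_shape_check():
    model = Net()
    out = model(torch.rand([4, 3, 64, 64]))
    assert out.shape == (4, 2, 3, 3)
    return out.shape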
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 153760
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 38440
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x3 = xindex // 31
x2 = xindex // 9610
x4 = xindex % 9610
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x4 + 9728 * x2), tmp15, xmask)
tl.store(out_ptr1 + x5, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 67280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 841 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_3(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 15680
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = xindex // 14 % 14
x4 = xindex // 196
x3 = xindex // 3920
x5 = xindex % 3920
x6 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x4), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x4), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x4), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x5 + 3968 * x3), tmp15, xmask)
tl.store(out_ptr1 + x6, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 28800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 144 % 50
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 72
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 48 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0 + 48 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0 + 48 * x1), xmask, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0 + 48 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (12 + 4 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (13 + 4 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (14 + 4 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (15 + 4 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (24 + 4 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (25 + 4 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (26 + 4 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (27 + 4 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (36 + 4 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (37 + 4 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (38 + 4 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (39 + 4 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tmp31 = tmp1 > tmp0
tmp32 = tl.full([1], 1, tl.int8)
tmp33 = tl.full([1], 0, tl.int8)
tmp34 = tl.where(tmp31, tmp32, tmp33)
tmp35 = tmp3 > tmp2
tmp36 = tl.full([1], 2, tl.int8)
tmp37 = tl.where(tmp35, tmp36, tmp34)
tmp38 = tmp5 > tmp4
tmp39 = tl.full([1], 3, tl.int8)
tmp40 = tl.where(tmp38, tmp39, tmp37)
tmp41 = tmp7 > tmp6
tmp42 = tl.full([1], 4, tl.int8)
tmp43 = tl.where(tmp41, tmp42, tmp40)
tmp44 = tmp9 > tmp8
tmp45 = tl.full([1], 5, tl.int8)
tmp46 = tl.where(tmp44, tmp45, tmp43)
tmp47 = tmp11 > tmp10
tmp48 = tl.full([1], 6, tl.int8)
tmp49 = tl.where(tmp47, tmp48, tmp46)
tmp50 = tmp13 > tmp12
tmp51 = tl.full([1], 7, tl.int8)
tmp52 = tl.where(tmp50, tmp51, tmp49)
tmp53 = tmp15 > tmp14
tmp54 = tl.full([1], 8, tl.int8)
tmp55 = tl.where(tmp53, tmp54, tmp52)
tmp56 = tmp17 > tmp16
tmp57 = tl.full([1], 9, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp19 > tmp18
tmp60 = tl.full([1], 10, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp21 > tmp20
tmp63 = tl.full([1], 11, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp23 > tmp22
tmp66 = tl.full([1], 12, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp25 > tmp24
tmp69 = tl.full([1], 13, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp27 > tmp26
tmp72 = tl.full([1], 14, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp29 > tmp28
tmp75 = tl.full([1], 15, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x2, tmp30, xmask)
tl.store(out_ptr1 + x2, tmp76, xmask)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 72
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 9
x2 = xindex // 18
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 18 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (9 + x0 + 18 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x3, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (10, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 3, 3), (90, 9, 3, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (50, 20, 3, 3), (180, 9, 3, 1))
assert_size_stride(primals_7, (50,), (1,))
assert_size_stride(primals_8, (2, 50, 1, 1), (50, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 62, 62), (38440, 3844, 62, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(153760)](buf1, primals_2,
153760, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 10, 31, 31), (9728, 961, 31, 1),
torch.int8)
buf3 = empty_strided_cuda((4, 10, 31, 31), (9610, 961, 31, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(38440)](buf1,
buf2, buf3, 38440, XBLOCK=512, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 20, 29, 29), (16820, 841, 29, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(67280)](buf5, primals_5, 67280,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 20, 14, 14), (3968, 196, 14, 1),
torch.int8)
buf7 = empty_strided_cuda((4, 20, 14, 14), (3920, 196, 14, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_3[grid(15680)](buf5,
buf6, buf7, 15680, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 50, 12, 12), (7200, 144, 12, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_4[grid(28800)](buf9, primals_7, 28800,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 2, 12, 12), (288, 144, 12, 1))
buf11 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.float32)
buf12 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(72)](buf10, buf11,
buf12, 72, XBLOCK=128, num_warps=4, num_stages=1)
buf13 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.float32)
triton_poi_fused__softmax_6[grid(72)](buf11, buf13, 72, XBLOCK=128,
num_warps=4, num_stages=1)
del buf11
return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf12, buf13)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
self.conv2 = nn.Conv2d(10, 20, kernel_size=3)
self.conv3 = nn.Conv2d(20, 50, kernel_size=3)
self.conv4 = nn.Conv2d(50, 2, kernel_size=1, bias=False, padding=0,
stride=1)
self.max_pool2d = nn.MaxPool2d((4, 4))
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
krodyush/training_extensions
|
Net
| false
| 10,992
|
[
"Apache-2.0"
] | 0
|
542f4004dfbc6fc62a622065367ba4f85a703dd3
|
https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3
|
CFRB
|
import torch
from torch import nn
from collections import OrderedDict
import torch.nn.functional as F
def sequential(*args):
"""Advanced nn.Sequential.
Args:
nn.Sequential, nn.Module
Returns:
nn.Sequential
"""
if len(args) == 1:
if isinstance(args[0], OrderedDict):
raise NotImplementedError(
'sequential does not support OrderedDict input.')
return args[0]
modules = []
for module in args:
if isinstance(module, nn.Sequential):
for submodule in module.children():
modules.append(submodule)
elif isinstance(module, nn.Module):
modules.append(module)
return nn.Sequential(*modules)
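# Hedged example, not part of the original source, of what sequential() does with
# the inputs described in its docstring: nested nn.Sequential containers are
# flattened into one nn.Sequential of their children. _sequential_demo is a
# hypothetical helper name.
def _sequential_demo():
    flat = sequential(nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU()),
        nn.BatchNorm2d(8))
    # flat now holds three children in order: Conv2d, ReLU, BatchNorm2d.
    return flat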
def conv(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=
1, bias=True, mode='CBR', negative_slope=0.2):
L = []
for t in mode:
if t == 'C':
L.append(nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, bias=bias))
elif t == 'S':
L.append(nn.utils.spectral_norm(nn.Conv2d(in_channels=
in_channels, out_channels=out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, bias=bias)))
elif t == 'T':
L.append(nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=kernel_size, stride=
stride, padding=padding, bias=bias))
elif t == 'B':
L.append(nn.BatchNorm2d(out_channels, momentum=0.9, eps=0.0001))
elif t == 'I':
L.append(nn.InstanceNorm2d(out_channels, affine=True))
elif t == 'R':
L.append(nn.ReLU(inplace=True))
elif t == 'r':
L.append(nn.ReLU(inplace=False))
        elif t == 'E':
            L.append(nn.ELU(inplace=True))
        elif t == 'e':
            L.append(nn.ELU(inplace=False))
elif t == 'L':
L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=True))
elif t == 'l':
L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=False)
)
elif t == 's':
L.append(nn.Softplus())
elif t == 'G':
L.append(nn.Sigmoid())
elif t == 't':
L.append(nn.Tanh())
elif t == '2':
L.append(nn.PixelShuffle(upscale_factor=2))
elif t == '3':
L.append(nn.PixelShuffle(upscale_factor=3))
elif t == '4':
L.append(nn.PixelShuffle(upscale_factor=4))
elif t == 'U':
L.append(nn.Upsample(scale_factor=2, mode='nearest'))
elif t == 'u':
L.append(nn.Upsample(scale_factor=3, mode='nearest'))
elif t == 'v':
L.append(nn.Upsample(scale_factor=4, mode='nearest'))
elif t == 'M':
L.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride,
padding=0))
elif t == 'A':
L.append(nn.AvgPool2d(kernel_size=kernel_size, stride=stride,
padding=0))
else:
            raise NotImplementedError('Undefined type: {}'.format(t))
return sequential(*L)
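# Hedged example, not part of the original source, of the mode-string convention
# used by conv() above: each character appends one layer, so mode='CL' (the CFRB
# default) yields Conv2d followed by LeakyReLU, and mode='CBR' yields
# Conv2d -> BatchNorm2d -> ReLU. _conv_mode_demo is a hypothetical helper name.
def _conv_mode_demo():
    cl_block = conv(50, 50, kernel_size=3, stride=1, padding=1, mode='CL',
        negative_slope=0.05)
    cbr_block = conv(64, 64, mode='CBR')
    return cl_block, cbr_block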
class ESA(nn.Module):
def __init__(self, channel=64, reduction=4, bias=True):
super(ESA, self).__init__()
self.r_nc = channel // reduction
self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1)
self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1)
self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride=
2, padding=0)
self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x1 = self.conv1(x)
x2 = F.max_pool2d(self.conv2(x1), kernel_size=7, stride=3)
x2 = self.relu(self.conv3(x2))
x2 = self.relu(self.conv4(x2))
x2 = F.interpolate(self.conv5(x2), (x.size(2), x.size(3)), mode=
'bilinear', align_corners=False)
x2 = self.conv6(x2 + self.conv21(x1))
return x.mul(self.sigmoid(x2))
class CFRB(nn.Module):
def __init__(self, in_channels=50, out_channels=50, kernel_size=3,
stride=1, padding=1, bias=True, mode='CL', d_rate=0.5,
negative_slope=0.05):
super(CFRB, self).__init__()
self.d_nc = int(in_channels * d_rate)
self.r_nc = in_channels
assert mode[0] == 'C', 'convolutional layer first'
self.conv1_d = conv(in_channels, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv1_r = conv(in_channels, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv2_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv2_r = conv(self.r_nc, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv3_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv3_r = conv(self.r_nc, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv4_d = conv(self.r_nc, self.d_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv1x1 = conv(self.d_nc * 4, out_channels, kernel_size=1,
stride=1, padding=0, bias=bias, mode=mode[0])
self.act = conv(mode=mode[-1], negative_slope=negative_slope)
self.esa = ESA(in_channels, reduction=4, bias=True)
def forward(self, x):
d1 = self.conv1_d(x)
x = self.act(self.conv1_r(x) + x)
d2 = self.conv2_d(x)
x = self.act(self.conv2_r(x) + x)
d3 = self.conv3_d(x)
x = self.act(self.conv3_r(x) + x)
x = self.conv4_d(x)
x = self.act(torch.cat([d1, d2, d3, x], dim=1))
x = self.esa(self.conv1x1(x))
return x
def get_inputs():
return [torch.rand([4, 50, 64, 64])]
def get_init_inputs():
return [[], {}]
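# Hedged shape sketch, not part of the original source, assuming the CFRB block
# defined above with its defaults (in_channels=out_channels=50, d_rate=0.5): the
# three distilled branches and conv4_d each produce 25 channels, their
# concatenation has 100 channels, conv1x1 maps back to 50, and the ESA gate keeps
# the spatial size, so the output shape matches the (4, 50, 64, 64) input.
def _cfrb_shape_check():
    block = CFRB()
    out = block(torch.rand([4, 50, 64, 64]))
    assert out.shape == (4, 50, 64, 64)
    return out.shape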
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from collections import OrderedDict
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 50
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.05
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(in_out_ptr0 + x3, tmp9, None)
@triton.jit
def triton_poi_fused_cat_leaky_relu_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 100
x0 = xindex % 4096
x2 = xindex // 409600
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 25, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 102400 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 50, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (x0 + 4096 * (-25 + x1) + 102400 * x2), tmp13,
other=0.0)
tmp15 = tl.load(in_ptr3 + (-25 + x1), tmp13, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 75, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr4 + (x0 + 4096 * (-50 + x1) + 102400 * x2), tmp22,
other=0.0)
tmp24 = tl.load(in_ptr5 + (-50 + x1), tmp22, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tl.full([1], 100, tl.int64)
tmp31 = tl.load(in_ptr6 + (x0 + 4096 * (-75 + x1) + 102400 * x2), tmp28,
other=0.0)
tmp32 = tl.load(in_ptr7 + (-75 + x1), tmp28, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp28, tmp33, tmp34)
tmp36 = tl.where(tmp22, tmp27, tmp35)
tmp37 = tl.where(tmp13, tmp18, tmp36)
tmp38 = tl.where(tmp4, tmp9, tmp37)
tmp39 = 0.0
tmp40 = tmp38 > tmp39
tmp41 = 0.05
tmp42 = tmp38 * tmp41
tmp43 = tl.where(tmp40, tmp38, tmp42)
tl.store(in_out_ptr0 + x3, tmp43, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 50
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 12
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 46128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 961 % 12
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 12
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused__to_copy_6(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_clamp_7(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_9(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x5 = xindex // 4096
x2 = xindex // 4096 % 12
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr8 + x6, None)
tmp38 = tl.load(in_ptr9 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 9 * tmp4 + 81 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp13 = tmp12 + tmp1
tmp14 = tmp12 < 0
tmp15 = tl.where(tmp14, tmp13, tmp12)
tmp16 = tl.load(in_ptr2 + (tmp15 + 9 * tmp4 + 81 * x5), None,
eviction_policy='evict_last')
tmp17 = tmp16 + tmp10
tmp18 = tmp17 - tmp11
tmp20 = tmp18 * tmp19
tmp21 = tmp11 + tmp20
tmp23 = tmp22 + tmp1
tmp24 = tmp22 < 0
tmp25 = tl.where(tmp24, tmp23, tmp22)
tmp26 = tl.load(in_ptr2 + (tmp8 + 9 * tmp25 + 81 * x5), None,
eviction_policy='evict_last')
tmp27 = tmp26 + tmp10
tmp28 = tl.load(in_ptr2 + (tmp15 + 9 * tmp25 + 81 * x5), None,
eviction_policy='evict_last')
tmp29 = tmp28 + tmp10
tmp30 = tmp29 - tmp27
tmp31 = tmp30 * tmp19
tmp32 = tmp27 + tmp31
tmp33 = tmp32 - tmp21
tmp35 = tmp33 * tmp34
tmp36 = tmp21 + tmp35
tmp39 = tmp37 + tmp38
tmp40 = tmp36 + tmp39
tl.store(in_out_ptr0 + x6, tmp40, None)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_10(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 50
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp5, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31) = args
args.clear()
assert_size_stride(primals_1, (25, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_2, (25,), (1,))
assert_size_stride(primals_3, (4, 50, 64, 64), (204800, 4096, 64, 1))
assert_size_stride(primals_4, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (25, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_7, (25,), (1,))
assert_size_stride(primals_8, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_9, (50,), (1,))
assert_size_stride(primals_10, (25, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_11, (25,), (1,))
assert_size_stride(primals_12, (50, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_13, (50,), (1,))
assert_size_stride(primals_14, (25, 50, 3, 3), (450, 9, 3, 1))
assert_size_stride(primals_15, (25,), (1,))
assert_size_stride(primals_16, (50, 100, 1, 1), (100, 1, 1, 1))
assert_size_stride(primals_17, (50,), (1,))
assert_size_stride(primals_18, (12, 50, 1, 1), (50, 1, 1, 1))
assert_size_stride(primals_19, (12,), (1,))
assert_size_stride(primals_20, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_21, (12,), (1,))
assert_size_stride(primals_22, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_23, (12,), (1,))
assert_size_stride(primals_24, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_25, (12,), (1,))
assert_size_stride(primals_26, (12, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_27, (12,), (1,))
assert_size_stride(primals_28, (12, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_29, (12,), (1,))
assert_size_stride(primals_30, (50, 12, 1, 1), (12, 1, 1, 1))
assert_size_stride(primals_31, (50,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 25, 64, 64), (102400, 4096, 64, 1))
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_convolution_leaky_relu_0[grid(819200)](buf2,
primals_5, primals_3, 819200, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_5
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 25, 64, 64), (102400, 4096, 64, 1))
buf4 = extern_kernels.convolution(buf2, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_leaky_relu_0[grid(819200)](buf5,
primals_9, buf2, 819200, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf6 = extern_kernels.convolution(buf5, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 25, 64, 64), (102400, 4096, 64, 1))
buf7 = extern_kernels.convolution(buf5, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf8 = buf7
del buf7
triton_poi_fused_add_convolution_leaky_relu_0[grid(819200)](buf8,
primals_13, buf5, 819200, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf9 = extern_kernels.convolution(buf8, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 25, 64, 64), (102400, 4096, 64, 1))
buf10 = empty_strided_cuda((4, 100, 64, 64), (409600, 4096, 64, 1),
torch.float32)
buf11 = buf10
del buf10
triton_poi_fused_cat_leaky_relu_1[grid(1638400)](buf11, buf0,
primals_2, buf3, primals_7, buf6, primals_11, buf9, primals_15,
1638400, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
del buf3
del buf6
del buf9
del primals_11
del primals_15
del primals_2
del primals_7
buf12 = extern_kernels.convolution(buf11, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_2[grid(819200)](buf13, primals_17,
819200, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf14 = extern_kernels.convolution(buf13, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 12, 64, 64), (49152, 4096, 64, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_3[grid(196608)](buf15, primals_19,
196608, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_19
buf16 = extern_kernels.convolution(buf15, primals_20, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 12, 31, 31), (11532, 961, 31, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_4[grid(46128)](buf17, primals_21,
46128, XBLOCK=512, num_warps=4, num_stages=1)
del primals_21
buf18 = torch.ops.aten.max_pool2d_with_indices.default(buf17, [7, 7
], [3, 3])
buf19 = buf18[0]
buf20 = buf18[1]
del buf18
buf21 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 12, 9, 9), (972, 81, 9, 1))
buf22 = buf21
del buf21
triton_poi_fused_convolution_relu_5[grid(3888)](buf22, primals_23,
3888, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf23 = extern_kernels.convolution(buf22, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 12, 9, 9), (972, 81, 9, 1))
buf24 = buf23
del buf23
triton_poi_fused_convolution_relu_5[grid(3888)](buf24, primals_25,
3888, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf25 = extern_kernels.convolution(buf24, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 12, 9, 9), (972, 81, 9, 1))
buf26 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_6[grid(64)](buf26, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf27 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_7[grid(64)](buf27, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_6[grid(64)](buf28, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_7[grid(64)](buf29, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8[grid(64)](buf30,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf32 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8[grid(64)](buf32,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf34 = extern_kernels.convolution(buf15, primals_28, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 12, 64, 64), (49152, 4096, 64, 1))
buf33 = empty_strided_cuda((4, 12, 64, 64), (49152, 4096, 64, 1),
torch.float32)
buf35 = buf33
del buf33
triton_poi_fused__unsafe_index_add_convolution_mul_sub_9[grid(196608)](
buf35, buf26, buf28, buf25, primals_27, buf29, buf30, buf27,
buf32, buf34, primals_29, 196608, XBLOCK=512, num_warps=8,
num_stages=1)
del buf25
del buf34
del primals_27
del primals_29
buf36 = extern_kernels.convolution(buf35, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 50, 64, 64), (204800, 4096, 64, 1))
buf37 = buf36
del buf36
buf38 = empty_strided_cuda((4, 50, 64, 64), (204800, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_mul_sigmoid_10[grid(819200)](buf37,
primals_31, buf13, buf38, 819200, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_31
return (buf38, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, buf2, buf5, buf8, buf11, buf13, buf15, buf17, buf19,
buf20, buf22, buf24, buf26, buf27, buf28, buf29, buf30, buf32,
buf35, buf37)
def sequential(*args):
"""Advanced nn.Sequential.
Args:
nn.Sequential, nn.Module
Returns:
nn.Sequential
"""
if len(args) == 1:
if isinstance(args[0], OrderedDict):
raise NotImplementedError(
'sequential does not support OrderedDict input.')
return args[0]
modules = []
for module in args:
if isinstance(module, nn.Sequential):
for submodule in module.children():
modules.append(submodule)
elif isinstance(module, nn.Module):
modules.append(module)
return nn.Sequential(*modules)
def conv(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=
1, bias=True, mode='CBR', negative_slope=0.2):
L = []
for t in mode:
if t == 'C':
L.append(nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, bias=bias))
elif t == 'S':
L.append(nn.utils.spectral_norm(nn.Conv2d(in_channels=
in_channels, out_channels=out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, bias=bias)))
elif t == 'T':
L.append(nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=kernel_size, stride=
stride, padding=padding, bias=bias))
elif t == 'B':
L.append(nn.BatchNorm2d(out_channels, momentum=0.9, eps=0.0001))
elif t == 'I':
L.append(nn.InstanceNorm2d(out_channels, affine=True))
elif t == 'R':
L.append(nn.ReLU(inplace=True))
elif t == 'r':
L.append(nn.ReLU(inplace=False))
        elif t == 'E':
            L.append(nn.ELU(inplace=True))
        elif t == 'e':
            L.append(nn.ELU(inplace=False))
elif t == 'L':
L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=True))
elif t == 'l':
L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=False)
)
elif t == 's':
L.append(nn.Softplus())
elif t == 'G':
L.append(nn.Sigmoid())
elif t == 't':
L.append(nn.Tanh())
elif t == '2':
L.append(nn.PixelShuffle(upscale_factor=2))
elif t == '3':
L.append(nn.PixelShuffle(upscale_factor=3))
elif t == '4':
L.append(nn.PixelShuffle(upscale_factor=4))
elif t == 'U':
L.append(nn.Upsample(scale_factor=2, mode='nearest'))
elif t == 'u':
L.append(nn.Upsample(scale_factor=3, mode='nearest'))
elif t == 'v':
L.append(nn.Upsample(scale_factor=4, mode='nearest'))
elif t == 'M':
L.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride,
padding=0))
elif t == 'A':
L.append(nn.AvgPool2d(kernel_size=kernel_size, stride=stride,
padding=0))
else:
            raise NotImplementedError('Undefined type: {}'.format(t))
return sequential(*L)
class ESA(nn.Module):
def __init__(self, channel=64, reduction=4, bias=True):
super(ESA, self).__init__()
self.r_nc = channel // reduction
self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1)
self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1)
self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride=
2, padding=0)
self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x1 = self.conv1(x)
x2 = F.max_pool2d(self.conv2(x1), kernel_size=7, stride=3)
x2 = self.relu(self.conv3(x2))
x2 = self.relu(self.conv4(x2))
x2 = F.interpolate(self.conv5(x2), (x.size(2), x.size(3)), mode=
'bilinear', align_corners=False)
x2 = self.conv6(x2 + self.conv21(x1))
return x.mul(self.sigmoid(x2))
class CFRBNew(nn.Module):
def __init__(self, in_channels=50, out_channels=50, kernel_size=3,
stride=1, padding=1, bias=True, mode='CL', d_rate=0.5,
negative_slope=0.05):
super(CFRBNew, self).__init__()
self.d_nc = int(in_channels * d_rate)
self.r_nc = in_channels
assert mode[0] == 'C', 'convolutional layer first'
self.conv1_d = conv(in_channels, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv1_r = conv(in_channels, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv2_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv2_r = conv(self.r_nc, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv3_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1,
padding=0, bias=bias, mode=mode[0])
self.conv3_r = conv(self.r_nc, self.r_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv4_d = conv(self.r_nc, self.d_nc, kernel_size, stride,
padding, bias=bias, mode=mode[0])
self.conv1x1 = conv(self.d_nc * 4, out_channels, kernel_size=1,
stride=1, padding=0, bias=bias, mode=mode[0])
self.act = conv(mode=mode[-1], negative_slope=negative_slope)
self.esa = ESA(in_channels, reduction=4, bias=True)
def forward(self, input_0):
primals_1 = self.conv1_d.weight
primals_2 = self.conv1_d.bias
primals_4 = self.conv1_r.weight
primals_5 = self.conv1_r.bias
primals_6 = self.conv2_d.weight
primals_7 = self.conv2_d.bias
primals_8 = self.conv2_r.weight
primals_9 = self.conv2_r.bias
primals_10 = self.conv3_d.weight
primals_11 = self.conv3_d.bias
primals_12 = self.conv3_r.weight
primals_13 = self.conv3_r.bias
primals_14 = self.conv4_d.weight
primals_15 = self.conv4_d.bias
primals_16 = self.conv1x1.weight
primals_17 = self.conv1x1.bias
primals_18 = self.esa.conv1.weight
primals_19 = self.esa.conv1.bias
primals_28 = self.esa.conv21.weight
primals_21 = self.esa.conv21.bias
primals_20 = self.esa.conv2.weight
primals_23 = self.esa.conv2.bias
primals_22 = self.esa.conv3.weight
primals_25 = self.esa.conv3.bias
primals_24 = self.esa.conv4.weight
primals_27 = self.esa.conv4.bias
primals_26 = self.esa.conv5.weight
primals_29 = self.esa.conv5.bias
primals_30 = self.esa.conv6.weight
primals_31 = self.esa.conv6.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31])
return output[0]
|
samuro95/Prox-PnP
|
CFRB
| false
| 10,993
|
[
"MIT"
] | 0
|
c05a48a586f0ef27c8ddc14e0a4c2c3d6814f8c9
|
https://github.com/samuro95/Prox-PnP/tree/c05a48a586f0ef27c8ddc14e0a4c2c3d6814f8c9
|
ZeroLayer
|
import torch
import torch.nn as nn
import torch.utils.data
class ZeroLayer(nn.Module):
def __init__(self, stride):
super(ZeroLayer, self).__init__()
self.stride = stride
def forward(self, x):
"""n, c, h, w = x.size()
h //= self.stride
w //= self.stride
device = x.get_device() if x.is_cuda else torch.device('cpu')
# noinspection PyUnresolvedReferences
padding = torch.zeros(n, c, h, w, device=device, requires_grad=False)
return padding"""
return x * 0
@staticmethod
def is_zero_layer():
return True
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'stride': 1}]
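# Hedged sketch, not part of the original source: ZeroLayer keeps the input shape
# but zeroes every element (x * 0), which is what the commented-out padding code
# in forward() was also constructing explicitly.
def _zero_layer_check():
    out = ZeroLayer(stride=1)(torch.rand([4, 4, 4, 4]))
    assert out.shape == (4, 4, 4, 4) and out.abs().sum().item() == 0.0
    return out.shape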
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ZeroLayerNew(nn.Module):
def __init__(self, stride):
super(ZeroLayerNew, self).__init__()
self.stride = stride
@staticmethod
def is_zero_layer():
return True
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
pkuyym/nni
|
ZeroLayer
| false
| 10,994
|
[
"MIT"
] | 0
|
fe533e3bc65ea27997e16250adb503638548d500
|
https://github.com/pkuyym/nni/tree/fe533e3bc65ea27997e16250adb503638548d500
|
context_embedding
|
import torch
import torch.nn.functional as F
class CausalConv1d(torch.nn.Conv1d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
super(CausalConv1d, self).__init__(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=0, dilation=
dilation, groups=groups, bias=bias)
self.__padding = (kernel_size - 1) * dilation
def forward(self, input):
return super(CausalConv1d, self).forward(F.pad(input, (self.
__padding, 0)))
class context_embedding(torch.nn.Module):
def __init__(self, in_channels=1, embedding_size=256, k=5):
super(context_embedding, self).__init__()
self.causal_convolution = CausalConv1d(in_channels, embedding_size,
kernel_size=k)
def forward(self, x):
x = self.causal_convolution(x)
return F.tanh(x)
def get_inputs():
return [torch.rand([4, 1, 64])]
def get_init_inputs():
return [[], {}]
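# Hedged sketch, not part of the original source: CausalConv1d left-pads by
# (kernel_size - 1) * dilation = 4, so the convolution preserves the sequence
# length, and context_embedding maps the (4, 1, 64) input from get_inputs() to a
# tanh-squashed (4, 256, 64) embedding.
def _context_embedding_shape_check():
    emb = context_embedding()
    out = emb(torch.rand([4, 1, 64]))
    assert out.shape == (4, 256, 64)
    return out.shape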
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 68
x1 = xindex // 68
x2 = xindex
tmp0 = -4 + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + (-4 + x0 + 64 * x1), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_tanh_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64), (64, 64, 1))
assert_size_stride(primals_2, (256, 1, 5), (5, 5, 1))
assert_size_stride(primals_3, (256,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 68), (68, 68, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(272)](primals_1, buf0, 272,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 64), (16384, 64, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_tanh_1[grid(65536)](buf2, primals_3,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0, buf2
class CausalConv1d(torch.nn.Conv1d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
super(CausalConv1d, self).__init__(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=0, dilation=
dilation, groups=groups, bias=bias)
self.__padding = (kernel_size - 1) * dilation
def forward(self, input):
return super(CausalConv1d, self).forward(F.pad(input, (self.
__padding, 0)))
class context_embeddingNew(torch.nn.Module):
def __init__(self, in_channels=1, embedding_size=256, k=5):
super(context_embeddingNew, self).__init__()
self.causal_convolution = CausalConv1d(in_channels, embedding_size,
kernel_size=k)
def forward(self, input_0):
primals_2 = self.causal_convolution.weight
primals_3 = self.causal_convolution.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
xingtaodhu/logdeep
|
context_embedding
| false
| 10,995
|
[
"MIT"
] | 0
|
9626fa4b3345799940cb293c7aedb34dd33b5637
|
https://github.com/xingtaodhu/logdeep/tree/9626fa4b3345799940cb293c7aedb34dd33b5637
|
SmallBlock
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class SmallBlock(nn.Module):
def __init__(self, channels):
super(SmallBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=channels, out_channels=channels,
kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=False)
self.conv2 = nn.Conv2d(in_channels=channels, out_channels=channels,
kernel_size=3, stride=1, padding=1, bias=False)
def forward(self, x):
identity_data = x
output = self.relu(x)
output = self.conv1(output)
output = self.relu(output)
output = self.conv2(output)
output = torch.add(output, identity_data)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
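# Hedged sketch, not part of the original source: SmallBlock is a pre-activation
# residual block, conv2(relu(conv1(relu(x)))) + x, and both 3x3 convolutions use
# padding=1 without bias, so the output shape equals the input shape.
def _small_block_shape_check():
    block = SmallBlock(channels=4)
    out = block(torch.rand([4, 4, 4, 4]))
    assert out.shape == (4, 4, 4, 4)
    return out.shape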
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(256)](buf2, 256, XBLOCK=256, num_warps
=4, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_add_2[grid(256)](buf4, primals_1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
return buf4, primals_2, primals_3, buf0, buf2
class SmallBlockNew(nn.Module):
def __init__(self, channels):
super(SmallBlockNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=channels, out_channels=channels,
kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=False)
self.conv2 = nn.Conv2d(in_channels=channels, out_channels=channels,
kernel_size=3, stride=1, padding=1, bias=False)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
krodyush/training_extensions
|
SmallBlock
| false
| 10,996
|
[
"Apache-2.0"
] | 0
|
542f4004dfbc6fc62a622065367ba4f85a703dd3
|
https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3
|
DirichletPolicyTwoLayer
|
import torch
import numpy as np
import torch.nn.functional as F
import torch.distributions as td
import torch.nn as nn
class PolicyNetwork(nn.Module):
"""Base class for stochastic policy networks."""
def __init__(self):
super().__init__()
def forward(self, state):
"""Take state as input, then output the parameters of the policy."""
raise NotImplementedError('forward not implemented.')
def sample(self, state):
"""
Sample an action based on the model parameters given the current state.
"""
raise NotImplementedError('sample not implemented.')
class DirichletPolicyBase(PolicyNetwork):
"""
Base class for Dirichlet policies.
Desired network needs to be implemented.
"""
def __init__(self, min_alpha=-np.inf, max_alpha=np.inf):
super().__init__()
self.min_alpha = min_alpha
self.max_alpha = max_alpha
def sample(self, state, no_log_prob=False):
alpha = self.forward(state)
dist = td.Dirichlet(alpha)
action = dist.sample()
return action if no_log_prob else (action, dist.log_prob(action))
class DirichletPolicyTwoLayer(DirichletPolicyBase):
"""Working, single-layer Dirichlet policy network."""
def __init__(self, state_dim, action_dim, hidden_layer1_size=256,
hidden_layer2_size=256, min_alpha=-np.inf, max_alpha=np.inf,
init_std=0.0001):
super().__init__(min_alpha, max_alpha)
self.linear1 = nn.Linear(state_dim, hidden_layer1_size)
self.linear2 = nn.Linear(hidden_layer1_size, hidden_layer2_size)
self.linear3 = nn.Linear(hidden_layer2_size, action_dim)
nn.init.normal_(self.linear1.weight, std=init_std)
nn.init.normal_(self.linear1.bias, std=init_std)
nn.init.normal_(self.linear2.weight, std=init_std)
nn.init.normal_(self.linear2.bias, std=init_std)
nn.init.normal_(self.linear3.weight, std=init_std)
nn.init.normal_(self.linear3.bias, mean=-np.log(max_alpha - 1), std
=init_std)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
action = self.max_alpha * torch.sigmoid(self.linear3(x))
return torch.clamp(action, self.min_alpha, self.max_alpha)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.distributions as td
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_clamp_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = float('inf')
tmp3 = tmp1 * tmp2
tmp4 = float('-inf')
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = triton_helpers.minimum(tmp5, tmp2)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 256), (256, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
primals_5, buf6, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clamp_mul_sigmoid_1[grid(256)](buf4, buf5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
), buf4, primals_6, buf6, primals_4, buf7
class PolicyNetwork(nn.Module):
"""Base class for stochastic policy networks."""
def __init__(self):
super().__init__()
def forward(self, state):
"""Take state as input, then output the parameters of the policy."""
raise NotImplementedError('forward not implemented.')
def sample(self, state):
"""
Sample an action based on the model parameters given the current state.
"""
raise NotImplementedError('sample not implemented.')
class DirichletPolicyBase(PolicyNetwork):
"""
Base class for Dirichlet policies.
Desired network needs to be implemented.
"""
def __init__(self, min_alpha=-np.inf, max_alpha=np.inf):
super().__init__()
self.min_alpha = min_alpha
self.max_alpha = max_alpha
def sample(self, state, no_log_prob=False):
alpha = self.forward(state)
dist = td.Dirichlet(alpha)
action = dist.sample()
return action if no_log_prob else (action, dist.log_prob(action))
class DirichletPolicyTwoLayerNew(DirichletPolicyBase):
"""Working, single-layer Dirichlet policy network."""
def __init__(self, state_dim, action_dim, hidden_layer1_size=256,
hidden_layer2_size=256, min_alpha=-np.inf, max_alpha=np.inf,
init_std=0.0001):
super().__init__(min_alpha, max_alpha)
self.linear1 = nn.Linear(state_dim, hidden_layer1_size)
self.linear2 = nn.Linear(hidden_layer1_size, hidden_layer2_size)
self.linear3 = nn.Linear(hidden_layer2_size, action_dim)
nn.init.normal_(self.linear1.weight, std=init_std)
nn.init.normal_(self.linear1.bias, std=init_std)
nn.init.normal_(self.linear2.weight, std=init_std)
nn.init.normal_(self.linear2.bias, std=init_std)
nn.init.normal_(self.linear3.weight, std=init_std)
nn.init.normal_(self.linear3.bias, mean=-np.log(max_alpha - 1), std
=init_std)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
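# Editor's usage sketch (not part of the original dump). Assuming the same module
# scope as DirichletPolicyTwoLayerNew plus CUDA/Triton. The max_alpha=2.0 below is
# an arbitrary illustrative value; triton_poi_fused_clamp_mul_sigmoid_1 bakes the
# traced default max_alpha = inf in as a literal float('inf'), so the alpha values
# are not meaningful here and only the output shape is checked.
import torch

if torch.cuda.is_available():
    policy = DirichletPolicyTwoLayerNew(state_dim=4, action_dim=4, max_alpha=2.0).cuda()
    state = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        alpha = policy(state)
    print(alpha.shape)  # expected: torch.Size([4, 4, 4, 4])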
|
wessle/costaware
|
DirichletPolicyTwoLayer
| false
| 10,997
|
[
"MIT"
] | 0
|
151502308411528eaa703d353d138fc809e59d8e
|
https://github.com/wessle/costaware/tree/151502308411528eaa703d353d138fc809e59d8e
|
CausalConv1d
|
import torch
import torch.nn.functional as F
class CausalConv1d(torch.nn.Conv1d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
super(CausalConv1d, self).__init__(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=0, dilation=
dilation, groups=groups, bias=bias)
self.__padding = (kernel_size - 1) * dilation
def forward(self, input):
return super(CausalConv1d, self).forward(F.pad(input, (self.
__padding, 0)))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 7
x1 = xindex // 7
x2 = xindex
tmp0 = -3 + x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + (-3 + x0 + 4 * x1), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(28)](primals_1, buf0, 28,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 7
), (0, 7, 1), 0), primals_2, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf1, (1, 4, 4), (16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4), (4, 1), 0
), primals_2, reinterpret_tensor(buf0, (1, 4, 7), (28, 7, 1), 0)
class CausalConv1dNew(torch.nn.Conv1d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
super(CausalConv1dNew, self).__init__(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=0, dilation=
dilation, groups=groups, bias=bias)
self.__padding = (kernel_size - 1) * dilation
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
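# Editor's usage sketch (not part of the original dump). Assuming the same module
# scope as CausalConv1dNew plus CUDA/Triton, it checks the fused pad+conv path
# against an eager left-padded F.conv1d on the (channels, length) input shape used
# by get_inputs().
import torch
import torch.nn.functional as F

if torch.cuda.is_available():
    causal = CausalConv1dNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
    x = torch.rand(4, 4, device='cuda')
    with torch.no_grad():
        fused = causal(x)
        padded = F.pad(x.unsqueeze(0), (3, 0))  # left pad by (kernel_size - 1) * dilation
        eager = F.conv1d(padded, causal.weight, causal.bias).squeeze(0)
    print(torch.allclose(fused, eager, atol=1e-5))  # expected: True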
|
xingtaodhu/logdeep
|
CausalConv1d
| false
| 10,998
|
[
"MIT"
] | 0
|
9626fa4b3345799940cb293c7aedb34dd33b5637
|
https://github.com/xingtaodhu/logdeep/tree/9626fa4b3345799940cb293c7aedb34dd33b5637
|
LinearCombine
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class LinearCombine(nn.Module):
def __init__(self, layers_num, trainable=True, input_aware=False,
word_level=False):
super(LinearCombine, self).__init__()
self.input_aware = input_aware
self.word_level = word_level
if input_aware:
raise NotImplementedError('Input aware is not supported.')
self.w = nn.Parameter(torch.full((layers_num, 1, 1, 1), 1.0 /
layers_num), requires_grad=trainable)
def forward(self, seq):
nw = F.softmax(self.w, dim=0)
seq = torch.mul(seq, nw)
seq = torch.sum(seq, dim=0)
return seq
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'layers_num': 1}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp10 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp13 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp3 = tmp2 - tmp2
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp4 / tmp4
tmp6 = tmp0 * tmp5
tmp8 = tmp7 * tmp5
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp5
tmp12 = tmp9 + tmp11
tmp14 = tmp13 * tmp5
tmp15 = tmp12 + tmp14
tl.store(out_ptr0 + x0, tmp15, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_mul_sum_0[grid(64)](primals_2, primals_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf0, primals_1, primals_2
class LinearCombineNew(nn.Module):
def __init__(self, layers_num, trainable=True, input_aware=False,
word_level=False):
super(LinearCombineNew, self).__init__()
self.input_aware = input_aware
self.word_level = word_level
if input_aware:
raise NotImplementedError('Input aware is not supported.')
self.w = nn.Parameter(torch.full((layers_num, 1, 1, 1), 1.0 /
layers_num), requires_grad=trainable)
def forward(self, input_0):
primals_1 = self.w
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
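# Editor's usage sketch (not part of the original dump). Assuming the same module
# scope as LinearCombineNew plus CUDA/Triton. With layers_num=1 the softmaxed
# weight is exactly 1, so the fused kernel reduces to a sum over dim 0, which is
# what the eager reference below computes.
import torch
import torch.nn.functional as F

if torch.cuda.is_available():
    combine = LinearCombineNew(layers_num=1).cuda()
    seq = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        fused = combine(seq)
        eager = (seq * F.softmax(combine.w, dim=0)).sum(dim=0)
    print(torch.allclose(fused, eager, atol=1e-5))  # expected: True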
|
pkuyym/nni
|
LinearCombine
| false
| 10,999
|
[
"MIT"
] | 0
|
fe533e3bc65ea27997e16250adb503638548d500
|
https://github.com/pkuyym/nni/tree/fe533e3bc65ea27997e16250adb503638548d500
|
ToRGB
|
import torch
import torch.nn as nn
class ToRGB(nn.Module):
"""Some Information about ToRGB"""
def __init__(self, channels):
super(ToRGB, self).__init__()
self.conv = nn.Conv2d(channels, 3, kernel_size=1, stride=1, padding
=0, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
return self.sigmoid(self.conv(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 3
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (3, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (3,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 4, 4), (48, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_sigmoid_0[grid(192)](buf1, primals_2,
192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf1
class ToRGBNew(nn.Module):
"""Some Information about ToRGB"""
def __init__(self, channels):
super(ToRGBNew, self).__init__()
self.conv = nn.Conv2d(channels, 3, kernel_size=1, stride=1, padding
=0, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
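# Editor's usage sketch (not part of the original dump). Assuming the same module
# scope as ToRGBNew plus CUDA/Triton, it compares the fused conv+sigmoid path
# against the eager sigmoid(conv(x)).
import torch

if torch.cuda.is_available():
    to_rgb = ToRGBNew(channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        fused = to_rgb(x)
        eager = torch.sigmoid(to_rgb.conv(x))
    print(fused.shape, torch.allclose(fused, eager, atol=1e-6))  # torch.Size([4, 3, 4, 4]) True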
|
uthree/gan-image-generator
|
ToRGB
| false
| 11,000
|
[
"MIT"
] | 0
|
85585e389b5a494393da0789d82824f8c811e263
|
https://github.com/uthree/gan-image-generator/tree/85585e389b5a494393da0789d82824f8c811e263
|
FromRGB
|
import torch
import torch.nn as nn
class FromRGB(nn.Module):
"""Some Information about FromRGB"""
def __init__(self, channels):
super(FromRGB, self).__init__()
self.conv = nn.Conv2d(3, channels, kernel_size=1, stride=1, padding
=0, bias=True)
def forward(self, x):
return self.conv(x)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(65536)](buf1, primals_2, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class FromRGBNew(nn.Module):
"""Some Information about FromRGB"""
def __init__(self, channels):
super(FromRGBNew, self).__init__()
self.conv = nn.Conv2d(3, channels, kernel_size=1, stride=1, padding
=0, bias=True)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
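# Editor's usage sketch (not part of the original dump). Assuming the same module
# scope as FromRGBNew plus CUDA/Triton; the fused path is just a 1x1 convolution
# with the bias added by a Triton kernel, so it should match the eager conv module.
import torch

if torch.cuda.is_available():
    from_rgb = FromRGBNew(channels=4).cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    with torch.no_grad():
        fused = from_rgb(x)
        eager = from_rgb.conv(x)
    print(torch.allclose(fused, eager, atol=1e-6))  # expected: True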
|
uthree/gan-image-generator
|
FromRGB
| false
| 11,001
|
[
"MIT"
] | 0
|
85585e389b5a494393da0789d82824f8c811e263
|
https://github.com/uthree/gan-image-generator/tree/85585e389b5a494393da0789d82824f8c811e263
|
GatedResidualNetwork
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class GatedLinearUnit(nn.Module):
def __init__(self, input_size, output_size, dropout=0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.w4 = nn.Linear(input_size, output_size)
self.w5 = nn.Linear(input_size, output_size)
self.act = nn.Sigmoid()
def forward(self, x):
x = self.dropout(x)
x = self.act(self.w4(x)) * self.w5(x)
return x
class GatedResidualNetwork(nn.Module):
def __init__(self, input_size, hidden_size, output_size, context_size=
None, dropout=0):
super().__init__()
self.w1 = nn.Linear(hidden_size, hidden_size)
self.w2 = nn.Linear(input_size, hidden_size)
self.w3 = None if context_size is None else nn.Linear(context_size,
hidden_size, bias=False)
self.glu = GatedLinearUnit(hidden_size, output_size, dropout)
self.layer_norm = nn.LayerNorm(output_size)
self.residual = nn.Sequential(
) if input_size == output_size else nn.Linear(input_size,
output_size)
def forward(self, a, c=None):
if c is not None:
n2 = F.elu(self.w2(a) + self.w3(c))
else:
n2 = F.elu(self.w2(a))
n1 = self.w1(n2)
grn = self.layer_norm(self.residual(a) + self.glu(n1))
return grn
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp8 = tl.sigmoid(tmp7)
tmp10 = tmp8 * tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp15 = tl.sigmoid(tmp14)
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp22 = tl.sigmoid(tmp21)
tmp24 = tmp22 * tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 - tmp6
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp12 = tmp7 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_9
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(64)](
primals_3, buf3, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_sigmoid_2[grid(256)](
primals_3, buf3, buf4, buf5, buf6, primals_10, primals_11, buf7,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf5
del buf6
del primals_11
return buf7, primals_3, primals_10, buf0, reinterpret_tensor(buf1, (64,
4), (4, 1), 0), buf2, buf3, buf4, primals_8, primals_6, primals_4
class GatedLinearUnit(nn.Module):
def __init__(self, input_size, output_size, dropout=0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.w4 = nn.Linear(input_size, output_size)
self.w5 = nn.Linear(input_size, output_size)
self.act = nn.Sigmoid()
def forward(self, x):
x = self.dropout(x)
x = self.act(self.w4(x)) * self.w5(x)
return x
class GatedResidualNetworkNew(nn.Module):
def __init__(self, input_size, hidden_size, output_size, context_size=
None, dropout=0):
super().__init__()
self.w1 = nn.Linear(hidden_size, hidden_size)
self.w2 = nn.Linear(input_size, hidden_size)
self.w3 = None if context_size is None else nn.Linear(context_size,
hidden_size, bias=False)
self.glu = GatedLinearUnit(hidden_size, output_size, dropout)
self.layer_norm = nn.LayerNorm(output_size)
self.residual = nn.Sequential(
) if input_size == output_size else nn.Linear(input_size,
output_size)
def forward(self, input_0):
primals_1 = self.w1.weight
primals_2 = self.w1.bias
primals_4 = self.w2.weight
primals_5 = self.w2.bias
primals_6 = self.glu.w4.weight
primals_7 = self.glu.w4.bias
primals_8 = self.glu.w5.weight
primals_9 = self.glu.w5.bias
primals_10 = self.layer_norm.weight
primals_11 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
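# Editor's usage sketch (not part of the original dump). Assuming the same module
# scope as GatedResidualNetworkNew plus CUDA/Triton, it replays the eager GRN
# computation (elu -> w1 -> GLU -> residual add -> layer norm) with the module's
# own submodules and compares it with the fused kernels.
import torch
import torch.nn.functional as F

if torch.cuda.is_available():
    grn = GatedResidualNetworkNew(input_size=4, hidden_size=4, output_size=4).cuda()
    a = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        fused = grn(a)
        n2 = F.elu(grn.w2(a))
        eager = grn.layer_norm(grn.residual(a) + grn.glu(grn.w1(n2)))
    print(torch.allclose(fused, eager, atol=1e-5))  # expected: True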
|
krodyush/training_extensions
|
GatedResidualNetwork
| false
| 11,002
|
[
"Apache-2.0"
] | 0
|
542f4004dfbc6fc62a622065367ba4f85a703dd3
|
https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3
|
LearnableBias
|
import torch
import torch.nn as nn
class LearnableBias(nn.Module):
def __init__(self, out_chn):
super(LearnableBias, self).__init__()
self.bias = nn.Parameter(torch.zeros(out_chn), requires_grad=True)
def forward(self, x):
out = x + self.bias.expand_as(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'out_chn': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class LearnableBiasNew(nn.Module):
def __init__(self, out_chn):
super(LearnableBiasNew, self).__init__()
self.bias = nn.Parameter(torch.zeros(out_chn), requires_grad=True)
def forward(self, input_0):
primals_1 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
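# Editor's usage sketch (not part of the original dump). Assuming the same module
# scope as LearnableBiasNew plus CUDA/Triton. The parameter starts at zero, so it
# is randomized first to make the broadcast-add check non-trivial; note the bias
# broadcasts over the last dimension here, matching bias.expand_as(x) for this
# 4-element trailing axis.
import torch

if torch.cuda.is_available():
    learnable = LearnableBiasNew(out_chn=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        learnable.bias.uniform_()
        out = learnable(x)
        print(torch.allclose(out, x + learnable.bias))  # expected: True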
|
uzair789/pytorch-retinanet
|
LearnableBias
| false
| 11,003
|
[
"Apache-2.0"
] | 0
|
cabac159a9877825ef04ab06d3b9a63bdfa4f306
|
https://github.com/uzair789/pytorch-retinanet/tree/cabac159a9877825ef04ab06d3b9a63bdfa4f306
|
DirichletPolicySingleLayer
|
import torch
import numpy as np
import torch.nn.functional as F
import torch.distributions as td
import torch.nn as nn
class PolicyNetwork(nn.Module):
"""Base class for stochastic policy networks."""
def __init__(self):
super().__init__()
def forward(self, state):
"""Take state as input, then output the parameters of the policy."""
raise NotImplementedError('forward not implemented.')
def sample(self, state):
"""
Sample an action based on the model parameters given the current state.
"""
raise NotImplementedError('sample not implemented.')
class DirichletPolicyBase(PolicyNetwork):
"""
Base class for Dirichlet policies.
Desired network needs to be implemented.
"""
def __init__(self, min_alpha=-np.inf, max_alpha=np.inf):
super().__init__()
self.min_alpha = min_alpha
self.max_alpha = max_alpha
def sample(self, state, no_log_prob=False):
alpha = self.forward(state)
dist = td.Dirichlet(alpha)
action = dist.sample()
return action if no_log_prob else (action, dist.log_prob(action))
class DirichletPolicySingleLayer(DirichletPolicyBase):
"""Working, single-layer Dirichlet policy network."""
def __init__(self, state_dim, action_dim, hidden_layer_size=256,
min_alpha=-np.inf, max_alpha=np.inf):
super().__init__(min_alpha, max_alpha)
self.linear1 = nn.Linear(state_dim, hidden_layer_size)
self.linear2 = nn.Linear(hidden_layer_size, action_dim)
nn.init.normal_(self.linear1.weight, std=0.001)
nn.init.normal_(self.linear1.bias, std=0.001)
nn.init.normal_(self.linear2.weight, std=0.001)
nn.init.normal_(self.linear2.bias, std=0.001)
def forward(self, state):
x = F.relu(self.linear1(state))
action = self.max_alpha * F.sigmoid(self.linear2(x))
return torch.clamp(action, self.min_alpha, self.max_alpha)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.distributions as td
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_clamp_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = float('inf')
tmp3 = tmp1 * tmp2
tmp4 = float('-inf')
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = triton_helpers.minimum(tmp5, tmp2)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 256), (256, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf4, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_4, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clamp_mul_sigmoid_1[grid(256)](buf2, buf3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), buf2, primals_4, buf4
class PolicyNetwork(nn.Module):
"""Base class for stochastic policy networks."""
def __init__(self):
super().__init__()
def forward(self, state):
"""Take state as input, then output the parameters of the policy."""
raise NotImplementedError('forward not implemented.')
def sample(self, state):
"""
Sample an action based on the model parameters given the current state.
"""
raise NotImplementedError('sample not implemented.')
class DirichletPolicyBase(PolicyNetwork):
"""
Base class for Dirichlet policies.
Desired network needs to be implemented.
"""
def __init__(self, min_alpha=-np.inf, max_alpha=np.inf):
super().__init__()
self.min_alpha = min_alpha
self.max_alpha = max_alpha
def sample(self, state, no_log_prob=False):
alpha = self.forward(state)
dist = td.Dirichlet(alpha)
action = dist.sample()
return action if no_log_prob else (action, dist.log_prob(action))
class DirichletPolicySingleLayerNew(DirichletPolicyBase):
"""Working, single-layer Dirichlet policy network."""
def __init__(self, state_dim, action_dim, hidden_layer_size=256,
min_alpha=-np.inf, max_alpha=np.inf):
super().__init__(min_alpha, max_alpha)
self.linear1 = nn.Linear(state_dim, hidden_layer_size)
self.linear2 = nn.Linear(hidden_layer_size, action_dim)
nn.init.normal_(self.linear1.weight, std=0.001)
nn.init.normal_(self.linear1.bias, std=0.001)
nn.init.normal_(self.linear2.weight, std=0.001)
nn.init.normal_(self.linear2.bias, std=0.001)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
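# Editor's usage sketch (not part of the original dump). Assuming the same module
# scope as DirichletPolicySingleLayerNew plus CUDA/Triton. As with the two-layer
# variant, the clamp/mul/sigmoid kernel hardcodes the traced default
# max_alpha = inf, so only the output shape is checked.
import torch

if torch.cuda.is_available():
    policy = DirichletPolicySingleLayerNew(state_dim=4, action_dim=4).cuda()
    state = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        alpha = policy(state)
    print(alpha.shape)  # expected: torch.Size([4, 4, 4, 4])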
|
wessle/costaware
|
DirichletPolicySingleLayer
| false
| 11,004
|
[
"MIT"
] | 0
|
151502308411528eaa703d353d138fc809e59d8e
|
https://github.com/wessle/costaware/tree/151502308411528eaa703d353d138fc809e59d8e
|
PinballLoss
|
import torch
import torch.nn as nn
class PinballLoss(nn.Module):
""" Pinball Loss
Computes the pinball loss between y and y_hat.
Parameters
----------
y: tensor
actual values in torch tensor.
y_hat: tensor (same shape as y)
predicted values in torch tensor.
tau: float, between 0 and 1
the slope of the pinball loss, in the context of
quantile regression, the value of tau determines the
conditional quantile level.
Returns
----------
pinball_loss:
average accuracy for the predicted quantile
"""
def __init__(self, tau=0.5):
super(PinballLoss, self).__init__()
self.tau = tau
def forward(self, y, y_hat):
delta_y = torch.sub(y, y_hat)
pinball = torch.max(torch.mul(self.tau, delta_y), torch.mul(self.
tau - 1, delta_y))
pinball = pinball.mean()
return pinball
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_maximum_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = 0.5
tmp4 = tmp3 * tmp2
tmp5 = -0.5
tmp6 = tmp5 * tmp2
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_maximum_mean_mul_sub_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class PinballLossNew(nn.Module):
""" Pinball Loss
Computes the pinball loss between y and y_hat.
Parameters
----------
y: tensor
actual values in torch tensor.
y_hat: tensor (same shape as y)
predicted values in torch tensor.
tau: float, between 0 and 1
the slope of the pinball loss, in the context of
quantile regression, the value of tau determines the
conditional quantile level.
Returns
----------
pinball_loss:
average accuracy for the predicted quantile
"""
def __init__(self, tau=0.5):
super(PinballLossNew, self).__init__()
self.tau = tau
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
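# Editor's usage sketch (not part of the original dump). Assuming the same module
# scope as PinballLossNew plus CUDA/Triton. The reduction kernel hardcodes
# tau = 0.5, for which the pinball loss is symmetric in the sign of (y - y_hat),
# so it should match the eager formula below.
import torch

if torch.cuda.is_available():
    loss_fn = PinballLossNew(tau=0.5)
    y = torch.rand(4, 4, 4, 4, device='cuda')
    y_hat = torch.rand(4, 4, 4, 4, device='cuda')
    fused = loss_fn(y, y_hat)
    delta = y - y_hat
    eager = torch.max(0.5 * delta, -0.5 * delta).mean()
    print(torch.allclose(fused, eager, atol=1e-6))  # expected: True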
|
venkatkorapaty/esrnn
|
PinballLoss
| false
| 11,005
|
[
"MIT"
] | 0
|
411d3191e7e12f29e521e06bc18f9b9b0fdf0f0c
|
https://github.com/venkatkorapaty/esrnn/tree/411d3191e7e12f29e521e06bc18f9b9b0fdf0f0c
|
AdaptiveInstanceNormalization
|
import torch
import torch.nn as nn
class AdaptiveInstanceNormalization(nn.Module):
"""Some Information about AdaptiveInstanceNormalization"""
def __init__(self, channels, style_dim):
super(AdaptiveInstanceNormalization, self).__init__()
self.affine = nn.Linear(style_dim, channels * 2)
self.norm = nn.InstanceNorm2d(channels)
def forward(self, x, style):
scale, bias = self.affine(style).chunk(2, dim=1)
scale = scale.unsqueeze(2).unsqueeze(3)
bias = bias.unsqueeze(2).unsqueeze(3)
x = self.norm(x)
x = x * scale + bias
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'style_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp26 = tmp24 + tmp25
tmp27 = tmp23 * tmp26
tmp30 = tmp28 + tmp29
tmp31 = tmp27 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp31, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0[grid(16)](buf4,
primals_4, buf0, primals_2, buf1, buf5, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, primals_4, buf1, buf4
class AdaptiveInstanceNormalizationNew(nn.Module):
"""Some Information about AdaptiveInstanceNormalization"""
def __init__(self, channels, style_dim):
super(AdaptiveInstanceNormalizationNew, self).__init__()
self.affine = nn.Linear(style_dim, channels * 2)
self.norm = nn.InstanceNorm2d(channels)
def forward(self, input_0, input_1):
primals_1 = self.affine.weight
primals_2 = self.affine.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
uthree/gan-image-generator
|
AdaptiveInstanceNormalization
| false
| 11,006
|
[
"MIT"
] | 0
|
85585e389b5a494393da0789d82824f8c811e263
|
https://github.com/uthree/gan-image-generator/tree/85585e389b5a494393da0789d82824f8c811e263
|
upsampleBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
def swish(x):
return x * F.sigmoid(x)
class upsampleBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(upsampleBlock, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1
)
self.shuffler = nn.PixelShuffle(2)
def forward(self, x):
return swish(self.shuffler(self.conv(x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (x1 // 2) + 16 * (x0 % 2) + 32 * (x1 % 2) +
64 * x2 + x0 // 2), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 1, 8, 8), (64, 64, 8, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](buf1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1
def swish(x):
return x * F.sigmoid(x)
class upsampleBlockNew(nn.Module):
def __init__(self, in_channels, out_channels):
super(upsampleBlockNew, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1
)
self.shuffler = nn.PixelShuffle(2)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
tomron27/srganus
|
upsampleBlock
| false
| 11,007
|
[
"Apache-2.0"
] | 0
|
5dab73540535138375203bf31e31246cd203f3c0
|
https://github.com/tomron27/srganus/tree/5dab73540535138375203bf31e31246cd203f3c0
|
DisaggregatedPinballLoss
|
import torch
import torch.nn as nn
class DisaggregatedPinballLoss(nn.Module):
""" Pinball Loss
Computes the pinball loss between y and y_hat.
Parameters
----------
y: tensor
actual values in torch tensor.
y_hat: tensor (same shape as y)
predicted values in torch tensor.
tau: float, between 0 and 1
the slope of the pinball loss, in the context of
quantile regression, the value of tau determines the
conditional quantile level.
Returns
----------
pinball_loss:
average accuracy for the predicted quantile
"""
def __init__(self, tau=0.5):
super(DisaggregatedPinballLoss, self).__init__()
self.tau = tau
def forward(self, y, y_hat):
delta_y = torch.sub(y, y_hat)
pinball = torch.max(torch.mul(self.tau, delta_y), torch.mul(self.
tau - 1, delta_y))
pinball = pinball.mean(axis=0).mean(axis=1)
return pinball
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_maximum_mean_mul_sub_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp8 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp9 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp15 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp16 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp22 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp23 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = 0.5
tmp4 = tmp3 * tmp2
tmp5 = -0.5
tmp6 = tmp5 * tmp2
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp10 = tmp8 - tmp9
tmp11 = tmp3 * tmp10
tmp12 = tmp5 * tmp10
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp7 + tmp13
tmp17 = tmp15 - tmp16
tmp18 = tmp3 * tmp17
tmp19 = tmp5 * tmp17
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp14 + tmp20
tmp24 = tmp22 - tmp23
tmp25 = tmp3 * tmp24
tmp26 = tmp5 * tmp24
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp28 = tmp21 + tmp27
tmp29 = 4.0
tmp30 = tmp28 / tmp29
tl.store(out_ptr0 + x0, tmp30, xmask)
@triton.jit
def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_maximum_mean_mul_sub_0[grid(64)](arg1_1, arg0_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mean_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
return buf1,
class DisaggregatedPinballLossNew(nn.Module):
""" Pinball Loss
Computes the pinball loss between y and y_hat.
Parameters
----------
y: tensor
actual values in torch tensor.
y_hat: tensor (same shape as y)
predicted values in torch tensor.
tau: float, between 0 and 1
the slope of the pinball loss, in the context of
quantile regression, the value of tau determines the
conditional quantile level.
Returns
----------
pinball_loss:
average accuracy for the predicted quantile
"""
def __init__(self, tau=0.5):
super(DisaggregatedPinballLossNew, self).__init__()
self.tau = tau
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
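# Editor's usage sketch (not part of the original dump). Assuming the same module
# scope as DisaggregatedPinballLossNew plus CUDA/Triton. As above, tau = 0.5 is
# baked into the kernels; the eager reference keeps the disaggregated structure by
# averaging over dim 0 and then dim 1.
import torch

if torch.cuda.is_available():
    loss_fn = DisaggregatedPinballLossNew()
    y = torch.rand(4, 4, 4, 4, device='cuda')
    y_hat = torch.rand(4, 4, 4, 4, device='cuda')
    fused = loss_fn(y, y_hat)
    delta = y - y_hat
    eager = torch.max(0.5 * delta, -0.5 * delta).mean(dim=0).mean(dim=1)
    print(fused.shape, torch.allclose(fused, eager, atol=1e-6))  # torch.Size([4, 4]) True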
|
venkatkorapaty/esrnn
|
DisaggregatedPinballLoss
| false
| 11,008
|
[
"MIT"
] | 0
|
411d3191e7e12f29e521e06bc18f9b9b0fdf0f0c
|
https://github.com/venkatkorapaty/esrnn/tree/411d3191e7e12f29e521e06bc18f9b9b0fdf0f0c
|
MegatronGelu
|
import torch
import torch.nn
import torch.onnx
class MegatronGelu(torch.nn.Module):
def forward(self, x):
return x * 0.5 * (torch.erf(x / 1.41421) + 1.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071085623775818
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_erf_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MegatronGeluNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
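# Editor's usage sketch (not part of the original dump). Assuming the same module
# scope as MegatronGeluNew plus CUDA/Triton; the kernel's constant
# 0.7071085623775818 is 1 / 1.41421, so the fused output should match the eager
# erf-based GELU formula.
import torch

if torch.cuda.is_available():
    gelu = MegatronGeluNew()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    fused = gelu(x)
    eager = x * 0.5 * (torch.erf(x / 1.41421) + 1.0)
    print(torch.allclose(fused, eager, atol=1e-6))  # expected: True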
|
thilow/onnxruntime
|
MegatronGelu
| false
| 11,009
|
[
"MIT"
] | 0
|
1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
InteractiveKLLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class InteractiveKLLoss(nn.Module):
def __init__(self, temperature):
super().__init__()
self.temperature = temperature
self.kl_loss = nn.KLDivLoss()
def forward(self, student, teacher):
return self.kl_loss(F.log_softmax(student / self.temperature, dim=1
), F.softmax(teacher / self.temperature, dim=1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'temperature': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr1 + r3, None)
tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp36 = 256.0
tmp37 = tmp35 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2[grid(1)](
buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf2
return buf4,
class InteractiveKLLossNew(nn.Module):
def __init__(self, temperature):
super().__init__()
self.temperature = temperature
self.kl_loss = nn.KLDivLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
pkuyym/nni
|
InteractiveKLLoss
| false
| 11,010
|
[
"MIT"
] | 0
|
fe533e3bc65ea27997e16250adb503638548d500
|
https://github.com/pkuyym/nni/tree/fe533e3bc65ea27997e16250adb503638548d500
|
LevelVariabilityLoss
|
import torch
import torch.nn as nn
class LevelVariabilityLoss(nn.Module):
""" Level Variability Loss
Computes the variability penalty for the level.
Parameters
----------
levels: tensor with shape (batch, n_time)
levels obtained from exponential smoothing component of ESRNN
level_variability_penalty: float
this parameter controls the strength of the penalization
        to the wiggliness of the level vector, inducing smoothness
in the output
Returns
----------
level_var_loss:
wiggliness loss for the level vector
"""
def __init__(self, level_variability_penalty):
super(LevelVariabilityLoss, self).__init__()
self.level_variability_penalty = level_variability_penalty
def forward(self, levels):
assert levels.shape[1] > 2
level_prev = torch.log(levels[:, :-1])
level_next = torch.log(levels[:, 1:])
log_diff_of_levels = torch.sub(level_prev, level_next)
log_diff_prev = log_diff_of_levels[:, :-1]
log_diff_next = log_diff_of_levels[:, 1:]
diff = torch.sub(log_diff_prev, log_diff_next)
level_var_loss = diff ** 2
level_var_loss = level_var_loss.mean() * self.level_variability_penalty
return level_var_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'level_variability_penalty': 4}]
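# Hedged illustration (added): the penalty is the mean squared second difference
# of log(levels) along dim=1, scaled by level_variability_penalty. A perfectly
# exponential level series is linear in log-space, so its penalty is ~0.
def _example_level_variability():
    smooth = torch.exp(torch.linspace(0.0, 1.0, steps=8)).repeat(2, 1)
    wiggly = smooth * (1.0 + 0.3 * torch.rand_like(smooth))
    loss_fn = LevelVariabilityLoss(level_variability_penalty=4)
    return loss_fn(smooth), loss_fn(wiggly)  # first ~0, second > 0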
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 32
r1 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.log(tmp2)
tmp4 = tmp1 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp3 - tmp6
tmp8 = tmp4 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 128.0
tmp14 = tmp12 / tmp13
tmp15 = 4.0
tmp16 = tmp14 * tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_pow_sub_0[grid(1)](buf1, arg0_1, 1, 128,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class LevelVariabilityLossNew(nn.Module):
""" Level Variability Loss
Computes the variability penalty for the level.
Parameters
----------
levels: tensor with shape (batch, n_time)
levels obtained from exponential smoothing component of ESRNN
level_variability_penalty: float
this parameter controls the strength of the penalization
        to the wiggliness of the level vector, inducing smoothness
in the output
Returns
----------
level_var_loss:
wiggliness loss for the level vector
"""
def __init__(self, level_variability_penalty):
super(LevelVariabilityLossNew, self).__init__()
self.level_variability_penalty = level_variability_penalty
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
venkatkorapaty/esrnn
|
LevelVariabilityLoss
| false
| 11,011
|
[
"MIT"
] | 0
|
411d3191e7e12f29e521e06bc18f9b9b0fdf0f0c
|
https://github.com/venkatkorapaty/esrnn/tree/411d3191e7e12f29e521e06bc18f9b9b0fdf0f0c
|
L1ExactPenaltyConstraintLoss
|
import torch
from torch import nn
from torch.nn import functional as F
class L1ExactPenaltyConstraintLoss(nn.Module):
def __init__(self):
super(L1ExactPenaltyConstraintLoss, self).__init__()
def forward(self, x):
gap_constraint = F.relu(x)
return torch.norm(gap_constraint, p=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
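# Hedged equivalence sketch (added): with p=1 the norm of relu(x) is simply the
# sum of the positive entries of x, so the loss equals F.relu(x).sum() and
# negative entries contribute nothing.
def _example_l1_penalty():
    x, = get_inputs()
    loss = L1ExactPenaltyConstraintLoss()(x)
    assert torch.allclose(loss, F.relu(x).sum())
    return loss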
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_relu_0(in_out_ptr0, in_ptr0, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_relu_0[grid(1)](buf1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class L1ExactPenaltyConstraintLossNew(nn.Module):
def __init__(self):
super(L1ExactPenaltyConstraintLossNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ykt345/fairtorch
|
L1ExactPenaltyConstraintLoss
| false
| 11,012
|
[
"MIT"
] | 0
|
fe7e0cfaec3de0fc2b9c92943bb02639acd46bb4
|
https://github.com/ykt345/fairtorch/tree/fe7e0cfaec3de0fc2b9c92943bb02639acd46bb4
|
L2PenaltyConstraintLoss
|
import torch
from torch import nn
from torch.nn import functional as F
class L2PenaltyConstraintLoss(nn.Module):
def __init__(self):
super(L2PenaltyConstraintLoss, self).__init__()
def forward(self, x):
gap_constraint = F.relu(x)
return torch.norm(gap_constraint, p=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_relu_0(in_out_ptr0, in_ptr0, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = libdevice.sqrt(tmp6)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_relu_0[grid(1)](buf1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class L2PenaltyConstraintLossNew(nn.Module):
def __init__(self):
super(L2PenaltyConstraintLossNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ykt345/fairtorch
|
L2PenaltyConstraintLoss
| false
| 11,013
|
[
"MIT"
] | 0
|
fe7e0cfaec3de0fc2b9c92943bb02639acd46bb4
|
https://github.com/ykt345/fairtorch/tree/fe7e0cfaec3de0fc2b9c92943bb02639acd46bb4
|
MegatronFastGelu
|
import torch
import torch.nn
import torch.onnx
class MegatronFastGelu(torch.nn.Module):
def forward(self, x):
return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * (1.0 +
0.044715 * x * x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
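# Hedged sanity sketch (added): 0.7978845608028654 ~= sqrt(2/pi), so this is the
# tanh approximation of GELU, 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))).
# On recent PyTorch versions it should match F.gelu(x, approximate='tanh') to
# within floating-point rounding.
def _example_fast_gelu():
    x, = get_inputs()
    fast = MegatronFastGelu()(x)
    ref = torch.nn.functional.gelu(x, approximate='tanh')
    return torch.max(torch.abs(fast - ref))  # expected to be ~0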
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7978845608028654
tmp4 = tmp0 * tmp3
tmp5 = 0.044715
tmp6 = tmp0 * tmp5
tmp7 = tmp6 * tmp0
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tmp11 = libdevice.tanh(tmp10)
tmp12 = tmp11 + tmp8
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_tanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MegatronFastGeluNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
thilow/onnxruntime
|
MegatronFastGelu
| false
| 11,014
|
[
"MIT"
] | 0
|
1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
UpsampleBLock
|
import torch
import torch.nn as nn
import torch.utils.data
class UpsampleBLock(nn.Module):
def __init__(self, in_channels):
super(UpsampleBLock, self).__init__()
self.conv = nn.Conv2d(in_channels, in_channels * 2 ** 2,
kernel_size=3, padding=1)
self.pixel_shuffle = nn.PixelShuffle(2)
self.prelu = nn.PReLU()
def forward(self, x):
x = self.conv(x)
x = self.pixel_shuffle(x)
x = self.prelu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
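# Hedged shape sketch (added): the conv expands channels by a factor of 4 and
# PixelShuffle(2) folds those channels back into a 2x larger spatial grid, so an
# (N, C, H, W) input comes out as (N, C, 2H, 2W).
def _example_upsample_shapes():
    x, = get_inputs()                      # (4, 4, 4, 4)
    block = UpsampleBLock(in_channels=4)
    y = block(x)
    assert y.shape == (4, 4, 8, 8)
    return y.shape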
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__prelu_kernel_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (x1 // 2) + 16 * (x0 % 2) + 32 * (x1 % 2) +
64 * x2 + x0 // 2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(1024)](buf1, primals_2, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
triton_poi_fused__prelu_kernel_1[grid(1024)](buf1, primals_4, buf2,
1024, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, primals_4, buf1
class UpsampleBLockNew(nn.Module):
def __init__(self, in_channels):
super(UpsampleBLockNew, self).__init__()
self.conv = nn.Conv2d(in_channels, in_channels * 2 ** 2,
kernel_size=3, padding=1)
self.pixel_shuffle = nn.PixelShuffle(2)
self.prelu = nn.PReLU()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.prelu.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
tomron27/srganus
|
UpsampleBLock
| false
| 11,015
|
[
"Apache-2.0"
] | 0
|
5dab73540535138375203bf31e31246cd203f3c0
|
https://github.com/tomron27/srganus/tree/5dab73540535138375203bf31e31246cd203f3c0
|
HuggingfaceFastGelu
|
import torch
import torch.nn
import torch.onnx
class HuggingfaceFastGelu(torch.nn.Module):
def forward(self, x):
return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 +
0.044715 * x * x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7978845608
tmp4 = tmp0 * tmp3
tmp5 = 0.044715
tmp6 = tmp0 * tmp5
tmp7 = tmp6 * tmp0
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tmp11 = libdevice.tanh(tmp10)
tmp12 = tmp11 + tmp8
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_tanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HuggingfaceFastGeluNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
thilow/onnxruntime
|
HuggingfaceFastGelu
| false
| 11,016
|
[
"MIT"
] | 0
|
1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
NeuralNetNonDifferentiableOutput
|
import torch
import torch.nn
import torch.onnx
class NeuralNetNonDifferentiableOutput(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetNonDifferentiableOutput, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1):
out = self.fc1(input1)
out1 = self.relu(out)
out2 = self.fc2(out1)
mask1 = torch.gt(out1, 0.01)
mask1 = mask1.long()
mask2 = torch.lt(out2, 0.02)
mask2 = mask2.long()
return out1, mask1, out2, mask2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
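# Hedged sketch (added): two of the four outputs are integer masks produced by
# comparisons, so they carry no gradient; only out1 and out2 are differentiable.
def _example_non_differentiable_outputs():
    x, = get_inputs()
    model = NeuralNetNonDifferentiableOutput(input_size=4, hidden_size=4,
        num_classes=4)
    out1, mask1, out2, mask2 = model(x)
    assert mask1.dtype == torch.int64 and mask2.dtype == torch.int64
    assert out1.requires_grad and not mask1.requires_grad
    return out1.shape, mask1.shape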
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__to_copy_gt_relu_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.01
tmp6 = tmp4 > tmp5
tmp7 = tmp6.to(tl.int64)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__to_copy_lt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.02
tmp2 = tmp0 < tmp1
tmp3 = tmp2.to(tl.int64)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_gt_relu_0[grid(256)](buf1, primals_2,
buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
triton_poi_fused__to_copy_lt_1[grid(256)](buf2, buf4, 256, XBLOCK=
256, num_warps=4, num_stages=1)
return buf1, buf3, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, primals_4
class NeuralNetNonDifferentiableOutputNew(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetNonDifferentiableOutputNew, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2], output[3]
|
thilow/onnxruntime
|
NeuralNetNonDifferentiableOutput
| false
| 11,017
|
[
"MIT"
] | 0
|
1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
TemperatureHolder
|
import torch
from torch import nn
class TemperatureHolder(nn.Module):
"""Module that holds a temperature as a learnable value.
Args:
initial_log_temperature (float): Initial value of log(temperature).
"""
def __init__(self, initial_log_temperature=0):
super().__init__()
self.log_temperature = nn.Parameter(torch.tensor(
initial_log_temperature, dtype=torch.float32))
def forward(self):
"""Return a temperature as a torch.Tensor."""
return torch.exp(self.log_temperature)
def get_inputs():
return []
def get_init_inputs():
return [[], {}]
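# Hedged usage sketch (added): storing log(temperature) keeps the temperature
# positive while letting it be optimized directly. A minimal step against a
# dummy objective that pulls the temperature toward 2.0:
def _example_temperature_step():
    holder = TemperatureHolder()
    opt = torch.optim.SGD(holder.parameters(), lr=0.1)
    loss = (holder() - 2.0) ** 2
    opt.zero_grad()
    loss.backward()
    opt.step()
    return holder()  # slightly closer to 2.0 than the initial exp(0) = 1.0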
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl_math.exp(tmp1)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp2, None)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_0[grid(1)](primals_1, buf0, 1, XBLOCK=1,
num_warps=1, num_stages=1)
del primals_1
return buf0, buf0
class TemperatureHolderNew(nn.Module):
"""Module that holds a temperature as a learnable value.
Args:
initial_log_temperature (float): Initial value of log(temperature).
"""
def __init__(self, initial_log_temperature=0):
super().__init__()
self.log_temperature = nn.Parameter(torch.tensor(
initial_log_temperature, dtype=torch.float32))
def forward(self):
primals_1 = self.log_temperature
output = call([primals_1])
return output[0]
|
tarokiritani/pfrl
|
TemperatureHolder
| false
| 11,018
|
[
"MIT"
] | 0
|
284ed1f43b32654a2ec1569b16a0f6b9acbd5e79
|
https://github.com/tarokiritani/pfrl/tree/284ed1f43b32654a2ec1569b16a0f6b9acbd5e79
|
NeuralNetMultiplePositionalArguments
|
import torch
import torch.nn
import torch.onnx
class NeuralNetMultiplePositionalArguments(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArguments, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1, input2):
model_input = input1 + input2
out = self.fc1(model_input)
out = self.relu(out)
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf2,
primals_4, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_6
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(
buf2, (64, 4), (4, 1), 0), primals_5, buf4
class NeuralNetMultiplePositionalArgumentsNew(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArgumentsNew, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
thilow/onnxruntime
|
NeuralNetMultiplePositionalArguments
| false
| 11,019
|
[
"MIT"
] | 0
|
1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency
|
import torch
import torch.nn
import torch.onnx
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency(torch
.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency
, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.fc2 = torch.nn.Linear(input_size, hidden_size)
self.relu1 = torch.nn.ReLU()
self.relu2 = torch.nn.ReLU()
def forward(self, input1, input2):
model_input = input1 + input2
out1 = self.fc1(model_input)
out2 = self.fc2(model_input)
out1 = self.relu1(out1)
out2 = self.relu2(out2)
return out1, out2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3,
primals_4, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf4,
primals_6, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
return buf3, buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf5, buf6
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependencyNew(
torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(
NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependencyNew
, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.fc2 = torch.nn.Linear(input_size, hidden_size)
self.relu1 = torch.nn.ReLU()
self.relu2 = torch.nn.ReLU()
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
thilow/onnxruntime
|
NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency
| false
| 11,020
|
[
"MIT"
] | 0
|
1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
FeedForwardLayer
|
import torch
from torch import nn
class FeedForwardLayer(nn.Module):
def __init__(self, hidden_size):
super(FeedForwardLayer, self).__init__()
self.linear_1 = nn.Linear(hidden_size, 4 * hidden_size)
self.linear_2 = nn.Linear(4 * hidden_size, hidden_size)
self.relu = nn.ReLU()
def forward(self, hidden_states):
hidden_states = self.linear_1(hidden_states)
hidden_states = self.relu(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1,
primals_2, buf3, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4, buf3
class FeedForwardLayerNew(nn.Module):
def __init__(self, hidden_size):
super(FeedForwardLayerNew, self).__init__()
self.linear_1 = nn.Linear(hidden_size, 4 * hidden_size)
self.linear_2 = nn.Linear(4 * hidden_size, hidden_size)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_1 = self.linear_1.weight
primals_2 = self.linear_1.bias
primals_4 = self.linear_2.weight
primals_5 = self.linear_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
yongho94/Transformers_NMT
|
FeedForwardLayer
| false
| 11,021
|
[
"MIT"
] | 0
|
14fb08a6b1391da4d49f199dc16d7beb37620c98
|
https://github.com/yongho94/Transformers_NMT/tree/14fb08a6b1391da4d49f199dc16d7beb37620c98
|
NeuralNetPartialNoGradModel
|
import torch
import torch.nn
import torch.onnx
class NeuralNetPartialNoGradModel(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetPartialNoGradModel, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size).requires_grad_(
False)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, model_input):
out = self.relu(self.fc1(model_input))
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
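# Hedged sketch (added): fc1 is frozen via requires_grad_(False), so after a
# backward pass only fc2 should receive gradients.
def _example_partial_no_grad():
    x, = get_inputs()
    model = NeuralNetPartialNoGradModel(input_size=4, hidden_size=4,
        num_classes=4)
    model(x).sum().backward()
    assert model.fc1.weight.grad is None and model.fc2.weight.grad is not None
    return True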
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
del primals_3
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
class NeuralNetPartialNoGradModelNew(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetPartialNoGradModelNew, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size).requires_grad_(
False)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
thilow/onnxruntime
|
NeuralNetPartialNoGradModel
| false
| 11,022
|
[
"MIT"
] | 0
|
1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
ModMSELoss
|
import torch
class ModMSELoss(torch.nn.Module):
def __init__(self, shape_r_gt, shape_c_gt):
super(ModMSELoss, self).__init__()
self.shape_r_gt = shape_r_gt
self.shape_c_gt = shape_c_gt
def forward(self, output, label, prior):
prior_size = prior.shape
output_max = torch.max(torch.max(output, 2)[0], 2)[0].unsqueeze(2
).unsqueeze(2).expand(output.shape[0], output.shape[1], self.
shape_r_gt, self.shape_c_gt)
reg = 1.0 / (prior_size[0] * prior_size[1]) * (1 - prior) ** 2
loss = torch.mean((output / output_max - label) ** 2 / (1 - label +
0.1)) + torch.sum(reg)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'shape_r_gt': 4, 'shape_c_gt': 4}]
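# Hedged usage sketch (added): the loss normalizes each prediction map by its
# per-sample, per-channel spatial maximum, divides the squared error by
# (1 - label + 0.1), and adds a regularizer proportional to sum((1 - prior)^2).
# On random inputs it simply returns a scalar:
def _example_mod_mse():
    output, label, prior = get_inputs()
    criterion = ModMSELoss(shape_r_gt=4, shape_c_gt=4)
    return criterion(output, label, prior)  # 0-dim tensor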
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = triton_helpers.maximum(tmp6, tmp13)
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = triton_helpers.maximum(tmp14, tmp21)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp29 = triton_helpers.maximum(tmp27, tmp28)
tmp30 = triton_helpers.maximum(tmp22, tmp29)
tl.store(out_ptr0 + x0, tmp30, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + r2, None)
tmp14 = tl.load(in_ptr3 + r2, None)
tmp2 = tmp0 / tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp3
tmp8 = 0.1
tmp9 = tmp7 + tmp8
tmp10 = tmp5 / tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp15 = tmp6 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = 0.0625
tmp18 = tmp16 * tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp13 / tmp22
tmp24 = tmp23 + tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp24, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(16)](arg1_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1
del buf1
triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1[grid(1)](buf3,
arg1_1, buf0, arg2_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del buf0
return buf3,
class ModMSELossNew(torch.nn.Module):
def __init__(self, shape_r_gt, shape_c_gt):
super(ModMSELossNew, self).__init__()
self.shape_r_gt = shape_r_gt
self.shape_c_gt = shape_c_gt
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
yyuting/learning_from_program_trace
|
ModMSELoss
| false
| 11,023
|
[
"MIT"
] | 0
|
e0e4ac9bc2d4069eef64bdc2de64a87a735fa508
|
https://github.com/yyuting/learning_from_program_trace/tree/e0e4ac9bc2d4069eef64bdc2de64a87a735fa508
|
PositionWiseFeedForward
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class GatedLinearUnit(nn.Module):
def __init__(self, input_size, output_size, dropout=0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.w4 = nn.Linear(input_size, output_size)
self.w5 = nn.Linear(input_size, output_size)
self.act = nn.Sigmoid()
def forward(self, x):
x = self.dropout(x)
x = self.act(self.w4(x)) * self.w5(x)
return x
class GateAddNorm(nn.Module):
def __init__(self, input_size, output_size, dropout):
super().__init__()
self.glu = GatedLinearUnit(input_size, output_size, dropout)
self.norm = nn.LayerNorm(output_size)
def forward(self, x, skip):
return self.norm(self.glu(x) + skip)
class GatedResidualNetwork(nn.Module):
def __init__(self, input_size, hidden_size, output_size, context_size=
None, dropout=0):
super().__init__()
self.w1 = nn.Linear(hidden_size, hidden_size)
self.w2 = nn.Linear(input_size, hidden_size)
self.w3 = None if context_size is None else nn.Linear(context_size,
hidden_size, bias=False)
self.glu = GatedLinearUnit(hidden_size, output_size, dropout)
self.layer_norm = nn.LayerNorm(output_size)
self.residual = nn.Sequential(
) if input_size == output_size else nn.Linear(input_size,
output_size)
def forward(self, a, c=None):
if c is not None:
n2 = F.elu(self.w2(a) + self.w3(c))
else:
n2 = F.elu(self.w2(a))
n1 = self.w1(n2)
grn = self.layer_norm(self.residual(a) + self.glu(n1))
return grn
class PositionWiseFeedForward(nn.Module):
def __init__(self, input_size, output_size, dropout):
super().__init__()
self.grn = GatedResidualNetwork(input_size=input_size, hidden_size=
input_size, output_size=output_size, dropout=dropout)
self.gate_add_norm = GateAddNorm(input_size=input_size, output_size
=output_size, dropout=dropout)
def forward(self, x, skip):
out = self.grn(x)
out = self.gate_add_norm(out, skip)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4, 'dropout': 0.5}]
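# Hedged usage sketch (added): the block passes x through a gated residual
# network, then gates the result, adds the skip connection, and layer-normalizes,
# so the output keeps the input shape (last dim = output_size).
def _example_position_wise_ffn():
    x, skip = get_inputs()
    ffn = PositionWiseFeedForward(input_size=4, output_size=4, dropout=0.5)
    ffn.eval()  # disable dropout for a deterministic sketch
    out = ffn(x, skip)
    assert out.shape == x.shape
    return out.shape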
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp8 = tl.sigmoid(tmp7)
tmp10 = tmp8 * tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp15 = tl.sigmoid(tmp14)
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp22 = tl.sigmoid(tmp21)
tmp24 = tmp22 * tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 - tmp6
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp12 = tmp7 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_3(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tmp7 = tl.sigmoid(tmp6)
tmp9 = tmp7 * tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp14 = tl.sigmoid(tmp13)
tmp16 = tmp14 * tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp21 = tl.sigmoid(tmp20)
tmp23 = tmp21 * tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_4(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 - tmp6
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp12 = tmp7 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_9
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(64)](
primals_3, buf3, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_sigmoid_2[grid(256)](
primals_3, buf3, buf4, buf5, buf6, primals_10, primals_11, buf7,
256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf7, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf8)
del primals_13
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_15, reinterpret_tensor(buf7, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf9)
del primals_15
buf10 = buf6
del buf6
buf11 = buf5
del buf5
triton_poi_fused_add_mul_native_layer_norm_sigmoid_3[grid(64)](buf8,
buf9, primals_16, buf10, buf11, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_sigmoid_4[grid(256)](buf8,
buf9, primals_16, buf10, buf11, primals_17, primals_18, buf12,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf10
del buf11
del primals_18
return (buf12, primals_3, primals_10, primals_16, primals_17, buf0,
reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, buf3, buf4,
reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf8, buf9,
primals_14, primals_12, primals_8, primals_6, primals_4)
class GatedLinearUnit(nn.Module):
def __init__(self, input_size, output_size, dropout=0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.w4 = nn.Linear(input_size, output_size)
self.w5 = nn.Linear(input_size, output_size)
self.act = nn.Sigmoid()
def forward(self, x):
x = self.dropout(x)
x = self.act(self.w4(x)) * self.w5(x)
return x
class GateAddNorm(nn.Module):
def __init__(self, input_size, output_size, dropout):
super().__init__()
self.glu = GatedLinearUnit(input_size, output_size, dropout)
self.norm = nn.LayerNorm(output_size)
def forward(self, x, skip):
return self.norm(self.glu(x) + skip)
class GatedResidualNetwork(nn.Module):
def __init__(self, input_size, hidden_size, output_size, context_size=
None, dropout=0):
super().__init__()
self.w1 = nn.Linear(hidden_size, hidden_size)
self.w2 = nn.Linear(input_size, hidden_size)
self.w3 = None if context_size is None else nn.Linear(context_size,
hidden_size, bias=False)
self.glu = GatedLinearUnit(hidden_size, output_size, dropout)
self.layer_norm = nn.LayerNorm(output_size)
self.residual = nn.Sequential(
) if input_size == output_size else nn.Linear(input_size,
output_size)
def forward(self, a, c=None):
if c is not None:
n2 = F.elu(self.w2(a) + self.w3(c))
else:
n2 = F.elu(self.w2(a))
n1 = self.w1(n2)
grn = self.layer_norm(self.residual(a) + self.glu(n1))
return grn
class PositionWiseFeedForwardNew(nn.Module):
def __init__(self, input_size, output_size, dropout):
super().__init__()
self.grn = GatedResidualNetwork(input_size=input_size, hidden_size=
input_size, output_size=output_size, dropout=dropout)
self.gate_add_norm = GateAddNorm(input_size=input_size, output_size
=output_size, dropout=dropout)
def forward(self, input_0, input_1):
primals_1 = self.grn.w1.weight
primals_2 = self.grn.w1.bias
primals_4 = self.grn.w2.weight
primals_5 = self.grn.w2.bias
primals_6 = self.grn.glu.w4.weight
primals_7 = self.grn.glu.w4.bias
primals_8 = self.grn.glu.w5.weight
primals_9 = self.grn.glu.w5.bias
primals_10 = self.grn.layer_norm.weight
primals_11 = self.grn.layer_norm.bias
primals_12 = self.gate_add_norm.glu.w4.weight
primals_13 = self.gate_add_norm.glu.w4.bias
primals_14 = self.gate_add_norm.glu.w5.weight
primals_15 = self.gate_add_norm.glu.w5.bias
primals_17 = self.gate_add_norm.norm.weight
primals_18 = self.gate_add_norm.norm.bias
primals_3 = input_0
primals_16 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18])
return output[0]
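# Usage sketch (not part of the original repo): a hypothetical helper that
# assumes a CUDA device, since `call` allocates its buffers with
# empty_strided_cuda. Sizes mirror the shape asserts above (all feature
# dims are 4).
def _example_position_wise_feed_forward():
    ffn = PositionWiseFeedForwardNew(input_size=4, output_size=4, dropout=0.0
        ).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    skip = torch.rand(4, 4, 4, 4, device='cuda')
    return ffn(x, skip)  # fused GRN + gate/add/LayerNorm, shape (4, 4, 4, 4)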
| krodyush/training_extensions | PositionWiseFeedForward | false | 11,024 | ["Apache-2.0"] | 0 | 542f4004dfbc6fc62a622065367ba4f85a703dd3 | https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3 |
NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency
|
import torch
import torch.nn
import torch.onnx
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency(torch.
nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency,
self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1, input2):
model_input = input1 + input2
out1 = self.fc1(model_input)
out1 = self.relu(out1)
out2 = self.fc2(out1)
return out1, out2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
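# Usage sketch (not part of the original repo): the eager module runs on CPU
# and returns both the hidden activation and the logits, matching get_inputs
# and get_init_inputs above.
def _example_eager_usage():
    model = NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency(
        input_size=4, hidden_size=4, num_classes=4)
    input1, input2 = get_inputs()
    out1, out2 = model(input1, input2)
    return out1, out2  # both (4, 4, 4, 4)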
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_relu_1[grid(256)](buf2, primals_4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_6
return buf2, reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf2, primals_5
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependencyNew(torch
.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependencyNew
, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
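# Usage sketch (not part of the original repo): a hypothetical helper that
# mirrors get_inputs above; a CUDA device is assumed because `call`
# allocates CUDA buffers.
def _example_multi_output_with_dependency():
    model = NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependencyNew(
        input_size=4, hidden_size=4, num_classes=4).cuda()
    input1 = torch.rand(4, 4, 4, 4, device='cuda')
    input2 = torch.rand(4, 4, 4, 4, device='cuda')
    out1, out2 = model(input1, input2)
    return out1, out2  # out2 is computed from out1; both are (4, 4, 4, 4)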
| thilow/onnxruntime | NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency | false | 11,025 | ["MIT"] | 0 | 1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9 | https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9 |
BertOutput
|
from _paritybench_helpers import _mock_config
import torch
from torch import nn
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""LayerNormalization層です。
学習済みモデルをそのままロードするため、学習済みモデルの変数名に変えています。
オリジナルのGitHubの実装から変数名を変えています。
weight→gamma、bias→beta
"""
super(BertLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BertOutput(nn.Module):
"""BERTのTransformerBlockモジュールのFeedForwardです"""
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
"""
hidden_states: BertIntermediateの出力テンソル
input_tensor:BertAttentionの出力テンソル
"""
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4,
hidden_dropout_prob=0.5)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + 2)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + 3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-12
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_0[grid(64)](buf0, primals_2, primals_4,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_sub_1[grid(256)](buf2, primals_2, primals_4,
buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_2
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_5,
buf2, primals_6, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
return buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""LayerNormalization層です。
学習済みモデルをそのままロードするため、学習済みモデルの変数名に変えています。
オリジナルのGitHubの実装から変数名を変えています。
weight→gamma、bias→beta
"""
super(BertLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BertOutputNew(nn.Module):
"""BERTのTransformerBlockモジュールのFeedForwardです"""
def __init__(self, config):
super(BertOutputNew, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_5 = self.LayerNorm.gamma
primals_6 = self.LayerNorm.beta
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
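# Usage sketch (not part of the original repo): SimpleNamespace stands in for
# the paritybench _mock_config used by get_init_inputs above; a CUDA device is
# assumed because `call` allocates CUDA buffers.
def _example_bert_output():
    from types import SimpleNamespace
    config = SimpleNamespace(intermediate_size=4, hidden_size=4,
        hidden_dropout_prob=0.5)
    layer = BertOutputNew(config).cuda()
    hidden_states = torch.rand(4, 4, 4, 4, device='cuda')
    input_tensor = torch.rand(4, 4, 4, 4, device='cuda')
    return layer(hidden_states, input_tensor)  # dense + add + LayerNorm, (4, 4, 4, 4)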
| Cyndi-Tokyotech/Fin_Text_Analysis_ML | BertOutput | false | 11,026 | ["MIT"] | 0 | 7f9b6c1ea78f8e6f32c003b2de32809722df88d4 | https://github.com/Cyndi-Tokyotech/Fin_Text_Analysis_ML/tree/7f9b6c1ea78f8e6f32c003b2de32809722df88d4 |
MultiHeadAttentionLayer
|
import math
import torch
import torch.nn as nn
class MultiHeadAttentionLayer(nn.Module):
def __init__(self, hidden_dim, n_heads, dropout=0.1):
super().__init__()
assert hidden_dim % n_heads == 0
self.hidden_dim = hidden_dim
self.n_heads = n_heads
self.head_dim = hidden_dim // n_heads
self.fc_q = nn.Linear(hidden_dim, hidden_dim)
self.fc_k = nn.Linear(hidden_dim, hidden_dim)
self.fc_v = nn.Linear(hidden_dim, hidden_dim)
self.fc_o = nn.Linear(hidden_dim, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(hidden_dim, eps=1e-06)
self.scale = math.sqrt(self.head_dim)
def forward(self, q, k, v, mask=None):
batch_size = q.size(0)
q = self.layer_norm(q)
q = self.fc_q(q)
k = self.fc_k(k)
v = self.fc_v(v)
q = q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
k = k.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
v = v.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
att = torch.matmul(q / self.scale, k.permute(0, 1, 3, 2))
if mask is not None:
att = att.masked_fill(mask == 0, -10000000000.0)
att = torch.softmax(att, dim=-1)
out = torch.matmul(self.dropout(att), v)
out = out.permute(0, 2, 1, 3).contiguous()
out = out.view(batch_size, self.hidden_dim)
out = self.dropout(self.fc_o(out))
return out, att
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4,
4, 4])]
def get_init_inputs():
return [[], {'hidden_dim': 4, 'n_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_div_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_per_fused__softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_8, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
del primals_6
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_11, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5)
del primals_9
buf6 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 16, 16), 0)
del buf3
triton_poi_fused_div_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 16, 1), torch.float32
)
triton_poi_fused_clone_3[grid(16, 16)](buf4, primals_7, buf7, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_7
buf8 = reinterpret_tensor(buf4, (16, 1, 16), (16, 16, 1), 0)
del buf4
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 1, 1), (1, 0, 0),
0), reinterpret_tensor(buf7, (16, 1, 16), (16, 0, 1), 0), out=buf8)
buf11 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 16, 1), torch.
float32)
triton_per_fused__softmax_4[grid(16)](buf8, buf11, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf12 = reinterpret_tensor(buf8, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf8
triton_poi_fused_clone_3[grid(16, 16)](buf5, primals_10, buf12, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf5
del primals_10
buf13 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 1, 16), (16, 16,
1), 0), reinterpret_tensor(buf12, (16, 16, 1), (16, 1, 0), 0),
out=buf13)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf13, (4, 4),
(4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf14)
del primals_13
return buf14, buf11, primals_1, buf2, reinterpret_tensor(primals_8, (64,
4), (4, 1), 0), reinterpret_tensor(primals_11, (64, 4), (4, 1), 0
), buf11, reinterpret_tensor(buf13, (4, 4), (4, 1), 0
), primals_12, reinterpret_tensor(buf12, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf6, (16, 1, 1), (1, 1, 4), 0
), reinterpret_tensor(buf7, (16, 16, 1), (16, 1, 16), 0), primals_4
class MultiHeadAttentionLayerNew(nn.Module):
def __init__(self, hidden_dim, n_heads, dropout=0.1):
super().__init__()
assert hidden_dim % n_heads == 0
self.hidden_dim = hidden_dim
self.n_heads = n_heads
self.head_dim = hidden_dim // n_heads
self.fc_q = nn.Linear(hidden_dim, hidden_dim)
self.fc_k = nn.Linear(hidden_dim, hidden_dim)
self.fc_v = nn.Linear(hidden_dim, hidden_dim)
self.fc_o = nn.Linear(hidden_dim, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(hidden_dim, eps=1e-06)
self.scale = math.sqrt(self.head_dim)
def forward(self, input_0, input_1, input_2):
primals_1 = self.fc_q.weight
primals_2 = self.fc_q.bias
primals_4 = self.fc_k.weight
primals_3 = self.fc_k.bias
primals_6 = self.fc_v.weight
primals_5 = self.fc_v.bias
primals_9 = self.fc_o.weight
primals_7 = self.fc_o.bias
primals_10 = self.layer_norm.weight
primals_13 = self.layer_norm.bias
primals_12 = input_0
primals_8 = input_1
primals_11 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
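# Usage sketch (not part of the original repo): input shapes follow get_inputs
# above (q is (4, 4); k and v are (4, 4, 4, 4)); a CUDA device is assumed.
def _example_multi_head_attention():
    mha = MultiHeadAttentionLayerNew(hidden_dim=4, n_heads=4).cuda()
    q = torch.rand(4, 4, device='cuda')
    k = torch.rand(4, 4, 4, 4, device='cuda')
    v = torch.rand(4, 4, 4, 4, device='cuda')
    out, att = mha(q, k, v)  # projected context (4, 4) and attention weights
    return out, att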
| wenjunyoung/PAN_PLUS | MultiHeadAttentionLayer | false | 11,027 | ["Apache-2.0"] | 0 | c893ff4775c8ff137a21c15d34fb93b9394dbfe5 | https://github.com/wenjunyoung/PAN_PLUS/tree/c893ff4775c8ff137a21c15d34fb93b9394dbfe5 |
UpsampleConvLayer
|
import torch
class UpsampleConvLayer(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, upsample
):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
self.upsample_layer = torch.nn.Upsample(scale_factor=upsample)
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
x_in = x
x_in = self.upsample_layer(x_in)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'stride': 1, 'upsample': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__unsafe_index_reflection_pad2d_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 20 % 20
x0 = xindex % 20
x2 = xindex // 400
x5 = xindex
tmp0 = 15 + -1 * tl_math.abs(-15 + tl_math.abs(-2 + x1))
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.25
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = 15 + -1 * tl_math.abs(-15 + tl_math.abs(-2 + x0))
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x5, tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 4624
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 289 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 20, 20), (1600, 400, 20, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_reflection_pad2d_0[grid(6400)](primals_1
, buf0, 6400, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 17, 17), (1156, 289, 17, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(4624)](buf2, primals_3, 4624,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class UpsampleConvLayerNew(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, upsample
):
super(UpsampleConvLayerNew, self).__init__()
self.upsample = upsample
self.upsample_layer = torch.nn.Upsample(scale_factor=upsample)
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, input_0):
primals_1 = self.conv2d.weight
primals_3 = self.conv2d.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
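# Usage sketch (not part of the original repo): parameters mirror
# get_init_inputs above; with upsample=4 and a 4x4 kernel, a (4, 4, 4, 4)
# input yields a (4, 4, 17, 17) output, as asserted in `call`. CUDA assumed.
def _example_upsample_conv():
    layer = UpsampleConvLayerNew(in_channels=4, out_channels=4,
        kernel_size=4, stride=1, upsample=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return layer(x)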
| yuweiliandrew/openrtist | UpsampleConvLayer | false | 11,028 | ["Apache-2.0"] | 0 | 4b6b17e77587751593d5e529b154e60513de3236 | https://github.com/yuweiliandrew/openrtist/tree/4b6b17e77587751593d5e529b154e60513de3236 |
Attention
|
import torch
def activation_func(name):
name = name.lower()
if name == 'sigmoid':
return torch.nn.Sigmoid()
elif name == 'tanh':
return torch.nn.Tanh()
elif name == 'relu':
return torch.nn.ReLU()
elif name == 'softmax':
return torch.nn.Softmax()
elif name == 'leaky_relu':
return torch.nn.LeakyReLU(0.1)
else:
return torch.nn.Sequential()
def cosine_similarity(input1, input2):
query_norm = torch.sqrt(torch.sum(input1 ** 2 + 1e-05, 1))
doc_norm = torch.sqrt(torch.sum(input2 ** 2 + 1e-05, 1))
prod = torch.sum(torch.mul(input1, input2), 1)
norm_prod = torch.mul(query_norm, doc_norm)
cos_sim_raw = torch.div(prod, norm_prod)
return cos_sim_raw
class Attention(torch.nn.Module):
def __init__(self, n_k, activation='relu'):
super(Attention, self).__init__()
self.n_k = n_k
self.fc_layer = torch.nn.Linear(self.n_k, self.n_k, activation_func
(activation))
self.soft_max_layer = torch.nn.Softmax()
def forward(self, pu, mp):
expanded_pu = pu.repeat(1, len(mp)).view(len(mp), -1)
inputs = cosine_similarity(expanded_pu, mp)
fc_layers = self.fc_layer(inputs)
attention_values = self.soft_max_layer(fc_layers)
return attention_values
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 16])]
def get_init_inputs():
return [[], {'n_k': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mul_pow_repeat_sqrt_sum_view_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + r1 % 4), xmask, other=0.0)
tmp8 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = 1e-05
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp8 * tmp8
tmp10 = tmp9 + tmp2
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tmp0 * tmp8
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = libdevice.sqrt(tmp7)
tmp21 = libdevice.sqrt(tmp14)
tmp22 = tmp20 * tmp21
tmp23 = tmp19 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp23, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp5 / tmp8
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_pow_repeat_sqrt_sum_view_0[grid(4)](buf3,
primals_1, primals_2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_1
del primals_2
buf4 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf3, (1, 4), (0,
1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf4)
del primals_3
del primals_4
buf7 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused__softmax_1[grid(1)](buf4, buf7, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf4
return buf7, reinterpret_tensor(buf3, (1, 4), (4, 1), 0), buf7
def activation_func(name):
name = name.lower()
if name == 'sigmoid':
return torch.nn.Sigmoid()
elif name == 'tanh':
return torch.nn.Tanh()
elif name == 'relu':
return torch.nn.ReLU()
elif name == 'softmax':
return torch.nn.Softmax()
elif name == 'leaky_relu':
return torch.nn.LeakyReLU(0.1)
else:
return torch.nn.Sequential()
def cosine_similarity(input1, input2):
query_norm = torch.sqrt(torch.sum(input1 ** 2 + 1e-05, 1))
doc_norm = torch.sqrt(torch.sum(input2 ** 2 + 1e-05, 1))
prod = torch.sum(torch.mul(input1, input2), 1)
norm_prod = torch.mul(query_norm, doc_norm)
cos_sim_raw = torch.div(prod, norm_prod)
return cos_sim_raw
class AttentionNew(torch.nn.Module):
def __init__(self, n_k, activation='relu'):
super(AttentionNew, self).__init__()
self.n_k = n_k
self.fc_layer = torch.nn.Linear(self.n_k, self.n_k, activation_func
(activation))
self.soft_max_layer = torch.nn.Softmax()
def forward(self, input_0, input_1):
primals_1 = self.fc_layer.weight
primals_4 = self.fc_layer.bias
primals_3 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
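# Usage sketch (not part of the original repo): input_0 is the (4, 4) profile
# tensor and input_1 the (4, 16) memory tensor, matching get_inputs above; a
# CUDA device is assumed. The result is a softmax over four attention values.
def _example_attention():
    att = AttentionNew(n_k=4).cuda()
    pu = torch.rand(4, 4, device='cuda')
    mp = torch.rand(4, 16, device='cuda')
    return att(pu, mp)  # shape (4,)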
| xu94-nlp/Code-for-MAMO | Attention | false | 11,029 | ["Apache-2.0"] | 0 | d9c6655e0660976c90c07fa096a1f5dc8328a60b | https://github.com/xu94-nlp/Code-for-MAMO/tree/d9c6655e0660976c90c07fa096a1f5dc8328a60b |
AngleSimpleLinear
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
from torch.nn import Parameter
from torch.nn.parameter import Parameter
import torch.onnx
import torch.nn
class AngleSimpleLinear(nn.Module):
"""Computes cos of angles between input vectors and weights vectors"""
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
def forward(self, x):
cos_theta = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return cos_theta.clamp(-1.0 + 1e-07, 1.0 - 1e-07),
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torchvision import models as models
from torch.nn import Parameter
from torch.nn.parameter import Parameter
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -0.9999999
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 0.9999999
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tmp0 >= tmp1
tmp6 = tmp0 <= tmp3
tmp7 = tmp5 & tmp6
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, buf1, out=buf2)
buf3 = buf1
del buf1
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_clamp_ge_le_logical_and_2[grid(16)](buf2, buf3,
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf2
return buf3, primals_2, buf4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0)
class AngleSimpleLinearNew(nn.Module):
"""Computes cos of angles between input vectors and weights vectors"""
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
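# Usage sketch (not part of the original repo): note that, unlike the eager
# module, which returns a one-element tuple, the compiled forward returns the
# clamped cosine matrix directly. A CUDA device is assumed.
def _example_angle_simple_linear():
    layer = AngleSimpleLinearNew(in_features=4, out_features=4).cuda()
    x = torch.rand(4, 4, device='cuda')
    return layer(x)  # (4, 4), clamped to (-1 + 1e-07, 1 - 1e-07)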
| krodyush/training_extensions | AngleSimpleLinear | false | 11,030 | ["Apache-2.0"] | 0 | 542f4004dfbc6fc62a622065367ba4f85a703dd3 | https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3 |
FCLateActionSAQFunction
|
import torch
import numpy as np
from torch import nn
from abc import ABCMeta
from abc import abstractmethod
import torch.nn.functional as F
def init_lecun_normal(tensor, scale=1.0):
"""Initializes the tensor with LeCunNormal."""
fan_in = torch.nn.init._calculate_correct_fan(tensor, 'fan_in')
std = scale * np.sqrt(1.0 / fan_in)
with torch.no_grad():
return tensor.normal_(0, std)
@torch.no_grad()
def init_chainer_default(layer):
"""Initializes the layer with the chainer default.
weights with LeCunNormal(scale=1.0) and zeros as biases
"""
assert isinstance(layer, nn.Module)
if isinstance(layer, (nn.Linear, nn.Conv2d)):
init_lecun_normal(layer.weight)
if layer.bias is not None:
nn.init.zeros_(layer.bias)
return layer
class MLP(nn.Module):
"""Multi-Layer Perceptron"""
def __init__(self, in_size, out_size, hidden_sizes, nonlinearity=F.relu,
last_wscale=1):
self.in_size = in_size
self.out_size = out_size
self.hidden_sizes = hidden_sizes
self.nonlinearity = nonlinearity
super().__init__()
if hidden_sizes:
self.hidden_layers = nn.ModuleList()
self.hidden_layers.append(nn.Linear(in_size, hidden_sizes[0]))
for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
self.hidden_layers.append(nn.Linear(hin, hout))
self.hidden_layers.apply(init_chainer_default)
self.output = nn.Linear(hidden_sizes[-1], out_size)
else:
self.output = nn.Linear(in_size, out_size)
init_lecun_normal(self.output.weight, scale=last_wscale)
nn.init.zeros_(self.output.bias)
def forward(self, x):
h = x
if self.hidden_sizes:
for layer in self.hidden_layers:
h = self.nonlinearity(layer(h))
return self.output(h)
class StateActionQFunction(object, metaclass=ABCMeta):
"""Abstract Q-function with state and action input."""
@abstractmethod
def __call__(self, x, a):
"""Evaluates Q-function
Args:
x (ndarray): state input
a (ndarray): action input
Returns:
Q-value for state x and action a
"""
raise NotImplementedError()
class FCLateActionSAQFunction(nn.Module, StateActionQFunction):
"""Fully-connected (s,a)-input Q-function with late action input.
Actions are not included until the second hidden layer and not normalized.
This architecture is used in the DDPG paper:
http://arxiv.org/abs/1509.02971
Args:
n_dim_obs (int): Number of dimensions of observation space.
n_dim_action (int): Number of dimensions of action space.
n_hidden_channels (int): Number of hidden channels.
n_hidden_layers (int): Number of hidden layers. It must be greater than
or equal to 1.
nonlinearity (callable): Nonlinearity between layers. It must accept a
Variable as an argument and return a Variable with the same shape.
Nonlinearities with learnable parameters such as PReLU are not
supported.
last_wscale (float): Scale of weight initialization of the last layer.
"""
def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels,
n_hidden_layers, nonlinearity=F.relu, last_wscale=1.0):
assert n_hidden_layers >= 1
self.n_input_channels = n_dim_obs + n_dim_action
self.n_hidden_layers = n_hidden_layers
self.n_hidden_channels = n_hidden_channels
self.nonlinearity = nonlinearity
super().__init__()
self.obs_mlp = MLP(in_size=n_dim_obs, out_size=n_hidden_channels,
hidden_sizes=[])
self.mlp = MLP(in_size=n_hidden_channels + n_dim_action, out_size=1,
hidden_sizes=[self.n_hidden_channels] * (self.n_hidden_layers -
1), nonlinearity=nonlinearity, last_wscale=last_wscale)
self.output = self.mlp.output
def forward(self, state, action):
h = self.nonlinearity(self.obs_mlp(state))
h = torch.cat((h, action), dim=1)
return self.mlp(h)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_dim_obs': 4, 'n_dim_action': 4, 'n_hidden_channels': 4,
'n_hidden_layers': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
from torch import nn
from abc import ABCMeta
from abc import abstractmethod
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp15 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 8), (8, 1))
assert_size_stride(primals_6, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](buf0, primals_3, primals_4, buf1,
32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5,
(8, 1), (1, 8), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16)](buf0,
primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf0
del primals_3
return buf3, primals_1, buf1, primals_5, buf4
def init_lecun_normal(tensor, scale=1.0):
"""Initializes the tensor with LeCunNormal."""
fan_in = torch.nn.init._calculate_correct_fan(tensor, 'fan_in')
std = scale * np.sqrt(1.0 / fan_in)
with torch.no_grad():
return tensor.normal_(0, std)
@torch.no_grad()
def init_chainer_default(layer):
"""Initializes the layer with the chainer default.
weights with LeCunNormal(scale=1.0) and zeros as biases
"""
assert isinstance(layer, nn.Module)
if isinstance(layer, (nn.Linear, nn.Conv2d)):
init_lecun_normal(layer.weight)
if layer.bias is not None:
nn.init.zeros_(layer.bias)
return layer
class MLP(nn.Module):
"""Multi-Layer Perceptron"""
def __init__(self, in_size, out_size, hidden_sizes, nonlinearity=F.relu,
last_wscale=1):
self.in_size = in_size
self.out_size = out_size
self.hidden_sizes = hidden_sizes
self.nonlinearity = nonlinearity
super().__init__()
if hidden_sizes:
self.hidden_layers = nn.ModuleList()
self.hidden_layers.append(nn.Linear(in_size, hidden_sizes[0]))
for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
self.hidden_layers.append(nn.Linear(hin, hout))
self.hidden_layers.apply(init_chainer_default)
self.output = nn.Linear(hidden_sizes[-1], out_size)
else:
self.output = nn.Linear(in_size, out_size)
init_lecun_normal(self.output.weight, scale=last_wscale)
nn.init.zeros_(self.output.bias)
def forward(self, x):
h = x
if self.hidden_sizes:
for layer in self.hidden_layers:
h = self.nonlinearity(layer(h))
return self.output(h)
class StateActionQFunction(object, metaclass=ABCMeta):
"""Abstract Q-function with state and action input."""
@abstractmethod
def __call__(self, x, a):
"""Evaluates Q-function
Args:
x (ndarray): state input
a (ndarray): action input
Returns:
Q-value for state x and action a
"""
raise NotImplementedError()
class FCLateActionSAQFunctionNew(nn.Module, StateActionQFunction):
"""Fully-connected (s,a)-input Q-function with late action input.
Actions are not included until the second hidden layer and not normalized.
This architecture is used in the DDPG paper:
http://arxiv.org/abs/1509.02971
Args:
n_dim_obs (int): Number of dimensions of observation space.
n_dim_action (int): Number of dimensions of action space.
n_hidden_channels (int): Number of hidden channels.
n_hidden_layers (int): Number of hidden layers. It must be greater than
or equal to 1.
nonlinearity (callable): Nonlinearity between layers. It must accept a
Variable as an argument and return a Variable with the same shape.
Nonlinearities with learnable parameters such as PReLU are not
supported.
last_wscale (float): Scale of weight initialization of the last layer.
"""
def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels,
n_hidden_layers, nonlinearity=F.relu, last_wscale=1.0):
assert n_hidden_layers >= 1
self.n_input_channels = n_dim_obs + n_dim_action
self.n_hidden_layers = n_hidden_layers
self.n_hidden_channels = n_hidden_channels
self.nonlinearity = nonlinearity
super().__init__()
self.obs_mlp = MLP(in_size=n_dim_obs, out_size=n_hidden_channels,
hidden_sizes=[])
self.mlp = MLP(in_size=n_hidden_channels + n_dim_action, out_size=1,
hidden_sizes=[self.n_hidden_channels] * (self.n_hidden_layers -
1), nonlinearity=nonlinearity, last_wscale=last_wscale)
self.output = self.mlp.output
def forward(self, input_0, input_1):
primals_1 = self.obs_mlp.output.weight
primals_3 = self.obs_mlp.output.bias
primals_5 = self.mlp.output.weight
primals_6 = self.mlp.output.bias
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
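# Editor's sketch (not part of the original repo): minimal usage of the wrapper
# above. The generated call() path targets CUDA, so this assumes a CUDA device
# and uses the same toy sizes as the traced shapes (n_dim_obs=4, n_dim_action=4,
# n_hidden_channels=4, n_hidden_layers=1); the Q-value has shape (batch, 1).
if __name__ == '__main__':
    if torch.cuda.is_available():
        qf = FCLateActionSAQFunctionNew(n_dim_obs=4, n_dim_action=4,
            n_hidden_channels=4, n_hidden_layers=1).cuda()
        obs = torch.rand(4, 4, device='cuda')
        action = torch.rand(4, 4, device='cuda')
        q = qf(obs, action)
        print(q.shape)  # torch.Size([4, 1])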
|
tarokiritani/pfrl
|
FCLateActionSAQFunction
| false
| 11,031
|
[
"MIT"
] | 0
|
284ed1f43b32654a2ec1569b16a0f6b9acbd5e79
|
https://github.com/tarokiritani/pfrl/tree/284ed1f43b32654a2ec1569b16a0f6b9acbd5e79
|
conv_head_pooling
|
import torch
import torch.nn as nn
class conv_head_pooling(nn.Module):
def __init__(self, in_feature, out_feature, stride, conv_type,
padding_mode='zeros', dilation=1):
super(conv_head_pooling, self).__init__()
if conv_type == 'depthwise':
_groups = in_feature
else:
_groups = 1
None
self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=3,
padding=dilation, dilation=dilation, stride=stride,
padding_mode=padding_mode, groups=_groups)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, x, cls_token):
x = self.conv(x)
cls_token = self.fc(cls_token)
return x, cls_token
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_feature': 4, 'out_feature': 4, 'stride': 1,
'conv_type': 4}]
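# Editor's sketch (not part of the original repo): hypothetical usage of the
# module above with conv_type='conv' (any value other than 'depthwise' selects
# groups=1). With stride=1 the feature map keeps its spatial size and the class
# token is projected by the linear layer.
if __name__ == '__main__':
    pool = conv_head_pooling(in_feature=4, out_feature=4, stride=1,
        conv_type='conv')
    x = torch.rand(4, 4, 4, 4)       # (B, C, H, W) feature map
    cls_token = torch.rand(4, 1, 4)  # (B, 1, C) class token
    y, cls_out = pool(x, cls_token)
    print(y.shape, cls_out.shape)    # (4, 4, 4, 4) and (4, 1, 4)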
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
return buf1, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, primals_3, reinterpret_tensor(primals_6, (64, 4), (4,
1), 0)
class conv_head_poolingNew(nn.Module):
def __init__(self, in_feature, out_feature, stride, conv_type,
padding_mode='zeros', dilation=1):
super(conv_head_poolingNew, self).__init__()
if conv_type == 'depthwise':
_groups = in_feature
else:
_groups = 1
None
self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=3,
padding=dilation, dilation=dilation, stride=stride,
padding_mode=padding_mode, groups=_groups)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, input_0, input_1):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.fc.weight
primals_5 = self.fc.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
yasarniyazoglu/d2go
|
conv_head_pooling
| false
| 11,032
|
[
"Apache-2.0"
] | 0
|
308c2700c51c70a7a928d99a477b64e856d1ed5e
|
https://github.com/yasarniyazoglu/d2go/tree/308c2700c51c70a7a928d99a477b64e856d1ed5e
|
MultiHeadAttention
|
import torch
import torchvision.transforms.functional as F
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = F.softmax(attn, dim=-1)
output = torch.matmul(attn, v)
return output, attn
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.fc(q)
q += residual
q = self.layer_norm(q)
return q, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'n_head': 4, 'd_model': 4, 'd_k': 4, 'd_v': 4}]
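# Editor's sketch (not part of the original repo): minimal self-attention usage
# with the toy sizes from get_init_inputs(); the module returns the attended
# sequence (after residual + layer norm) and the attention weights.
if __name__ == '__main__':
    mha = MultiHeadAttention(n_head=4, d_model=4, d_k=4, d_v=4)
    q = k = v = torch.rand(4, 4, 4)  # (batch, seq_len, d_model)
    out, attn = mha(q, k, v)
    print(out.shape, attn.shape)     # (4, 4, 4) and (4, 4, 4, 4)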
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torchvision.transforms.functional as F
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 16), (16, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_7, (16, 4), (1, 16), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_5[grid(16)](buf11, primals_1,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_6[grid(64)](buf11, primals_1,
buf12, buf13, primals_8, primals_9, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf12
del buf13
del primals_9
return buf14, buf7, primals_1, primals_8, reinterpret_tensor(primals_2,
(16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0
), buf11, primals_7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0)
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = F.softmax(attn, dim=-1)
output = torch.matmul(attn, v)
return output, attn
class MultiHeadAttentionNew(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, input_0, input_1, input_2):
primals_4 = self.w_qs.weight
primals_5 = self.w_ks.weight
primals_6 = self.w_vs.weight
primals_7 = self.fc.weight
primals_8 = self.layer_norm.weight
primals_9 = self.layer_norm.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
|
yshen47/mvsnerf
|
MultiHeadAttention
| false
| 11,033
|
[
"MIT"
] | 0
|
38ab4cf4fc5d025a9ad04e4a801b501ea9a78fb4
|
https://github.com/yshen47/mvsnerf/tree/38ab4cf4fc5d025a9ad04e4a801b501ea9a78fb4
|
topk_PAM_Module
|
from torch.nn import Module
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.modules.module import Module
def mask_softmax(input, mask=None, dim=-1):
"""Applies a softmax function.
Softmax is defined as:
:math:`\\text{Softmax}(x_{i}) = \\frac{exp(x_i)}{\\sum_j exp(x_j)}`
It is applied to all slices along dim, and will re-scale them so that the elements
lie in the range `(0, 1)` and sum to 1.
See :class:`~torch.nn.Softmax` for more details.
Arguments:
input (Tensor): input
dim (int): A dimension along which softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is casted to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
.. note::
This function doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use log_softmax instead (it's faster and has better numerical properties).
"""
if mask is None:
return F.softmax(input, dim=dim, _stacklevel=5)
else:
max_input = input.max(dim=dim, keepdim=True)
exp_input = torch.exp(input - max_input[0])
mask_exp_input = torch.mul(exp_input, mask)
sum_mask_exp_input = torch.sum(mask_exp_input, dim=dim, keepdim=True
) + 1e-10
return torch.div(mask_exp_input, sum_mask_exp_input)
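# Editor's note (worked example, not in the original repo): for logits
# [1., 2., 3.] with mask [1., 1., 0.], mask_softmax subtracts the max
# (exp([-2, -1, 0]) = [0.135, 0.368, 1.0]), zeros the last entry via the mask,
# and renormalizes, giving roughly [0.269, 0.731, 0.0], i.e. an ordinary
# softmax restricted to the unmasked positions.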
def mvmask_softmax(input, mask=None, dim=-1):
"""Applies a softmax function.
Softmax is defined as:
:math:`\\text{Softmax}(x_{i}) = \\frac{exp(x_i)}{\\sum_j exp(x_j)}`
It is applied to all slices along dim, and will re-scale them so that the elements
lie in the range `(0, 1)` and sum to 1.
See :class:`~torch.nn.Softmax` for more details.
Arguments:
input (Tensor): input
dim (int): A dimension along which softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is casted to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
.. note::
This function doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use log_softmax instead (it's faster and has better numerical properties).
"""
if mask is None:
return F.softmax(input, dim=dim, _stacklevel=5)
else:
if torch.is_tensor(mask):
return mask_softmax(input, mask=mask, dim=dim)
mask = [mask[0], mask[1]]
N, _H, _W = mask[0].size()
max_input = input.max(dim=dim, keepdim=True)
exp_input = torch.exp(input - max_input[0])
if N == 1:
mask_exp_input = torch.mul(exp_input, mask[0])
sum_mask_exp_input = torch.sum(mask_exp_input, dim=dim, keepdim
=True) + 1e-10
return torch.div(mask_exp_input, sum_mask_exp_input)
else:
Sm = 0
for i in range(N):
mask_exp_input = torch.mul(exp_input, mask[0][i])
sum_mask_exp_input = torch.sum(mask_exp_input, dim=dim,
keepdim=True) + 1e-10
Sm = Sm + torch.div(mask_exp_input, sum_mask_exp_input)
return torch.mul(Sm, mask[1])
class Mask_Softmax(Module):
"""Applies the Softmax function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range (0,1) and sum to 1
Softmax is defined as:
.. math::
\\text{Softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}
Shape:
- Input: any shape
- Output: same as input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Arguments:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
.. note::
This module doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use `LogSoftmax` instead (it's faster and has better numerical properties).
Examples::
>>> m = nn.Softmax()
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
def __init__(self, dim=None):
super(Mask_Softmax, self).__init__()
self.dim = dim
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input):
return mvmask_softmax(input[0], input[1], self.dim)
class topk_PAM_Module(nn.Module):
""" Position attention module"""
def __init__(self, in_dim, key_dim, out_dim, topk=10):
super(topk_PAM_Module, self).__init__()
self.chanel_in = in_dim
self.topk = topk
self.key_channels = key_dim
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=
key_dim, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=key_dim,
kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=
out_dim, kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = Mask_Softmax(dim=-1)
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X (HxW) X (HxW)
"""
m_batchsize, C, height, width = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width * height
).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width * height)
energy = torch.bmm(proj_query, proj_key)
proj_value = self.value_conv(x)
proj_value = proj_value.view(m_batchsize, -1, width * height)
_val, idx = torch.topk(energy, height * width // self.topk, dim=2,
largest=True, sorted=False)
at_sparse = torch.zeros_like(energy)
attention_mask = at_sparse.scatter_(2, idx, 1.0)
attention = self.softmax([energy, attention_mask])
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, height, width)
out = self.gamma * out + x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'key_dim': 4, 'out_dim': 4}]
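# Editor's sketch (not part of the original repo): minimal usage with the toy
# sizes from get_init_inputs(). With a 4x4 feature map and topk=10, each query
# keeps height*width // topk = 1 key position before the masked softmax, and
# the output keeps the input shape (gamma-scaled attention output plus residual).
if __name__ == '__main__':
    pam = topk_PAM_Module(in_dim=4, key_dim=4, out_dim=4)
    x = torch.rand(4, 4, 4, 4)  # (B, C, H, W)
    out = pam(x)
    print(out.shape)  # torch.Size([4, 4, 4, 4])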
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_zeros_like_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_scatter_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.device_assert((0 <= tmp0) & (tmp0 < 16) | ~xmask,
'index out of bounds: 0 <= tmp0 < 16')
tmp2 = 1.0
tl.store(out_ptr0 + (tmp0 + 16 * x0), tmp2, xmask)
@triton.jit
def triton_per_fused_add_div_exp_max_mul_sub_sum_3(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp14 = tl.broadcast_to(rindex, tmp3.shape)
_, tmp13_tmp = triton_helpers.max_with_index(tmp3, tmp14, 1)
tmp13 = tmp13_tmp[:, None]
tmp15 = 1e-10
tmp16 = tmp12 + tmp15
tmp17 = tmp8 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp17, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 4), (64, 1, 16),
0), reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0), out=buf4)
buf5 = extern_kernels.convolution(primals_1, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_0[grid(256)](buf6, primals_7, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf7 = torch.ops.aten.topk.default(buf4, 1, 2, True, False)
buf9 = buf7[1]
del buf7
buf10 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_poi_fused_zeros_like_1[grid(1024)](buf10, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_poi_fused_zeros_like_1[grid(1024)](buf11, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
triton_poi_fused_scatter_2[grid(64)](buf9, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
buf15 = empty_strided_cuda((4, 16, 1), (16, 1, 64), torch.float32)
buf14 = buf9
del buf9
buf16 = reinterpret_tensor(buf15, (4, 16, 1), (16, 1, 1), 0)
del buf15
buf17 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_per_fused_add_div_exp_max_mul_sub_sum_3[grid(64)](buf16,
buf4, buf11, buf13, buf14, buf17, 64, 16, XBLOCK=32, num_warps=
4, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 16), (64, 16, 1),
0), reinterpret_tensor(buf17, (4, 16, 16), (256, 1, 16), 0),
out=buf18)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_4[grid(256)](primals_8, buf18, primals_1,
buf19, 256, XBLOCK=128, num_warps=4, num_stages=1)
return (buf19, primals_1, primals_2, primals_4, primals_6, primals_8,
buf4, buf10, buf11, buf13, buf14, buf16, buf17, buf18,
reinterpret_tensor(buf6, (4, 16, 4), (64, 1, 16), 0),
reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0),
reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0))
def mask_softmax(input, mask=None, dim=-1):
"""Applies a softmax function.
Softmax is defined as:
:math:`\\text{Softmax}(x_{i}) = \\frac{exp(x_i)}{\\sum_j exp(x_j)}`
It is applied to all slices along dim, and will re-scale them so that the elements
lie in the range `(0, 1)` and sum to 1.
See :class:`~torch.nn.Softmax` for more details.
Arguments:
input (Tensor): input
dim (int): A dimension along which softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is casted to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
.. note::
This function doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use log_softmax instead (it's faster and has better numerical properties).
"""
if mask is None:
return F.softmax(input, dim=dim, _stacklevel=5)
else:
max_input = input.max(dim=dim, keepdim=True)
exp_input = torch.exp(input - max_input[0])
mask_exp_input = torch.mul(exp_input, mask)
sum_mask_exp_input = torch.sum(mask_exp_input, dim=dim, keepdim=True
) + 1e-10
return torch.div(mask_exp_input, sum_mask_exp_input)
def mvmask_softmax(input, mask=None, dim=-1):
"""Applies a softmax function.
Softmax is defined as:
:math:`\\text{Softmax}(x_{i}) = \\frac{exp(x_i)}{\\sum_j exp(x_j)}`
It is applied to all slices along dim, and will re-scale them so that the elements
lie in the range `(0, 1)` and sum to 1.
See :class:`~torch.nn.Softmax` for more details.
Arguments:
input (Tensor): input
dim (int): A dimension along which softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is casted to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
.. note::
This function doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use log_softmax instead (it's faster and has better numerical properties).
"""
if mask is None:
return F.softmax(input, dim=dim, _stacklevel=5)
else:
if torch.is_tensor(mask):
return mask_softmax(input, mask=mask, dim=dim)
mask = [mask[0], mask[1]]
N, _H, _W = mask[0].size()
max_input = input.max(dim=dim, keepdim=True)
exp_input = torch.exp(input - max_input[0])
if N == 1:
mask_exp_input = torch.mul(exp_input, mask[0])
sum_mask_exp_input = torch.sum(mask_exp_input, dim=dim, keepdim
=True) + 1e-10
return torch.div(mask_exp_input, sum_mask_exp_input)
else:
Sm = 0
for i in range(N):
mask_exp_input = torch.mul(exp_input, mask[0][i])
sum_mask_exp_input = torch.sum(mask_exp_input, dim=dim,
keepdim=True) + 1e-10
Sm = Sm + torch.div(mask_exp_input, sum_mask_exp_input)
return torch.mul(Sm, mask[1])
class Mask_Softmax(Module):
"""Applies the Softmax function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range (0,1) and sum to 1
Softmax is defined as:
.. math::
\\text{Softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}
Shape:
- Input: any shape
- Output: same as input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Arguments:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
.. note::
This module doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use `LogSoftmax` instead (it's faster and has better numerical properties).
Examples::
>>> m = nn.Softmax()
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
def __init__(self, dim=None):
super(Mask_Softmax, self).__init__()
self.dim = dim
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input):
return mvmask_softmax(input[0], input[1], self.dim)
class topk_PAM_ModuleNew(nn.Module):
""" Position attention module"""
def __init__(self, in_dim, key_dim, out_dim, topk=10):
super(topk_PAM_ModuleNew, self).__init__()
self.chanel_in = in_dim
self.topk = topk
self.key_channels = key_dim
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=
key_dim, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=key_dim,
kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=
out_dim, kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = Mask_Softmax(dim=-1)
def forward(self, input_0):
primals_8 = self.gamma
primals_2 = self.query_conv.weight
primals_3 = self.query_conv.bias
primals_4 = self.key_conv.weight
primals_5 = self.key_conv.bias
primals_6 = self.value_conv.weight
primals_7 = self.value_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
yougoforward/OCNet1931
|
topk_PAM_Module
| false
| 11,034
|
[
"MIT"
] | 0
|
e679e9f248aff2f06e1d983e4e30230e5fc5174f
|
https://github.com/yougoforward/OCNet1931/tree/e679e9f248aff2f06e1d983e4e30230e5fc5174f
|
Classifier
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Classifier(nn.Module):
def __init__(self, inputs, hidden_units):
super().__init__()
self.hidden = nn.Linear(inputs, hidden_units)
self.output = nn.Linear(hidden_units, 102)
self.dropout = nn.Dropout(p=0.2)
def forward(self, x):
x = self.hidden(x)
x = F.relu(x)
x = self.dropout(x)
x = self.output(x)
x = F.log_softmax(x, dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inputs': 4, 'hidden_units': 4}]
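# Editor's sketch (not part of the original repo): the classifier returns
# log-probabilities over 102 classes along dim=1, so it pairs with nn.NLLLoss.
if __name__ == '__main__':
    clf = Classifier(inputs=4, hidden_units=4)
    x = torch.rand(4, 4)
    log_probs = clf(x)
    print(log_probs.shape)             # torch.Size([4, 102])
    print(log_probs.exp().sum(dim=1))  # each row sums to ~1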
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 6528
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 408
x2 = xindex // 1632
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 1632 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (408 + x0 + 1632 * x2), xmask, eviction_policy
='evict_last')
tmp4 = tl.load(in_ptr0 + (816 + x0 + 1632 * x2), xmask, eviction_policy
='evict_last')
tmp6 = tl.load(in_ptr0 + (1224 + x0 + 1632 * x2), xmask,
eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 6528
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 408
x2 = xindex // 1632
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 1632 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (408 + x0 + 1632 * x2), xmask, eviction_policy
='evict_last')
tmp6 = tl.load(in_ptr0 + (816 + x0 + 1632 * x2), xmask, eviction_policy
='evict_last')
tmp9 = tl.load(in_ptr0 + (1224 + x0 + 1632 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (102, 4), (4, 1))
assert_size_stride(primals_5, (102,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 102), (102, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 102), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 102), (1632, 408, 102, 1),
torch.float32)
triton_poi_fused__log_softmax_1[grid(6528)](buf2, buf3, 6528,
XBLOCK=256, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 102), (1632, 408, 102, 1), 0)
del buf2
triton_poi_fused__log_softmax_2[grid(6528)](buf3, buf4, 6528,
XBLOCK=256, num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5
class ClassifierNew(nn.Module):
def __init__(self, inputs, hidden_units):
super().__init__()
self.hidden = nn.Linear(inputs, hidden_units)
self.output = nn.Linear(hidden_units, 102)
self.dropout = nn.Dropout(p=0.2)
def forward(self, input_0):
primals_1 = self.hidden.weight
primals_2 = self.hidden.bias
primals_4 = self.output.weight
primals_5 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
zamerman/Udacity-AI-Programming
|
Classifier
| false
| 11,035
|
[
"MIT"
] | 0
|
6537f273fb00531d448330c1c85886d86e1161d2
|
https://github.com/zamerman/Udacity-AI-Programming/tree/6537f273fb00531d448330c1c85886d86e1161d2
|
UnaryBlock
|
import torch
import torch.utils.data
import torch.nn as nn
from torch.nn.parameter import Parameter
class BatchNormBlock(nn.Module):
def __init__(self, in_dim, use_bn, bn_momentum):
"""
        Initialize a batch normalization block. If the network does not use batch normalization, it is replaced with a learned bias.
        :param in_dim: dimension of input features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(BatchNormBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.in_dim = in_dim
if self.use_bn:
self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum)
else:
self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32),
requires_grad=True)
return
def reset_parameters(self):
nn.init.zeros_(self.bias)
def forward(self, x):
if self.use_bn:
x = x.unsqueeze(2)
x = x.transpose(0, 2)
x = self.batch_norm(x)
x = x.transpose(0, 2)
return x.squeeze()
else:
return x + self.bias
def __repr__(self):
return (
'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})'
.format(self.in_dim, self.bn_momentum, str(not self.use_bn)))
class UnaryBlock(nn.Module):
def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
"""
Initialize a standard unary block with its ReLU and BatchNorm.
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(UnaryBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.no_relu = no_relu
self.in_dim = in_dim
self.out_dim = out_dim
self.mlp = nn.Linear(in_dim, out_dim, bias=False)
self.batch_norm = BatchNormBlock(out_dim, self.use_bn, self.bn_momentum
)
if not no_relu:
self.leaky_relu = nn.LeakyReLU(0.1)
return
def forward(self, x, batch=None):
x = self.mlp(x)
x = self.batch_norm(x)
if not self.no_relu:
x = self.leaky_relu(x)
return x
def __repr__(self):
return (
'UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})'
.format(self.in_dim, self.out_dim, str(self.use_bn), str(not
self.no_relu)))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4, 'use_bn': 4, 'bn_momentum': 4}]
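# Editor's sketch (not part of the original repo): minimal usage. With use_bn
# truthy the block runs Linear -> InstanceNorm1d (applied feature-wise via the
# transpose trick in BatchNormBlock) -> LeakyReLU(0.1).
if __name__ == '__main__':
    block = UnaryBlock(in_dim=4, out_dim=8, use_bn=True, bn_momentum=0.1)
    points = torch.rand(16, 4)  # (num_points, in_dim)
    out = block(points)
    print(out.shape)            # torch.Size([16, 8])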
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__native_batch_norm_legit_clone_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.1
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + (y0 + 4 * x1), tmp9, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
buf2 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__native_batch_norm_legit_clone_0[grid(4)](buf0,
buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_leaky_relu_1[grid(4, 4)](buf0, buf1, buf2, buf3, 4,
4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
del buf1
del buf2
return buf3, primals_2, buf0
class BatchNormBlock(nn.Module):
def __init__(self, in_dim, use_bn, bn_momentum):
"""
        Initialize a batch normalization block. If the network does not use batch normalization, it is replaced with a learned bias.
        :param in_dim: dimension of input features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(BatchNormBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.in_dim = in_dim
if self.use_bn:
self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum)
else:
self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32),
requires_grad=True)
return
def reset_parameters(self):
nn.init.zeros_(self.bias)
def forward(self, x):
if self.use_bn:
x = x.unsqueeze(2)
x = x.transpose(0, 2)
x = self.batch_norm(x)
x = x.transpose(0, 2)
return x.squeeze()
else:
return x + self.bias
def __repr__(self):
return (
'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})'
.format(self.in_dim, self.bn_momentum, str(not self.use_bn)))
class UnaryBlockNew(nn.Module):
def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
"""
Initialize a standard unary block with its ReLU and BatchNorm.
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(UnaryBlockNew, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.no_relu = no_relu
self.in_dim = in_dim
self.out_dim = out_dim
self.mlp = nn.Linear(in_dim, out_dim, bias=False)
self.batch_norm = BatchNormBlock(out_dim, self.use_bn, self.bn_momentum
)
if not no_relu:
self.leaky_relu = nn.LeakyReLU(0.1)
return
def __repr__(self):
return (
'UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})'
.format(self.in_dim, self.out_dim, str(self.use_bn), str(not
self.no_relu)))
def forward(self, input_0):
primals_1 = self.mlp.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
wuxingzhe/OverPredactor
|
UnaryBlock
| false
| 11,036
|
[
"MIT"
] | 0
|
3a0965f4c3fc84ec0dcba555ec7c460f265d9143
|
https://github.com/wuxingzhe/OverPredactor/tree/3a0965f4c3fc84ec0dcba555ec7c460f265d9143
|
Encoder
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
"""Estimation of the nonnegative mixture weight by a 1-D conv layer.
"""
def __init__(self, L, N):
super(Encoder, self).__init__()
self.L, self.N = L, N
self.conv1d_U = nn.Conv1d(1, N, kernel_size=L, stride=L // 2, bias=
False)
def forward(self, mixture):
"""
Args:
mixture: [M, T], M is batch size, T is #samples
Returns:
mixture_w: [M, N, K], where K = (T-L)/(L/2)+1 = 2T/L-1
"""
mixture = torch.unsqueeze(mixture, 1)
mixture_w = F.relu(self.conv1d_U(mixture))
return mixture_w
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'L': 4, 'N': 4}]
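# Editor's sketch (not part of the original repo): a quick check of the
# K = (T - L) / (L / 2) + 1 = 2T/L - 1 formula from the docstring, with L=4,
# N=4 and T=16, so K = 2 * 16 / 4 - 1 = 7.
if __name__ == '__main__':
    enc = Encoder(L=4, N=4)
    mixture = torch.rand(2, 16)  # (M, T)
    mixture_w = enc(mixture)
    print(mixture_w.shape)       # torch.Size([2, 4, 7])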
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4), (4, 4, 1), 0), primals_2, stride=(2,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (4, 4, 1), (4, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf1, primals_2, reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
1), 0), buf2
class EncoderNew(nn.Module):
"""Estimation of the nonnegative mixture weight by a 1-D conv layer.
"""
def __init__(self, L, N):
super(EncoderNew, self).__init__()
self.L, self.N = L, N
self.conv1d_U = nn.Conv1d(1, N, kernel_size=L, stride=L // 2, bias=
False)
def forward(self, input_0):
primals_2 = self.conv1d_U.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
zhangxinaaaa/Conv-TasNet
|
Encoder
| false
| 11,037
|
[
"MIT"
] | 0
|
4622d93d0b9dbe23584addd4f4b9463255651652
|
https://github.com/zhangxinaaaa/Conv-TasNet/tree/4622d93d0b9dbe23584addd4f4b9463255651652
|
Conv2d_dilated
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
def same_padding_length(input_length, filter_size, stride, dilation=1):
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
output_length = (input_length + stride - 1) // stride
pad_length = max(0, (output_length - 1) * stride + dilated_filter_size -
input_length)
return pad_length
def compute_same_padding2d(input_shape, kernel_size, strides, dilation):
space = input_shape[2:]
assert len(space) == 2, '{}'.format(space)
new_space = []
new_input = []
for i in range(len(space)):
pad_length = same_padding_length(space[i], kernel_size[i], stride=
strides[i], dilation=dilation[i])
new_space.append(pad_length)
new_input.append(pad_length % 2)
return tuple(new_space), tuple(new_input)
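# Editor's note (worked example, not in the original repo): for the traced
# shapes used below, a 4x4 input with kernel_size=(4, 4), stride=(1, 1) and
# dilation=(1, 1), same_padding_length gives output_length = 4 and
# pad_length = max(0, 3 * 1 + 4 - 4) = 3, so compute_same_padding2d returns
# padding=(3, 3) and pad_input=(1, 1). forward() then pads one extra row and
# column (4x4 -> 5x5) and calls F.conv2d with padding=(1, 1), matching the
# constant_pad_nd + convolution pair in the generated Triton code.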
class _Conv2d_dilated(_ConvNd):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
dilation = _pair(dilation)
super(_Conv2d_dilated, self).__init__(in_channels, out_channels,
kernel_size, stride, _pair(0), dilation, False, _pair(0),
groups, bias, padding_mode='zeros')
def forward(self, input, dilation=None):
input_shape = list(input.size())
dilation_rate = self.dilation if dilation is None else _pair(dilation)
padding, pad_input = compute_same_padding2d(input_shape,
kernel_size=self.kernel_size, strides=self.stride, dilation=
dilation_rate)
if pad_input[0] == 1 or pad_input[1] == 1:
input = F.pad(input, [0, int(pad_input[0]), 0, int(pad_input[1])])
return F.conv2d(input, self.weight, self.bias, self.stride, (
padding[0] // 2, padding[1] // 2), dilation_rate, self.groups)
class Conv2d_dilated(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL
='relu', same_padding=False, dilation=1, bn=False, bias=True, groups=1
):
super(Conv2d_dilated, self).__init__()
self.conv = _Conv2d_dilated(in_channels, out_channels, kernel_size,
stride, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
affine=True) if bn else None
if NL == 'relu':
self.relu = nn.ReLU(inplace=True)
elif NL == 'prelu':
self.relu = nn.PReLU()
elif NL == 'tanh':
self.relu = nn.Tanh()
elif NL == 'lrelu':
self.relu = nn.LeakyReLU(inplace=True)
elif NL == 'sigmoid':
self.relu = nn.Sigmoid()
else:
self.relu = None
def forward(self, x, dilation=None):
x = self.conv(x, dilation)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
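# Editor's sketch (not part of the original repo): minimal usage matching
# get_init_inputs(); the SAME-padding logic above keeps the 4x4 spatial size
# and the default NL='relu' applies a ReLU after the convolution.
if __name__ == '__main__':
    conv = Conv2d_dilated(in_channels=4, out_channels=4, kernel_size=4)
    x = torch.rand(4, 4, 4, 4)
    y = conv(x)
    print(y.shape)  # torch.Size([4, 4, 4, 4])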
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 5 % 5
x0 = xindex % 5
x2 = xindex // 25
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(400)](primals_1, buf0, 400,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
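        # Convolving the 5x5 padded activation with the 4x4 kernel and
        # symmetric padding (1, 1) restores the original 4x4 spatial size.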
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(256)](buf2,
primals_3, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
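    # buf2 is the biased, ReLU-activated output; the weight (primals_2), the
    # padded activation (buf0) and the ReLU mask (buf3) are kept for backward.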
return buf2, primals_2, buf0, buf3
def same_padding_length(input_length, filter_size, stride, dilation=1):
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
output_length = (input_length + stride - 1) // stride
pad_length = max(0, (output_length - 1) * stride + dilated_filter_size -
input_length)
return pad_length
def compute_same_padding2d(input_shape, kernel_size, strides, dilation):
space = input_shape[2:]
assert len(space) == 2, '{}'.format(space)
new_space = []
new_input = []
for i in range(len(space)):
        pad_length = same_padding_length(space[i], kernel_size[i],
            stride=strides[i], dilation=dilation[i])
new_space.append(pad_length)
new_input.append(pad_length % 2)
return tuple(new_space), tuple(new_input)
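# Worked example for the shapes used here (H = W = 4, kernel 4, stride 1,
# dilation 1): pad_length = max(0, (4 - 1) * 1 + 4 - 4) = 3, so conv2d gets a
# symmetric padding of 3 // 2 = 1 and the leftover pixel (3 % 2 == 1) is added
# explicitly by F.pad / the constant_pad_nd kernel above.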
class _Conv2d_dilated(_ConvNd):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
dilation = _pair(dilation)
super(_Conv2d_dilated, self).__init__(in_channels, out_channels,
kernel_size, stride, _pair(0), dilation, False, _pair(0),
groups, bias, padding_mode='zeros')
def forward(self, input, dilation=None):
input_shape = list(input.size())
dilation_rate = self.dilation if dilation is None else _pair(dilation)
        padding, pad_input = compute_same_padding2d(input_shape,
            kernel_size=self.kernel_size, strides=self.stride,
            dilation=dilation_rate)
if pad_input[0] == 1 or pad_input[1] == 1:
input = F.pad(input, [0, int(pad_input[0]), 0, int(pad_input[1])])
        return F.conv2d(input, self.weight, self.bias, self.stride,
            (padding[0] // 2, padding[1] // 2), dilation_rate, self.groups)
class Conv2d_dilatedNew(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        NL='relu', same_padding=False, dilation=1, bn=False, bias=True,
        groups=1):
super(Conv2d_dilatedNew, self).__init__()
self.conv = _Conv2d_dilated(in_channels, out_channels, kernel_size,
stride, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
affine=True) if bn else None
if NL == 'relu':
self.relu = nn.ReLU(inplace=True)
elif NL == 'prelu':
self.relu = nn.PReLU()
elif NL == 'tanh':
self.relu = nn.Tanh()
elif NL == 'lrelu':
self.relu = nn.LeakyReLU(inplace=True)
elif NL == 'sigmoid':
self.relu = nn.Sigmoid()
else:
self.relu = None
def forward(self, input_0):
        # Argument order matches call(): primals_1 is the activation that gets
        # padded, primals_2 the convolution weight, primals_3 the bias.
        primals_1 = input_0
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
output = call([primals_1, primals_2, primals_3])
return output[0]
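# Note: call() fuses the bias add and ReLU unconditionally, so this fused module
# only matches the eager Conv2d_dilated in its default configuration
# (NL='relu', bn=False); the self.bn / self.relu submodules are never invoked here.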
|
xwjBupt/Counting-ICCV-DSSINet
|
Conv2d_dilated
| false
| 11,038
|
[
"MIT"
] | 0
|
92e4c56c93572fb2b026d573c3e711ce85a4af8f
|
https://github.com/xwjBupt/Counting-ICCV-DSSINet/tree/92e4c56c93572fb2b026d573c3e711ce85a4af8f
|