| entry_point | original_triton_python_code | optimised_triton_code | repo_name | module_name | synthetic | uuid | licenses | stars | sha | repo_link |
|---|---|---|---|---|---|---|---|---|---|---|
OptimizedResidualBlock
|
import torch
import torch.nn as nn
import torch.nn.utils as utils
class CustomConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=None, bias=True, spectral_norm=False, residual_init=True):
super(CustomConv2d, self).__init__()
self.residual_init = residual_init
if padding is None:
padding = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, bias=bias)
if spectral_norm:
self.conv = utils.spectral_norm(self.conv)
def forward(self, input):
return self.conv(input)
class ConvMeanPool(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, bias=True,
spectral_norm=False, residual_init=True):
super(ConvMeanPool, self).__init__()
self.conv = CustomConv2d(in_channels, out_channels, kernel_size,
bias=bias, spectral_norm=spectral_norm, residual_init=residual_init
)
def forward(self, input):
output = input
output = self.conv(output)
        output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] +
                  output[:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4
return output
class MeanPoolConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, bias=True,
spectral_norm=False, residual_init=True):
super(MeanPoolConv, self).__init__()
self.conv = CustomConv2d(in_channels, out_channels, kernel_size,
bias=bias, spectral_norm=spectral_norm, residual_init=residual_init
)
def forward(self, input):
output = input
        output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] +
                  output[:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4
output = self.conv(output)
return output
class OptimizedResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size,
spectral_norm=False):
super(OptimizedResidualBlock, self).__init__()
self.conv1 = CustomConv2d(in_channels, out_channels, kernel_size=
kernel_size, spectral_norm=spectral_norm)
self.conv2 = ConvMeanPool(out_channels, out_channels, kernel_size=
kernel_size, spectral_norm=spectral_norm)
self.conv_shortcut = MeanPoolConv(in_channels, out_channels,
kernel_size=1, spectral_norm=spectral_norm, residual_init=False)
self.relu2 = nn.ReLU()
def forward(self, input):
shortcut = self.conv_shortcut(input)
output = input
output = self.conv1(output)
output = self.relu2(output)
output = self.conv2(output)
return shortcut + output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
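# The four strided slices in ConvMeanPool/MeanPoolConv average each 2x2 window,
# i.e. plain 2x2 average pooling. A minimal sketch (not part of the source)
# verifying the identity:
def _check_meanpool_identity():
    import torch.nn.functional as F
    x = torch.randn(2, 3, 8, 8)
    pooled = (x[:, :, ::2, ::2] + x[:, :, 1::2, ::2] +
              x[:, :, ::2, 1::2] + x[:, :, 1::2, 1::2]) / 4
    assert torch.allclose(pooled, F.avg_pool2d(x, kernel_size=2, stride=2))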
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.utils as utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
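# Fused 2x2 mean pooling of the block input (the strided-slice average used by
# MeanPoolConv), producing one output element per 2x2 window.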
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 9 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_div_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = tmp2 + tmp4
tmp7 = tmp6 + tmp1
tmp8 = tmp5 + tmp7
tmp10 = tmp9 + tmp1
tmp11 = tmp8 + tmp10
tmp12 = 0.25
tmp13 = tmp11 * tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_3(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
x4 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 3, 3), (36, 9, 3, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(144)](buf3, primals_5, 144,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 2, 2), (16, 4, 2, 1))
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_poi_fused_add_div_2[grid(16)](buf4, primals_7, buf5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del primals_7
buf6 = buf1
del buf1
triton_poi_fused_add_convolution_div_3[grid(64)](buf6, primals_3,
buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf5
del primals_3
return buf6, primals_1, primals_2, primals_4, primals_6, buf0, buf3
class CustomConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=None, bias=True, spectral_norm=False, residual_init=True):
super(CustomConv2d, self).__init__()
self.residual_init = residual_init
if padding is None:
padding = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, bias=bias)
if spectral_norm:
self.conv = utils.spectral_norm(self.conv)
def forward(self, input):
return self.conv(input)
class ConvMeanPool(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, bias=True,
spectral_norm=False, residual_init=True):
super(ConvMeanPool, self).__init__()
self.conv = CustomConv2d(in_channels, out_channels, kernel_size,
bias=bias, spectral_norm=spectral_norm, residual_init=residual_init
)
def forward(self, input):
output = input
output = self.conv(output)
        output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] +
                  output[:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4
return output
class MeanPoolConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, bias=True,
spectral_norm=False, residual_init=True):
super(MeanPoolConv, self).__init__()
self.conv = CustomConv2d(in_channels, out_channels, kernel_size,
bias=bias, spectral_norm=spectral_norm, residual_init=residual_init
)
def forward(self, input):
output = input
        output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] +
                  output[:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4
output = self.conv(output)
return output
class OptimizedResidualBlockNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size,
spectral_norm=False):
super(OptimizedResidualBlockNew, self).__init__()
self.conv1 = CustomConv2d(in_channels, out_channels, kernel_size=
kernel_size, spectral_norm=spectral_norm)
self.conv2 = ConvMeanPool(out_channels, out_channels, kernel_size=
kernel_size, spectral_norm=spectral_norm)
self.conv_shortcut = MeanPoolConv(in_channels, out_channels,
kernel_size=1, spectral_norm=spectral_norm, residual_init=False)
self.relu2 = nn.ReLU()
def forward(self, input_0):
primals_1 = self.conv1.conv.weight
primals_3 = self.conv1.conv.bias
primals_4 = self.conv2.conv.conv.weight
primals_5 = self.conv2.conv.conv.bias
primals_2 = self.conv_shortcut.conv.conv.weight
primals_7 = self.conv_shortcut.conv.conv.bias
primals_6 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
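# Hypothetical equivalence check (not part of the source; assumes a CUDA device):
# the compiled forward should match the eager composition built from the same
# submodules, i.e. conv_shortcut(x) + conv2(relu2(conv1(x))).
def _check_block_equivalence():
    fused = OptimizedResidualBlockNew(4, 4, kernel_size=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        eager = fused.conv_shortcut(x) + fused.conv2(fused.relu2(fused.conv1(x)))
        torch.testing.assert_close(fused(x), eager, rtol=1e-4, atol=1e-4)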
|
takuhirok/rGAN
|
OptimizedResidualBlock
| false
| 16,602
|
[
"MIT"
] | 103
|
6f7a092de5814c662fd17224b3d48bebe7e03c2f
|
https://github.com/takuhirok/rGAN/tree/6f7a092de5814c662fd17224b3d48bebe7e03c2f
|
WassersteinGeneratorLoss
|
import torch
import torch.nn as nn
def reduce(x, reduction=None):
"""Applies reduction on a torch.Tensor.
Args:
x (torch.Tensor): The tensor on which reduction is to be applied.
reduction (str, optional): The reduction to be applied. If ``mean`` the mean value of the
Tensor is returned. If ``sum`` the elements of the Tensor will be summed. If none of the
            above then the Tensor is returned without any change.
Returns:
As per the above ``reduction`` convention.
"""
if reduction == 'mean':
return torch.mean(x)
elif reduction == 'sum':
return torch.sum(x)
else:
return x
def wasserstein_generator_loss(fgz, reduction='mean'):
return reduce(-1.0 * fgz, reduction)
class GeneratorLoss(nn.Module):
"""Base class for all generator losses.
.. note:: All Losses meant to be minimized for optimizing the Generator must subclass this.
Args:
reduction (str, optional): Specifies the reduction to apply to the output.
If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size.
If ``sum`` the elements of the output are summed.
override_train_ops (function, optional): Function to be used in place of the default ``train_ops``
"""
def __init__(self, reduction='mean', override_train_ops=None):
super(GeneratorLoss, self).__init__()
self.reduction = reduction
self.override_train_ops = override_train_ops
self.arg_map = {}
def set_arg_map(self, value):
"""Updates the ``arg_map`` for passing a different value to the ``train_ops``.
Args:
value (dict): A mapping of the ``argument name`` in the method signature and the
variable name in the ``Trainer`` it corresponds to.
.. note::
If the ``train_ops`` signature is
``train_ops(self, gen, disc, optimizer_generator, device, batch_size, labels=None)``
then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``.
In this case we make the following function call
``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``.
"""
self.arg_map.update(value)
def train_ops(self, generator, discriminator, optimizer_generator,
device, batch_size, labels=None):
"""Defines the standard ``train_ops`` used by most losses. Losses which have a different
training procedure can either ``subclass`` it **(recommended approach)** or make use of
``override_train_ops`` argument.
The ``standard optimization algorithm`` for the ``generator`` defined in this train_ops
is as follows:
1. :math:`fake = generator(noise)`
2. :math:`value = discriminator(fake)`
3. :math:`loss = loss\\_function(value)`
4. Backpropagate by computing :math:`\\nabla loss`
5. Run a step of the optimizer for generator
Args:
generator (torchgan.models.Generator): The model to be optimized.
discriminator (torchgan.models.Discriminator): The discriminator which judges the
performance of the generator.
optimizer_generator (torch.optim.Optimizer): Optimizer which updates the ``parameters``
of the ``generator``.
device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
            batch_size (int): Batch size of the data inferred from the ``DataLoader`` by the ``Trainer``.
labels (torch.Tensor, optional): Labels for the data.
Returns:
Scalar value of the loss.
"""
if self.override_train_ops is not None:
return self.override_train_ops(generator, discriminator,
optimizer_generator, device, batch_size, labels)
else:
if labels is None and generator.label_type == 'required':
raise Exception('GAN model requires labels for training')
noise = torch.randn(batch_size, generator.encoding_dims, device
=device)
optimizer_generator.zero_grad()
if generator.label_type == 'generated':
label_gen = torch.randint(0, generator.num_classes, (
batch_size,), device=device)
if generator.label_type == 'none':
fake = generator(noise)
elif generator.label_type == 'required':
fake = generator(noise, labels)
elif generator.label_type == 'generated':
fake = generator(noise, label_gen)
if discriminator.label_type == 'none':
dgz = discriminator(fake)
elif generator.label_type == 'generated':
dgz = discriminator(fake, label_gen)
else:
dgz = discriminator(fake, labels)
loss = self.forward(dgz)
loss.backward()
optimizer_generator.step()
return loss.item()
class WassersteinGeneratorLoss(GeneratorLoss):
"""Wasserstein GAN generator loss from
`"Wasserstein GAN by Arjovsky et. al." <https://arxiv.org/abs/1701.07875>`_ paper
The loss can be described as:
.. math:: L(G) = -f(G(z))
where
- :math:`G` : Generator
- :math:`f` : Critic/Discriminator
- :math:`z` : A sample from the noise prior
Args:
reduction (str, optional): Specifies the reduction to apply to the output.
        If ``none`` no reduction will be applied. If ``mean`` the mean of the output is returned.
If ``sum`` the elements of the output will be summed.
        override_train_ops (function, optional): Function to be used in place of the default ``train_ops``.
"""
def forward(self, fgz):
"""Computes the loss for the given input.
Args:
            fgz (torch.Tensor) : Output of the Discriminator with generated data. It must have the
dimensions (N, \\*) where \\* means any number of additional
dimensions.
Returns:
scalar if reduction is applied else Tensor with dimensions (N, \\*).
"""
return wasserstein_generator_loss(fgz, self.reduction)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
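# A small sketch (not from the source) exercising the reduction modes above:
def _demo_generator_loss():
    fgz = torch.tensor([0.5, -1.0, 2.0, 0.0])  # toy critic scores
    assert torch.allclose(wasserstein_generator_loss(fgz),
                          torch.tensor(-0.375))               # mean(-fgz)
    assert torch.allclose(wasserstein_generator_loss(fgz, 'sum'),
                          torch.tensor(-1.5))                 # sum(-fgz)
    assert wasserstein_generator_loss(fgz, None).shape == fgz.shape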
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
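# Fused negate-and-mean reduction: computes mean(-1.0 * input) over all 256
# elements in a single pass.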
@triton.jit
def triton_per_fused_mean_mul_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = 256.0
tmp7 = tmp5 / tmp6
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_0[grid(1)](buf1, arg0_1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
return buf1,
def reduce(x, reduction=None):
"""Applies reduction on a torch.Tensor.
Args:
x (torch.Tensor): The tensor on which reduction is to be applied.
reduction (str, optional): The reduction to be applied. If ``mean`` the mean value of the
Tensor is returned. If ``sum`` the elements of the Tensor will be summed. If none of the
            above then the Tensor is returned without any change.
Returns:
As per the above ``reduction`` convention.
"""
if reduction == 'mean':
return torch.mean(x)
elif reduction == 'sum':
return torch.sum(x)
else:
return x
def wasserstein_generator_loss(fgz, reduction='mean'):
return reduce(-1.0 * fgz, reduction)
class GeneratorLoss(nn.Module):
"""Base class for all generator losses.
.. note:: All Losses meant to be minimized for optimizing the Generator must subclass this.
Args:
reduction (str, optional): Specifies the reduction to apply to the output.
If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size.
If ``sum`` the elements of the output are summed.
override_train_ops (function, optional): Function to be used in place of the default ``train_ops``
"""
def __init__(self, reduction='mean', override_train_ops=None):
super(GeneratorLoss, self).__init__()
self.reduction = reduction
self.override_train_ops = override_train_ops
self.arg_map = {}
def set_arg_map(self, value):
"""Updates the ``arg_map`` for passing a different value to the ``train_ops``.
Args:
value (dict): A mapping of the ``argument name`` in the method signature and the
variable name in the ``Trainer`` it corresponds to.
.. note::
If the ``train_ops`` signature is
``train_ops(self, gen, disc, optimizer_generator, device, batch_size, labels=None)``
then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``.
In this case we make the following function call
``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``.
"""
self.arg_map.update(value)
def train_ops(self, generator, discriminator, optimizer_generator,
device, batch_size, labels=None):
"""Defines the standard ``train_ops`` used by most losses. Losses which have a different
training procedure can either ``subclass`` it **(recommended approach)** or make use of
``override_train_ops`` argument.
The ``standard optimization algorithm`` for the ``generator`` defined in this train_ops
is as follows:
1. :math:`fake = generator(noise)`
2. :math:`value = discriminator(fake)`
3. :math:`loss = loss\\_function(value)`
4. Backpropagate by computing :math:`\\nabla loss`
5. Run a step of the optimizer for generator
Args:
generator (torchgan.models.Generator): The model to be optimized.
discriminator (torchgan.models.Discriminator): The discriminator which judges the
performance of the generator.
optimizer_generator (torch.optim.Optimizer): Optimizer which updates the ``parameters``
of the ``generator``.
device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
            batch_size (int): Batch size of the data inferred from the ``DataLoader`` by the ``Trainer``.
labels (torch.Tensor, optional): Labels for the data.
Returns:
Scalar value of the loss.
"""
if self.override_train_ops is not None:
return self.override_train_ops(generator, discriminator,
optimizer_generator, device, batch_size, labels)
else:
if labels is None and generator.label_type == 'required':
raise Exception('GAN model requires labels for training')
noise = torch.randn(batch_size, generator.encoding_dims, device
=device)
optimizer_generator.zero_grad()
if generator.label_type == 'generated':
label_gen = torch.randint(0, generator.num_classes, (
batch_size,), device=device)
if generator.label_type == 'none':
fake = generator(noise)
elif generator.label_type == 'required':
fake = generator(noise, labels)
elif generator.label_type == 'generated':
fake = generator(noise, label_gen)
if discriminator.label_type == 'none':
dgz = discriminator(fake)
elif generator.label_type == 'generated':
dgz = discriminator(fake, label_gen)
else:
dgz = discriminator(fake, labels)
loss = self.forward(dgz)
loss.backward()
optimizer_generator.step()
return loss.item()
class WassersteinGeneratorLossNew(GeneratorLoss):
"""Wasserstein GAN generator loss from
`"Wasserstein GAN by Arjovsky et. al." <https://arxiv.org/abs/1701.07875>`_ paper
The loss can be described as:
.. math:: L(G) = -f(G(z))
where
- :math:`G` : Generator
- :math:`f` : Critic/Discriminator
- :math:`z` : A sample from the noise prior
Args:
reduction (str, optional): Specifies the reduction to apply to the output.
        If ``none`` no reduction will be applied. If ``mean`` the mean of the output is returned.
If ``sum`` the elements of the output will be summed.
        override_train_ops (function, optional): Function to be used in place of the default ``train_ops``.
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
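# Sketch (not from the source; assumes a CUDA device): the fused kernel reduces
# mean(-1.0 * x) in one pass, so the compiled loss should match torch.mean(-x)
# for the 4x4x4x4 shape it was specialized for.
def _check_generator_loss_equivalence():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(WassersteinGeneratorLossNew()(x), torch.mean(-x))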
|
torchgan/torchgan
|
WassersteinGeneratorLoss
| false
| 16,603
|
[
"MIT"
] | 1,300
|
f4139537ac2d3d8609d5aecc859a6fb797b107a1
|
https://github.com/torchgan/torchgan/tree/f4139537ac2d3d8609d5aecc859a6fb797b107a1
|
Buck
|
import torch
import torch.nn
class Buck(torch.nn.Module):
def __init__(self, A=1.0, B=1.0, C=1.0):
super(Buck, self).__init__()
self.A = torch.nn.Parameter(torch.Tensor([A]))
self.B = torch.nn.Parameter(torch.Tensor([B]))
self.C = torch.nn.Parameter(torch.Tensor([C]))
def Buckingham(self, r, A, B, C):
return A * torch.exp(-B * r) - C / r ** 6
def forward(self, x):
return self.Buckingham(x, self.A, self.B, self.C)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
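# Sanity check (not part of the source) of the Buckingham form
# A * exp(-B * r) - C / r**6 against a hand computation; at r = 1 with
# A=2, B=1, C=0.5 the value is 2/e - 0.5 ≈ 0.2358.
def _check_buckingham():
    buck = Buck(A=2.0, B=1.0, C=0.5)
    r = torch.tensor([1.0, 2.0])
    expected = 2.0 * torch.exp(-r) - 0.5 / r ** 6
    torch.testing.assert_close(buck(r).detach(), expected)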
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
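# Fused elementwise Buckingham potential A * exp(-B * r) - C / r**6, with r**6
# built from repeated multiplications instead of a pow call.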
@triton.jit
def triton_poi_fused_div_exp_mul_neg_pow_sub_0(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp5 = tl.load(in_ptr2 + x0, xmask)
tmp9 = tl.load(in_ptr3 + 0)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp4 = -tmp3
tmp6 = tmp4 * tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp1 * tmp7
tmp11 = tmp5 * tmp5
tmp12 = tmp11 * tmp5
tmp13 = tmp12 * tmp12
tmp14 = tmp10 / tmp13
tmp15 = tmp8 - tmp14
tl.store(out_ptr0 + x0, tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_exp_mul_neg_pow_sub_0[grid(256)](primals_1,
primals_2, primals_4, primals_3, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
return buf0, primals_1, primals_2, primals_4
class BuckNew(torch.nn.Module):
def __init__(self, A=1.0, B=1.0, C=1.0):
super(BuckNew, self).__init__()
self.A = torch.nn.Parameter(torch.Tensor([A]))
self.B = torch.nn.Parameter(torch.Tensor([B]))
self.C = torch.nn.Parameter(torch.Tensor([C]))
def Buckingham(self, r, A, B, C):
return A * torch.exp(-B * r) - C / r ** 6
def forward(self, input_0):
primals_1 = self.A
primals_2 = self.B
primals_3 = self.C
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
torchmd/mdgrad
|
Buck
| false
| 16,604
|
[
"MIT"
] | 54
|
77bd7685b74b41acf54a9483546e1e8cb545eb01
|
https://github.com/torchmd/mdgrad/tree/77bd7685b74b41acf54a9483546e1e8cb545eb01
|
ParityPonderGRU
|
from torch.nn import Module
import torch
from torch import nn
from typing import Tuple
import torch.utils.data
import torch.nn.functional
import torch.autograd
class ParityPonderGRU(Module):
"""
## PonderNet with GRU for Parity Task
This is a simple model that uses a [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html)
as the step function.
This model is for the [Parity Task](../parity.html) where the input is a vector of `n_elems`.
Each element of the vector is either `0`, `1` or `-1` and the output is the parity
- a binary value that is true if the number of `1`s is odd and false otherwise.
The prediction of the model is the log probability of the parity being $1$.
"""
def __init__(self, n_elems: 'int', n_hidden: 'int', max_steps: 'int'):
"""
* `n_elems` is the number of elements in the input vector
* `n_hidden` is the state vector size of the GRU
* `max_steps` is the maximum number of steps $N$
"""
super().__init__()
self.max_steps = max_steps
self.n_hidden = n_hidden
self.gru = nn.GRUCell(n_elems, n_hidden)
self.output_layer = nn.Linear(n_hidden, 1)
self.lambda_layer = nn.Linear(n_hidden, 1)
self.lambda_prob = nn.Sigmoid()
self.is_halt = False
def forward(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor,
torch.Tensor, torch.Tensor]:
"""
* `x` is the input of shape `[batch_size, n_elems]`
This outputs a tuple of four tensors:
1. $p_1 \\dots p_N$ in a tensor of shape `[N, batch_size]`
2. $\\hat{y}_1 \\dots \\hat{y}_N$ in a tensor of shape `[N, batch_size]` - the log probabilities of the parity being $1$
3. $p_m$ of shape `[batch_size]`
4. $\\hat{y}_m$ of shape `[batch_size]` where the computation was halted at step $m$
"""
batch_size = x.shape[0]
h = x.new_zeros((x.shape[0], self.n_hidden))
h = self.gru(x, h)
p = []
y = []
un_halted_prob = h.new_ones((batch_size,))
halted = h.new_zeros((batch_size,))
p_m = h.new_zeros((batch_size,))
y_m = h.new_zeros((batch_size,))
for n in range(1, self.max_steps + 1):
if n == self.max_steps:
lambda_n = h.new_ones(h.shape[0])
else:
lambda_n = self.lambda_prob(self.lambda_layer(h))[:, 0]
y_n = self.output_layer(h)[:, 0]
p_n = un_halted_prob * lambda_n
un_halted_prob = un_halted_prob * (1 - lambda_n)
halt = torch.bernoulli(lambda_n) * (1 - halted)
p.append(p_n)
y.append(y_n)
p_m = p_m * (1 - halt) + p_n * halt
y_m = y_m * (1 - halt) + y_n * halt
halted = halted + halt
h = self.gru(x, h)
if self.is_halt and halted.sum() == batch_size:
break
return torch.stack(p), torch.stack(y), p_m, y_m
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_elems': 4, 'n_hidden': 4, 'max_steps': 4}]
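# A small sketch (not from the source) running the module on a toy parity batch
# and checking the documented output shapes:
def _check_ponder_shapes():
    model = ParityPonderGRU(n_elems=4, n_hidden=4, max_steps=4)
    x = torch.randint(-1, 2, (8, 4)).float()  # entries in {-1, 0, 1}
    p, y_hat, p_m, y_m = model(x)
    assert p.shape == y_hat.shape == (4, 8)   # [N, batch_size]
    assert p_m.shape == y_m.shape == (8,)     # values at the sampled halt step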
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
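# Zero-fills the initial GRU hidden state h.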
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + x0, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + x0, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + x0, tmp19 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 5, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr1 + x0, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 6, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr1 + x0, tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp31 = tmp0 >= tmp27
tl.full([1], 7, tl.int64)
tmp34 = tl.load(in_ptr1 + x0, tmp31 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp35 = tl.where(tmp29, tmp30, tmp34)
tmp36 = tl.where(tmp24, tmp25, tmp35)
tmp37 = tl.where(tmp19, tmp20, tmp36)
tmp38 = tl.where(tmp14, tmp15, tmp37)
tmp39 = tl.where(tmp9, tmp10, tmp38)
tmp40 = tl.where(tmp4, tmp5, tmp39)
tl.store(out_ptr0 + x2, tmp40, xmask)
@triton.jit
def triton_poi_fused_stack_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 7
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp5 = tl.load(in_ptr0 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp23 = tl.load(in_ptr1 + 0)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = tmp0 >= tmp3
tmp8 = tl.full([1], 2, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tmp0 >= tmp8
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tmp0 >= tmp12
tmp16 = tl.full([1], 4, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tmp0 >= tmp16
tmp20 = tl.full([1], 5, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp25 = tmp0 >= tmp20
tmp26 = tl.full([1], 6, tl.int64)
tmp27 = tmp0 < tmp26
tmp28 = tmp25 & tmp27
tl.full([1], 7, tl.int64)
tmp32 = tl.where(tmp28, tmp24, tmp24)
tmp33 = tl.where(tmp22, tmp24, tmp32)
tmp34 = tl.where(tmp18, tmp6, tmp33)
tmp35 = tl.where(tmp14, tmp6, tmp34)
tmp36 = tl.where(tmp10, tmp6, tmp35)
tmp37 = tl.where(tmp4, tmp6, tmp36)
tl.store(out_ptr0 + x0, tmp37, xmask)
@triton.jit
def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 112
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp19 & xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (x0 + 4 * (-16 + x1)), tmp24 & xmask, other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 24, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr1 + (x0 + 4 * (-20 + x1)), tmp29 & xmask, other=0.0)
tmp31 = tmp0 >= tmp27
tl.full([1], 28, tl.int64)
tmp34 = tl.load(in_ptr2 + (x0 + 4 * (-24 + x1)), tmp31 & xmask, other=0.0)
tmp35 = tl.where(tmp29, tmp30, tmp34)
tmp36 = tl.where(tmp24, tmp25, tmp35)
tmp37 = tl.where(tmp19, tmp20, tmp36)
tmp38 = tl.where(tmp14, tmp15, tmp37)
tmp39 = tl.where(tmp9, tmp10, tmp38)
tmp40 = tl.where(tmp4, tmp5, tmp39)
tl.store(out_ptr0 + x2, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4(
in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
out_ptr6, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + 4)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp5 = tl.load(in_ptr2 + (16 + x0), xmask)
tmp8 = tl.load(in_ptr1 + 5)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (20 + x0), xmask)
tmp13 = tl.load(in_ptr1 + 6)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp15 = tl.load(in_ptr2 + (24 + x0), xmask)
tmp18 = tl.load(in_ptr3 + x0, xmask)
tmp20 = tl.load(in_ptr4 + x0, xmask)
tmp22 = tl.load(in_ptr5 + x0, xmask)
tmp59 = tl.load(in_ptr1 + 0)
tmp60 = tl.broadcast_to(tmp59, [XBLOCK])
tmp61 = tl.load(in_ptr2 + x0, xmask)
tmp66 = tl.load(in_ptr1 + 1)
tmp67 = tl.broadcast_to(tmp66, [XBLOCK])
tmp68 = tl.load(in_ptr2 + (4 + x0), xmask)
tmp73 = tl.load(in_ptr1 + 2)
tmp74 = tl.broadcast_to(tmp73, [XBLOCK])
tmp75 = tl.load(in_ptr2 + (8 + x0), xmask)
tmp80 = tl.load(in_ptr1 + 3)
tmp81 = tl.broadcast_to(tmp80, [XBLOCK])
tmp82 = tl.load(in_ptr2 + (12 + x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 < tmp1
tmp6 = tmp4 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp11 = tmp9 + tmp10
tmp12 = tl.sigmoid(tmp11)
tmp16 = tmp14 + tmp15
tmp17 = tl.sigmoid(tmp16)
tmp19 = tmp18 < tmp17
tmp21 = tmp20 < tmp12
tmp23 = tmp22 < tmp7
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp1 - tmp24
tmp26 = 0.0
tmp27 = tmp26 * tmp25
tmp28 = tmp7 * tmp24
tmp29 = tmp27 + tmp28
tmp30 = tmp21.to(tl.float32)
tmp31 = tmp30 * tmp25
tmp32 = tmp1 - tmp31
tmp33 = tmp29 * tmp32
tmp34 = tmp1 - tmp7
tmp35 = tmp34 * tmp12
tmp36 = tmp35 * tmp31
tmp37 = tmp33 + tmp36
tmp38 = tmp19.to(tl.float32)
tmp39 = tmp24 + tmp31
tmp40 = tmp1 - tmp39
tmp41 = tmp38 * tmp40
tmp42 = tmp1 - tmp41
tmp43 = tmp37 * tmp42
tmp44 = tmp1 - tmp12
tmp45 = tmp34 * tmp44
tmp46 = tmp45 * tmp17
tmp47 = tmp46 * tmp41
tmp48 = tmp43 + tmp47
tmp49 = tmp2.to(tl.float32)
tmp50 = tmp39 + tmp41
tmp51 = tmp1 - tmp50
tmp52 = tmp49 * tmp51
tmp53 = tmp1 - tmp52
tmp54 = tmp48 * tmp53
tmp55 = tmp1 - tmp17
tmp56 = tmp45 * tmp55
tmp57 = tmp56 * tmp52
tmp58 = tmp54 + tmp57
tmp62 = tmp60 + tmp61
tmp63 = tmp62 * tmp24
tmp64 = tmp27 + tmp63
tmp65 = tmp64 * tmp32
tmp69 = tmp67 + tmp68
tmp70 = tmp69 * tmp31
tmp71 = tmp65 + tmp70
tmp72 = tmp71 * tmp42
tmp76 = tmp74 + tmp75
tmp77 = tmp76 * tmp41
tmp78 = tmp72 + tmp77
tmp79 = tmp78 * tmp53
tmp83 = tmp81 + tmp82
tmp84 = tmp83 * tmp52
tmp85 = tmp79 + tmp84
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp7, xmask)
tl.store(out_ptr2 + x0, tmp12, xmask)
tl.store(out_ptr3 + x0, tmp17, xmask)
tl.store(out_ptr4 + x0, tmp19, xmask)
tl.store(out_ptr5 + x0, tmp21, xmask)
tl.store(out_ptr6 + x0, tmp23, xmask)
tl.store(in_out_ptr0 + x0, tmp58, xmask)
tl.store(in_out_ptr1 + x0, tmp85, xmask)
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (-4 + x0), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = 1.0
tmp12 = tmp11 - tmp10
tmp13 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tmp0 >= tmp7
tmp18 = tl.full([1], 12, tl.int64)
tmp19 = tmp0 < tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr0 + (-8 + x0), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp11 - tmp21
tmp23 = tl.load(in_ptr1 + (-8 + x0), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp24 = tmp11 - tmp23
tmp25 = tmp22 * tmp24
tmp26 = tl.load(in_ptr2 + (-8 + x0), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp27 = tmp25 * tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tmp0 >= tmp18
tl.full([1], 16, tl.int64)
tmp33 = tl.load(in_ptr0 + (-12 + x0), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tmp11 - tmp33
tmp35 = tl.load(in_ptr1 + (-12 + x0), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tmp11 - tmp35
tmp37 = tmp34 * tmp36
tmp38 = tl.load(in_ptr2 + (-12 + x0), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp39 = tmp11 - tmp38
tmp40 = tmp37 * tmp39
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp30, tmp40, tmp41)
tmp43 = tl.where(tmp20, tmp29, tmp42)
tmp44 = tl.where(tmp9, tmp16, tmp43)
tmp45 = tl.where(tmp4, tmp5, tmp44)
tl.store(out_ptr0 + x0, tmp45, xmask)
@triton.jit
def triton_poi_fused_stack_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp5 = tl.load(in_ptr0 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp15 = tl.load(in_ptr0 + 1)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp25 = tl.load(in_ptr0 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp34 = tl.load(in_ptr0 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp17 = tl.load(in_ptr1 + (4 + (-4 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp14, tmp18, tmp19)
tmp21 = tmp0 >= tmp12
tmp22 = tl.full([1], 12, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp27 = tl.load(in_ptr1 + (8 + (-8 + x0)), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp0 >= tmp22
tl.full([1], 16, tl.int64)
tmp36 = tl.load(in_ptr1 + (12 + (-12 + x0)), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp31, tmp37, tmp38)
tmp40 = tl.where(tmp24, tmp30, tmp39)
tmp41 = tl.where(tmp14, tmp20, tmp40)
tmp42 = tl.where(tmp4, tmp10, tmp41)
tl.store(out_ptr0 + x0, tmp42, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12,), (1,))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12),
(1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 12), (1,
4), 0), out=buf2)
buf3 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf2, buf0,
primals_4, primals_5)
buf4 = buf3[0]
buf5 = buf3[1]
del buf3
buf6 = empty_strided_cuda((7, 4), (4, 1), torch.float32)
triton_poi_fused_stack_1[grid(28)](primals_8, primals_6, buf6, 28,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_6
del primals_8
buf7 = empty_strided_cuda((7,), (1,), torch.float32)
triton_poi_fused_stack_2[grid(7)](primals_9, primals_7, buf7, 7,
XBLOCK=8, num_warps=1, num_stages=1)
del primals_7
del primals_9
buf8 = buf2
del buf2
extern_kernels.mm(buf4, reinterpret_tensor(primals_3, (4, 12), (1,
4), 0), out=buf8)
buf9 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf8, buf4,
primals_4, primals_5)
buf10 = buf9[0]
buf11 = buf9[1]
del buf9
buf12 = buf8
del buf8
extern_kernels.mm(buf10, reinterpret_tensor(primals_3, (4, 12), (1,
4), 0), out=buf12)
buf13 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf12,
buf10, primals_4, primals_5)
buf14 = buf13[0]
buf15 = buf13[1]
del buf13
buf16 = buf12
del buf12
extern_kernels.mm(buf14, reinterpret_tensor(primals_3, (4, 12), (1,
4), 0), out=buf16)
buf17 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf16,
buf14, primals_4, primals_5)
del buf1
del buf16
del primals_4
del primals_5
buf18 = buf17[0]
buf19 = buf17[1]
del buf17
buf20 = empty_strided_cuda((28, 4), (4, 1), torch.float32)
triton_poi_fused_stack_3[grid(112)](buf4, buf10, buf14, buf18,
buf20, 112, XBLOCK=128, num_warps=4, num_stages=1)
buf21 = empty_strided_cuda((7, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf20, (7, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf6, (7, 4, 1), (4, 1, 4), 0), out=buf21)
buf23 = torch.ops.aten.rand.default([4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf24 = buf23
del buf23
buf27 = torch.ops.aten.rand.default([4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf28 = buf27
del buf27
buf31 = torch.ops.aten.rand.default([4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf32 = buf31
del buf31
buf36 = torch.ops.aten.rand.default([4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf37 = buf36
del buf36
buf38 = empty_strided_cuda((4,), (1,), torch.bool)
buf30 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf22 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf25 = empty_strided_cuda((4,), (1,), torch.bool)
buf29 = empty_strided_cuda((4,), (1,), torch.bool)
buf33 = empty_strided_cuda((4,), (1,), torch.bool)
buf34 = empty_strided_cuda((4,), (1,), torch.float32)
buf39 = buf34
del buf34
buf35 = empty_strided_cuda((4,), (1,), torch.float32)
buf40 = buf35
del buf35
triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4[
grid(4)](buf39, buf40, buf37, buf7, buf21, buf24, buf28, buf32,
buf38, buf30, buf26, buf22, buf25, buf29, buf33, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del buf24
del buf28
del buf32
del buf37
buf41 = reinterpret_tensor(buf18, (16,), (1,), 0)
del buf18
triton_poi_fused_stack_5[grid(16)](buf30, buf26, buf22, buf41, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf42 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_stack_6[grid(16)](buf7, buf21, buf42, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf21
del buf7
return (reinterpret_tensor(buf41, (4, 4), (4, 1), 0),
reinterpret_tensor(buf42, (4, 4), (4, 1), 0), buf39, buf40,
primals_1, buf0, buf4, buf5, buf10, buf11, buf14, buf15, buf19,
buf22, buf25, buf26, buf29, buf30, buf33, buf38, reinterpret_tensor
(buf6, (7, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf20, (7, 4, 4
), (16, 1, 4), 0), primals_3)
class ParityPonderGRUNew(Module):
"""
## PonderNet with GRU for Parity Task
This is a simple model that uses a [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html)
as the step function.
This model is for the [Parity Task](../parity.html) where the input is a vector of `n_elems`.
Each element of the vector is either `0`, `1` or `-1` and the output is the parity
- a binary value that is true if the number of `1`s is odd and false otherwise.
The prediction of the model is the log probability of the parity being $1$.
"""
def __init__(self, n_elems: 'int', n_hidden: 'int', max_steps: 'int'):
"""
* `n_elems` is the number of elements in the input vector
* `n_hidden` is the state vector size of the GRU
* `max_steps` is the maximum number of steps $N$
"""
super().__init__()
self.max_steps = max_steps
self.n_hidden = n_hidden
self.gru = nn.GRUCell(n_elems, n_hidden)
self.output_layer = nn.Linear(n_hidden, 1)
self.lambda_layer = nn.Linear(n_hidden, 1)
self.lambda_prob = nn.Sigmoid()
self.is_halt = False
def forward(self, input_0):
primals_2 = self.gru.weight_ih
primals_3 = self.gru.weight_hh
primals_4 = self.gru.bias_ih
primals_5 = self.gru.bias_hh
primals_6 = self.output_layer.weight
primals_7 = self.output_layer.bias
primals_8 = self.lambda_layer.weight
primals_9 = self.lambda_layer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1], output[2], output[3]
|
techthiyanes/annotated_deep_learning_paper_implementations
|
ParityPonderGRU
| false
| 16,605
|
[
"MIT"
] | 3,714
|
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
|
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
|
WassersteinDiscriminatorLoss
|
import torch
import torch.nn as nn
def reduce(x, reduction=None):
"""Applies reduction on a torch.Tensor.
Args:
x (torch.Tensor): The tensor on which reduction is to be applied.
reduction (str, optional): The reduction to be applied. If ``mean`` the mean value of the
Tensor is returned. If ``sum`` the elements of the Tensor will be summed. If none of the
            above then the Tensor is returned without any change.
Returns:
As per the above ``reduction`` convention.
"""
if reduction == 'mean':
return torch.mean(x)
elif reduction == 'sum':
return torch.sum(x)
else:
return x
def wasserstein_discriminator_loss(fx, fgz, reduction='mean'):
return reduce(fgz - fx, reduction)
class DiscriminatorLoss(nn.Module):
"""Base class for all discriminator losses.
.. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this.
Args:
reduction (str, optional): Specifies the reduction to apply to the output.
If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size.
If ``sum`` the elements of the output are summed.
override_train_ops (function, optional): Function to be used in place of the default ``train_ops``
"""
def __init__(self, reduction='mean', override_train_ops=None):
super(DiscriminatorLoss, self).__init__()
self.reduction = reduction
self.override_train_ops = override_train_ops
self.arg_map = {}
def set_arg_map(self, value):
"""Updates the ``arg_map`` for passing a different value to the ``train_ops``.
Args:
value (dict): A mapping of the ``argument name`` in the method signature and the
variable name in the ``Trainer`` it corresponds to.
.. note::
If the ``train_ops`` signature is
``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)``
then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``.
In this case we make the following function call
``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``.
"""
self.arg_map.update(value)
def train_ops(self, generator, discriminator, optimizer_discriminator,
real_inputs, device, labels=None):
"""Defines the standard ``train_ops`` used by most losses. Losses which have a different
training procedure can either ``subclass`` it **(recommended approach)** or make use of
``override_train_ops`` argument.
The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops
is as follows:
1. :math:`fake = generator(noise)`
2. :math:`value_1 = discriminator(fake)`
3. :math:`value_2 = discriminator(real)`
4. :math:`loss = loss\\_function(value_1, value_2)`
5. Backpropagate by computing :math:`\\nabla loss`
6. Run a step of the optimizer for discriminator
Args:
generator (torchgan.models.Generator): The model to be optimized.
discriminator (torchgan.models.Discriminator): The discriminator which judges the
performance of the generator.
optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters``
of the ``discriminator``.
real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``.
device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
labels (torch.Tensor, optional): Labels for the data.
Returns:
Scalar value of the loss.
"""
if self.override_train_ops is not None:
return self.override_train_ops(self, generator, discriminator,
optimizer_discriminator, real_inputs, device, labels)
else:
if labels is None and (generator.label_type == 'required' or
discriminator.label_type == 'required'):
raise Exception('GAN model requires labels for training')
batch_size = real_inputs.size(0)
noise = torch.randn(batch_size, generator.encoding_dims, device
=device)
if generator.label_type == 'generated':
label_gen = torch.randint(0, generator.num_classes, (
batch_size,), device=device)
optimizer_discriminator.zero_grad()
if discriminator.label_type == 'none':
dx = discriminator(real_inputs)
elif discriminator.label_type == 'required':
dx = discriminator(real_inputs, labels)
else:
dx = discriminator(real_inputs, label_gen)
if generator.label_type == 'none':
fake = generator(noise)
elif generator.label_type == 'required':
fake = generator(noise, labels)
else:
fake = generator(noise, label_gen)
if discriminator.label_type == 'none':
dgz = discriminator(fake.detach())
elif generator.label_type == 'generated':
dgz = discriminator(fake.detach(), label_gen)
else:
dgz = discriminator(fake.detach(), labels)
loss = self.forward(dx, dgz)
loss.backward()
optimizer_discriminator.step()
return loss.item()
class WassersteinDiscriminatorLoss(DiscriminatorLoss):
"""Wasserstein GAN generator loss from
`"Wasserstein GAN by Arjovsky et. al." <https://arxiv.org/abs/1701.07875>`_ paper
The loss can be described as:
.. math:: L(D) = f(G(z)) - f(x)
where
- :math:`G` : Generator
- :math:`f` : Critic/Discriminator
- :math:`x` : A sample from the data distribution
- :math:`z` : A sample from the noise prior
Args:
reduction (str, optional): Specifies the reduction to apply to the output.
        If ``none`` no reduction will be applied. If ``mean`` the mean of the output is returned.
If ``sum`` the elements of the output will be summed.
clip (tuple, optional): Tuple that specifies the maximum and minimum parameter
clamping to be applied, as per the original version of the Wasserstein loss
without Gradient Penalty.
        override_train_ops (function, optional): Function to be used in place of the default ``train_ops``.
"""
def __init__(self, reduction='mean', clip=None, override_train_ops=None):
super(WassersteinDiscriminatorLoss, self).__init__(reduction,
override_train_ops)
        if isinstance(clip, (tuple, list)) and len(clip) > 1:
self.clip = clip
else:
self.clip = None
def forward(self, fx, fgz):
"""Computes the loss for the given input.
Args:
fx (torch.Tensor) : Output of the Discriminator with real data. It must have the
dimensions (N, \\*) where \\* means any number of additional
dimensions.
fgz (torch.Tensor) : Output of the Discriminator with generated data. It must have the
dimensions (N, \\*) where \\* means any number of additional
dimensions.
Returns:
scalar if reduction is applied else Tensor with dimensions (N, \\*).
"""
return wasserstein_discriminator_loss(fx, fgz, self.reduction)
def train_ops(self, generator, discriminator, optimizer_discriminator,
real_inputs, device, labels=None):
"""Defines the standard ``train_ops`` used by wasserstein discriminator loss.
The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops
is as follows:
        1. Clamp the discriminator parameters to satisfy the :math:`Lipschitz` condition
2. :math:`fake = generator(noise)`
3. :math:`value_1 = discriminator(fake)`
4. :math:`value_2 = discriminator(real)`
5. :math:`loss = loss\\_function(value_1, value_2)`
6. Backpropagate by computing :math:`\\nabla loss`
7. Run a step of the optimizer for discriminator
Args:
generator (torchgan.models.Generator): The model to be optimized.
discriminator (torchgan.models.Discriminator): The discriminator which judges the
performance of the generator.
optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters``
of the ``discriminator``.
real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``.
device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
labels (torch.Tensor, optional): Labels for the data.
Returns:
Scalar value of the loss.
"""
if self.override_train_ops is not None:
return self.override_train_ops(generator, discriminator,
optimizer_discriminator, real_inputs, device, labels)
else:
if self.clip is not None:
for p in discriminator.parameters():
p.data.clamp_(self.clip[0], self.clip[1])
return super(WassersteinDiscriminatorLoss, self).train_ops(
generator, discriminator, optimizer_discriminator,
real_inputs, device, labels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
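# A minimal usage sketch (not from the source) showing the loss value and the
# optional WGAN weight-clipping configuration:
def _demo_discriminator_loss():
    loss_fn = WassersteinDiscriminatorLoss(clip=(-0.01, 0.01))
    fx = torch.rand(4, 4, 4, 4)   # critic scores on real samples
    fgz = torch.rand(4, 4, 4, 4)  # critic scores on generated samples
    torch.testing.assert_close(loss_fn(fx, fgz), (fgz - fx).mean())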
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
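# Fused subtract-and-mean reduction: computes mean(fgz - fx) over all 256
# elements in a single pass.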
@triton.jit
def triton_per_fused_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = 256.0
tmp7 = tmp5 / tmp6
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_sub_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def reduce(x, reduction=None):
"""Applies reduction on a torch.Tensor.
Args:
x (torch.Tensor): The tensor on which reduction is to be applied.
reduction (str, optional): The reduction to be applied. If ``mean`` the mean value of the
Tensor is returned. If ``sum`` the elements of the Tensor will be summed. If none of the
            above then the Tensor is returned without any change.
Returns:
As per the above ``reduction`` convention.
"""
if reduction == 'mean':
return torch.mean(x)
elif reduction == 'sum':
return torch.sum(x)
else:
return x
def wasserstein_discriminator_loss(fx, fgz, reduction='mean'):
return reduce(fgz - fx, reduction)
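# Worked example (an illustrative addition): with the default 'mean' reduction
# the Wasserstein discriminator loss is simply mean(fgz - fx).
def _wasserstein_loss_example():
    fx = torch.tensor([0.9, 0.2])   # critic scores on real samples
    fgz = torch.tensor([0.1, 0.3])  # critic scores on generated samples
    loss = wasserstein_discriminator_loss(fx, fgz)
    # ((0.1 - 0.9) + (0.3 - 0.2)) / 2 = -0.35
    assert torch.isclose(loss, torch.tensor(-0.35))
    return loss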
class DiscriminatorLoss(nn.Module):
"""Base class for all discriminator losses.
.. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this.
Args:
reduction (str, optional): Specifies the reduction to apply to the output.
If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size.
If ``sum`` the elements of the output are summed.
override_train_ops (function, optional): Function to be used in place of the default ``train_ops``
"""
def __init__(self, reduction='mean', override_train_ops=None):
super(DiscriminatorLoss, self).__init__()
self.reduction = reduction
self.override_train_ops = override_train_ops
self.arg_map = {}
def set_arg_map(self, value):
"""Updates the ``arg_map`` for passing a different value to the ``train_ops``.
Args:
value (dict): A mapping of the ``argument name`` in the method signature and the
variable name in the ``Trainer`` it corresponds to.
.. note::
If the ``train_ops`` signature is
``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)``
then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``.
In this case we make the following function call
``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``.
"""
self.arg_map.update(value)
def train_ops(self, generator, discriminator, optimizer_discriminator,
real_inputs, device, labels=None):
"""Defines the standard ``train_ops`` used by most losses. Losses which have a different
training procedure can either ``subclass`` it **(recommended approach)** or make use of
``override_train_ops`` argument.
The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops
is as follows:
1. :math:`fake = generator(noise)`
2. :math:`value_1 = discriminator(fake)`
3. :math:`value_2 = discriminator(real)`
4. :math:`loss = loss\\_function(value_1, value_2)`
5. Backpropagate by computing :math:`\\nabla loss`
6. Run a step of the optimizer for discriminator
Args:
generator (torchgan.models.Generator): The model to be optimized.
discriminator (torchgan.models.Discriminator): The discriminator which judges the
performance of the generator.
optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters``
of the ``discriminator``.
real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``.
device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
            batch_size (int): Batch Size of the data inferred from the ``DataLoader`` by the ``Trainer``.
labels (torch.Tensor, optional): Labels for the data.
Returns:
Scalar value of the loss.
"""
if self.override_train_ops is not None:
return self.override_train_ops(self, generator, discriminator,
optimizer_discriminator, real_inputs, device, labels)
else:
if labels is None and (generator.label_type == 'required' or
discriminator.label_type == 'required'):
raise Exception('GAN model requires labels for training')
batch_size = real_inputs.size(0)
noise = torch.randn(batch_size, generator.encoding_dims, device
=device)
if generator.label_type == 'generated':
label_gen = torch.randint(0, generator.num_classes, (
batch_size,), device=device)
optimizer_discriminator.zero_grad()
if discriminator.label_type == 'none':
dx = discriminator(real_inputs)
elif discriminator.label_type == 'required':
dx = discriminator(real_inputs, labels)
else:
dx = discriminator(real_inputs, label_gen)
if generator.label_type == 'none':
fake = generator(noise)
elif generator.label_type == 'required':
fake = generator(noise, labels)
else:
fake = generator(noise, label_gen)
if discriminator.label_type == 'none':
dgz = discriminator(fake.detach())
elif generator.label_type == 'generated':
dgz = discriminator(fake.detach(), label_gen)
else:
dgz = discriminator(fake.detach(), labels)
loss = self.forward(dx, dgz)
loss.backward()
optimizer_discriminator.step()
return loss.item()
class WassersteinDiscriminatorLossNew(DiscriminatorLoss):
"""Wasserstein GAN generator loss from
`"Wasserstein GAN by Arjovsky et. al." <https://arxiv.org/abs/1701.07875>`_ paper
The loss can be described as:
.. math:: L(D) = f(G(z)) - f(x)
where
- :math:`G` : Generator
- :math:`f` : Critic/Discriminator
- :math:`x` : A sample from the data distribution
- :math:`z` : A sample from the noise prior
Args:
reduction (str, optional): Specifies the reduction to apply to the output.
            If ``none`` no reduction will be applied. If ``mean`` the mean of the output is taken.
If ``sum`` the elements of the output will be summed.
clip (tuple, optional): Tuple that specifies the maximum and minimum parameter
clamping to be applied, as per the original version of the Wasserstein loss
without Gradient Penalty.
override_train_ops (function, optional): A function is passed to this argument,
if the default ``train_ops`` is not to be used.
"""
def __init__(self, reduction='mean', clip=None, override_train_ops=None):
super(WassersteinDiscriminatorLossNew, self).__init__(reduction,
override_train_ops)
if (isinstance(clip, tuple) or isinstance(clip, list)) and len(clip
) > 1:
self.clip = clip
else:
self.clip = None
def train_ops(self, generator, discriminator, optimizer_discriminator,
real_inputs, device, labels=None):
"""Defines the standard ``train_ops`` used by wasserstein discriminator loss.
The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops
is as follows:
1. Clamp the discriminator parameters to satisfy :math:`lipschitz\\ condition`
2. :math:`fake = generator(noise)`
3. :math:`value_1 = discriminator(fake)`
4. :math:`value_2 = discriminator(real)`
5. :math:`loss = loss\\_function(value_1, value_2)`
6. Backpropagate by computing :math:`\\nabla loss`
7. Run a step of the optimizer for discriminator
Args:
generator (torchgan.models.Generator): The model to be optimized.
discriminator (torchgan.models.Discriminator): The discriminator which judges the
performance of the generator.
optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters``
of the ``discriminator``.
real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``.
device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
labels (torch.Tensor, optional): Labels for the data.
Returns:
Scalar value of the loss.
"""
if self.override_train_ops is not None:
return self.override_train_ops(generator, discriminator,
optimizer_discriminator, real_inputs, device, labels)
else:
if self.clip is not None:
for p in discriminator.parameters():
p.data.clamp_(self.clip[0], self.clip[1])
return super(WassersteinDiscriminatorLossNew, self).train_ops(
generator, discriminator, optimizer_discriminator,
real_inputs, device, labels)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
torchgan/torchgan
|
WassersteinDiscriminatorLoss
| false
| 16,606
|
[
"MIT"
] | 1,300
|
f4139537ac2d3d8609d5aecc859a6fb797b107a1
|
https://github.com/torchgan/torchgan/tree/f4139537ac2d3d8609d5aecc859a6fb797b107a1
|
NormalizedLinear
|
import math
import torch
import torch.utils.data
from itertools import product as product
from math import sqrt as sqrt
import torch.nn
class NormalizedLinear(torch.nn.Module):
"""
    An advanced Linear layer which supports weight normalization or cosine normalization.
"""
def __init__(self, in_features, out_features, bias=False, feat_norm=
True, scale_mode='learn', scale_init=1.0):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.feat_norm = feat_norm
self.scale_mode = scale_mode
self.scale_init = scale_init
self.weight = torch.nn.Parameter(torch.Tensor(out_features,
in_features))
if bias:
self.bias = torch.nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
if self.scale_mode == 'constant':
self.scale = scale_init
elif self.scale_mode == 'learn':
self.scale = torch.nn.Parameter(torch.ones(1) * scale_init)
else:
raise NotImplementedError
def reset_parameters(self):
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight
)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias, -bound, bound)
def forward(self, inputs):
"""
Args:
inputs (torch.Tensor): (N, C)
Return:
output (torch.Tensor): (N, D)
"""
if self.feat_norm:
inputs = torch.nn.functional.normalize(inputs, dim=1)
output = inputs.mm(torch.nn.functional.normalize(self.weight, dim=1
).t())
output = self.scale * output
return output
def extra_repr(self):
s = 'in_features={in_features}, out_features={out_features}'
if self.bias is None:
s += ', bias=False'
s += ', feat_norm={feat_norm}'
s += ', scale_mode={scale_mode}'
s += ', scale_init={scale_init}'
return s.format(**self.__dict__)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
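# Sanity-check sketch (an illustrative addition): with ``feat_norm=True`` and a
# constant unit scale, NormalizedLinear reduces to the cosine similarity between
# each input row and each weight row.
def _cosine_equivalence_sketch():
    layer = NormalizedLinear(4, 3, scale_mode='constant', scale_init=1.0)
    torch.nn.init.normal_(layer.weight)  # the weight is allocated uninitialized
    x = torch.rand(2, 4)
    out = layer(x)
    cos = torch.nn.functional.cosine_similarity(
        x.unsqueeze(1), layer.weight.unsqueeze(0), dim=2)
    assert torch.allclose(out, cos, atol=1e-06)
    return out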
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.utils.data
from itertools import product as product
from math import sqrt as sqrt
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_0[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf2)
buf3 = buf1
del buf1
triton_poi_fused_mul_1[grid(16)](primals_3, buf2, buf3, 16, XBLOCK=
16, num_warps=1, num_stages=1)
return buf3, primals_2, primals_3, buf0, buf2
class NormalizedLinearNew(torch.nn.Module):
"""
    An advanced Linear layer which supports weight normalization or cosine normalization.
"""
def __init__(self, in_features, out_features, bias=False, feat_norm=
True, scale_mode='learn', scale_init=1.0):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.feat_norm = feat_norm
self.scale_mode = scale_mode
self.scale_init = scale_init
self.weight = torch.nn.Parameter(torch.Tensor(out_features,
in_features))
if bias:
self.bias = torch.nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
if self.scale_mode == 'constant':
self.scale = scale_init
elif self.scale_mode == 'learn':
self.scale = torch.nn.Parameter(torch.ones(1) * scale_init)
else:
raise NotImplementedError
def reset_parameters(self):
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight
)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias, -bound, bound)
def extra_repr(self):
s = 'in_features={in_features}, out_features={out_features}'
if self.bias is None:
s += ', bias=False'
s += ', feat_norm={feat_norm}'
s += ', scale_mode={scale_mode}'
s += ', scale_init={scale_init}'
return s.format(**self.__dict__)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
tonysy/cvpods
|
NormalizedLinear
| false
| 16,607
|
[
"Apache-2.0"
] | 548
|
e322d7842ca0e34b1ef6237ea6d350633efc793a
|
https://github.com/tonysy/cvpods/tree/e322d7842ca0e34b1ef6237ea6d350633efc793a
|
Value
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Value(nn.Module):
def __init__(self, num_inputs):
super(Value, self).__init__()
self.affine1 = nn.Linear(num_inputs, 64)
self.affine2 = nn.Linear(64, 64)
self.value_head = nn.Linear(64, 1)
self.value_head.weight.data.mul_(0.1)
self.value_head.bias.data.mul_(0.0)
def forward(self, x):
x = F.tanh(self.affine1(x))
x = F.tanh(self.affine2(x))
state_values = self.value_head(x)
return state_values
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4}]
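# Usage sketch (an illustrative addition): the value head maps each state in a
# batch to a single scalar estimate.
def _value_usage_sketch():
    net = Value(num_inputs=3)
    states = torch.rand(5, 3)  # batch of 5 states with 3 features each
    values = net(states)
    assert values.shape == (5, 1)  # one value estimate per state
    return values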
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (1, 64), (64, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 1), (1, 64), 0),
alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_6, primals_4
class ValueNew(nn.Module):
def __init__(self, num_inputs):
super(ValueNew, self).__init__()
self.affine1 = nn.Linear(num_inputs, 64)
self.affine2 = nn.Linear(64, 64)
self.value_head = nn.Linear(64, 1)
self.value_head.weight.data.mul_(0.1)
self.value_head.bias.data.mul_(0.0)
def forward(self, input_0):
primals_1 = self.affine1.weight
primals_2 = self.affine1.bias
primals_4 = self.affine2.weight
primals_5 = self.affine2.bias
primals_6 = self.value_head.weight
primals_7 = self.value_head.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
tpbarron/pytorch-ppo
|
Value
| false
| 16,608
|
[
"MIT"
] | 47
|
f73226865e34443f93dbec58939329c9278828e8
|
https://github.com/tpbarron/pytorch-ppo/tree/f73226865e34443f93dbec58939329c9278828e8
|
MinimaxDiscriminatorLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def minimax_discriminator_loss(dx, dgz, label_smoothing=0.0, reduction='mean'):
target_ones = torch.ones_like(dgz) * (1.0 - label_smoothing)
target_zeros = torch.zeros_like(dx)
loss = F.binary_cross_entropy_with_logits(dx, target_ones, reduction=
reduction)
loss += F.binary_cross_entropy_with_logits(dgz, target_zeros, reduction
=reduction)
return loss
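# Worked check (an illustrative addition): with zero label smoothing the helper
# equals the textbook expression -[log(sigmoid(dx)) + log(1 - sigmoid(dgz))]
# averaged over elements.
def _minimax_loss_check():
    dx = torch.tensor([1.5, -0.3])   # discriminator logits on real data
    dgz = torch.tensor([0.2, -1.0])  # discriminator logits on fake data
    loss = minimax_discriminator_loss(dx, dgz)
    manual = -(torch.log(torch.sigmoid(dx)) +
               torch.log(1 - torch.sigmoid(dgz))).mean()
    assert torch.isclose(loss, manual, atol=1e-06)
    return loss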
class DiscriminatorLoss(nn.Module):
"""Base class for all discriminator losses.
.. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this.
Args:
reduction (str, optional): Specifies the reduction to apply to the output.
If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size.
If ``sum`` the elements of the output are summed.
override_train_ops (function, optional): Function to be used in place of the default ``train_ops``
"""
def __init__(self, reduction='mean', override_train_ops=None):
super(DiscriminatorLoss, self).__init__()
self.reduction = reduction
self.override_train_ops = override_train_ops
self.arg_map = {}
def set_arg_map(self, value):
"""Updates the ``arg_map`` for passing a different value to the ``train_ops``.
Args:
value (dict): A mapping of the ``argument name`` in the method signature and the
variable name in the ``Trainer`` it corresponds to.
.. note::
If the ``train_ops`` signature is
``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)``
then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``.
In this case we make the following function call
``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``.
"""
self.arg_map.update(value)
def train_ops(self, generator, discriminator, optimizer_discriminator,
real_inputs, device, labels=None):
"""Defines the standard ``train_ops`` used by most losses. Losses which have a different
training procedure can either ``subclass`` it **(recommended approach)** or make use of
``override_train_ops`` argument.
The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops
is as follows:
1. :math:`fake = generator(noise)`
2. :math:`value_1 = discriminator(fake)`
3. :math:`value_2 = discriminator(real)`
4. :math:`loss = loss\\_function(value_1, value_2)`
5. Backpropagate by computing :math:`\\nabla loss`
6. Run a step of the optimizer for discriminator
Args:
generator (torchgan.models.Generator): The model to be optimized.
discriminator (torchgan.models.Discriminator): The discriminator which judges the
performance of the generator.
optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters``
of the ``discriminator``.
real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``.
device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
            batch_size (int): Batch Size of the data inferred from the ``DataLoader`` by the ``Trainer``.
labels (torch.Tensor, optional): Labels for the data.
Returns:
Scalar value of the loss.
"""
if self.override_train_ops is not None:
return self.override_train_ops(self, generator, discriminator,
optimizer_discriminator, real_inputs, device, labels)
else:
if labels is None and (generator.label_type == 'required' or
discriminator.label_type == 'required'):
raise Exception('GAN model requires labels for training')
batch_size = real_inputs.size(0)
noise = torch.randn(batch_size, generator.encoding_dims, device
=device)
if generator.label_type == 'generated':
label_gen = torch.randint(0, generator.num_classes, (
batch_size,), device=device)
optimizer_discriminator.zero_grad()
if discriminator.label_type == 'none':
dx = discriminator(real_inputs)
elif discriminator.label_type == 'required':
dx = discriminator(real_inputs, labels)
else:
dx = discriminator(real_inputs, label_gen)
if generator.label_type == 'none':
fake = generator(noise)
elif generator.label_type == 'required':
fake = generator(noise, labels)
else:
fake = generator(noise, label_gen)
if discriminator.label_type == 'none':
dgz = discriminator(fake.detach())
elif generator.label_type == 'generated':
dgz = discriminator(fake.detach(), label_gen)
else:
dgz = discriminator(fake.detach(), labels)
loss = self.forward(dx, dgz)
loss.backward()
optimizer_discriminator.step()
return loss.item()
class MinimaxDiscriminatorLoss(DiscriminatorLoss):
"""Minimax game discriminator loss from the original GAN paper `"Generative Adversarial Networks
by Goodfellow et. al." <https://arxiv.org/abs/1406.2661>`_
The loss can be described as:
.. math:: L(D) = -[log(D(x)) + log(1 - D(G(z)))]
where
- :math:`G` : Generator
- :math:`D` : Discriminator
- :math:`x` : A sample from the data distribution
- :math:`z` : A sample from the noise prior
Args:
        label_smoothing (float, optional): The factor by which the real labels (1 in this case)
            are to be smoothed. For example, label_smoothing = 0.2 changes the value of the real
            labels to 0.8.
        reduction (str, optional): Specifies the reduction to apply to the output.
            If ``none`` no reduction will be applied. If ``mean`` the mean of the output is taken.
If ``sum`` the elements of the output will be summed.
override_train_ops (function, optional): A function is passed to this argument,
if the default ``train_ops`` is not to be used.
"""
def __init__(self, label_smoothing=0.0, reduction='mean',
override_train_ops=None):
super(MinimaxDiscriminatorLoss, self).__init__(reduction,
override_train_ops)
self.label_smoothing = label_smoothing
def forward(self, dx, dgz):
"""Computes the loss for the given input.
Args:
dx (torch.Tensor) : Output of the Discriminator with real data. It must have the
dimensions (N, \\*) where \\* means any number of additional
dimensions.
dgz (torch.Tensor) : Output of the Discriminator with generated data. It must have the
dimensions (N, \\*) where \\* means any number of additional
dimensions.
Returns:
scalar if reduction is applied else Tensor with dimensions (N, \\*).
"""
return minimax_discriminator_loss(dx, dgz, label_smoothing=self.
label_smoothing, reduction=self.reduction)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp13 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.0
tmp2 = tmp1 * tmp0
tmp3 = triton_helpers.minimum(tmp1, tmp0)
tmp4 = tl_math.abs(tmp0)
tmp5 = -tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = libdevice.log1p(tmp6)
tmp8 = tmp3 - tmp7
tmp9 = tmp2 - tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp14 = triton_helpers.minimum(tmp1, tmp13)
tmp15 = tl_math.abs(tmp13)
tmp16 = -tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = libdevice.log1p(tmp17)
tmp19 = tmp14 - tmp18
tmp20 = tmp13 - tmp19
tmp21 = tl.broadcast_to(tmp20, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tmp24 = 256.0
tmp25 = tmp12 / tmp24
tmp26 = tmp23 / tmp24
tmp27 = tmp25 + tmp26
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp27, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_0[grid(1)](buf2,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
def minimax_discriminator_loss(dx, dgz, label_smoothing=0.0, reduction='mean'):
target_ones = torch.ones_like(dgz) * (1.0 - label_smoothing)
target_zeros = torch.zeros_like(dx)
loss = F.binary_cross_entropy_with_logits(dx, target_ones, reduction=
reduction)
loss += F.binary_cross_entropy_with_logits(dgz, target_zeros, reduction
=reduction)
return loss
class DiscriminatorLoss(nn.Module):
"""Base class for all discriminator losses.
.. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this.
Args:
reduction (str, optional): Specifies the reduction to apply to the output.
If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size.
If ``sum`` the elements of the output are summed.
override_train_ops (function, optional): Function to be used in place of the default ``train_ops``
"""
def __init__(self, reduction='mean', override_train_ops=None):
super(DiscriminatorLoss, self).__init__()
self.reduction = reduction
self.override_train_ops = override_train_ops
self.arg_map = {}
def set_arg_map(self, value):
"""Updates the ``arg_map`` for passing a different value to the ``train_ops``.
Args:
value (dict): A mapping of the ``argument name`` in the method signature and the
variable name in the ``Trainer`` it corresponds to.
.. note::
If the ``train_ops`` signature is
``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)``
then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``.
In this case we make the following function call
``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``.
"""
self.arg_map.update(value)
def train_ops(self, generator, discriminator, optimizer_discriminator,
real_inputs, device, labels=None):
"""Defines the standard ``train_ops`` used by most losses. Losses which have a different
training procedure can either ``subclass`` it **(recommended approach)** or make use of
``override_train_ops`` argument.
The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops
is as follows:
1. :math:`fake = generator(noise)`
2. :math:`value_1 = discriminator(fake)`
3. :math:`value_2 = discriminator(real)`
4. :math:`loss = loss\\_function(value_1, value_2)`
5. Backpropagate by computing :math:`\\nabla loss`
6. Run a step of the optimizer for discriminator
Args:
generator (torchgan.models.Generator): The model to be optimized.
discriminator (torchgan.models.Discriminator): The discriminator which judges the
performance of the generator.
optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters``
of the ``discriminator``.
real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``.
device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
            batch_size (int): Batch Size of the data inferred from the ``DataLoader`` by the ``Trainer``.
labels (torch.Tensor, optional): Labels for the data.
Returns:
Scalar value of the loss.
"""
if self.override_train_ops is not None:
return self.override_train_ops(self, generator, discriminator,
optimizer_discriminator, real_inputs, device, labels)
else:
if labels is None and (generator.label_type == 'required' or
discriminator.label_type == 'required'):
raise Exception('GAN model requires labels for training')
batch_size = real_inputs.size(0)
noise = torch.randn(batch_size, generator.encoding_dims, device
=device)
if generator.label_type == 'generated':
label_gen = torch.randint(0, generator.num_classes, (
batch_size,), device=device)
optimizer_discriminator.zero_grad()
if discriminator.label_type == 'none':
dx = discriminator(real_inputs)
elif discriminator.label_type == 'required':
dx = discriminator(real_inputs, labels)
else:
dx = discriminator(real_inputs, label_gen)
if generator.label_type == 'none':
fake = generator(noise)
elif generator.label_type == 'required':
fake = generator(noise, labels)
else:
fake = generator(noise, label_gen)
if discriminator.label_type == 'none':
dgz = discriminator(fake.detach())
elif generator.label_type == 'generated':
dgz = discriminator(fake.detach(), label_gen)
else:
dgz = discriminator(fake.detach(), labels)
loss = self.forward(dx, dgz)
loss.backward()
optimizer_discriminator.step()
return loss.item()
class MinimaxDiscriminatorLossNew(DiscriminatorLoss):
"""Minimax game discriminator loss from the original GAN paper `"Generative Adversarial Networks
by Goodfellow et. al." <https://arxiv.org/abs/1406.2661>`_
The loss can be described as:
.. math:: L(D) = -[log(D(x)) + log(1 - D(G(z)))]
where
- :math:`G` : Generator
- :math:`D` : Discriminator
- :math:`x` : A sample from the data distribution
- :math:`z` : A sample from the noise prior
Args:
        label_smoothing (float, optional): The factor by which the real labels (1 in this case)
            are to be smoothed. For example, label_smoothing = 0.2 changes the value of the real
            labels to 0.8.
        reduction (str, optional): Specifies the reduction to apply to the output.
            If ``none`` no reduction will be applied. If ``mean`` the mean of the output is taken.
If ``sum`` the elements of the output will be summed.
override_train_ops (function, optional): A function is passed to this argument,
if the default ``train_ops`` is not to be used.
"""
def __init__(self, label_smoothing=0.0, reduction='mean',
override_train_ops=None):
super(MinimaxDiscriminatorLossNew, self).__init__(reduction,
override_train_ops)
self.label_smoothing = label_smoothing
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
torchgan/torchgan
|
MinimaxDiscriminatorLoss
| false
| 16,609
|
[
"MIT"
] | 1,300
|
f4139537ac2d3d8609d5aecc859a6fb797b107a1
|
https://github.com/torchgan/torchgan/tree/f4139537ac2d3d8609d5aecc859a6fb797b107a1
|
VirtualBatchNorm
|
import torch
import torch.nn as nn
class VirtualBatchNorm(nn.Module):
"""Virtual Batch Normalization Module as proposed in the paper
`"Improved Techniques for Training GANs by Salimans et. al." <https://arxiv.org/abs/1805.08318>`_
Performs Normalizes the features of a batch based on the statistics collected on a reference
batch of samples that are chosen once and fixed from the start, as opposed to regular
batch normalization that uses the statistics of the batch being normalized
Virtual Batch Normalization requires that the size of the batch being normalized is at least
a multiple of (and ideally equal to) the size of the reference batch. Keep this in mind while
choosing the batch size in ```torch.utils.data.DataLoader``` or use ```drop_last=True```
.. math:: y = \\frac{x - \\mathrm{E}[x_{ref}]}{\\sqrt{\\mathrm{Var}[x_{ref}] + \\epsilon}} * \\gamma + \\beta
where
- :math:`x` : Batch Being Normalized
- :math:`x_{ref}` : Reference Batch
Args:
in_features (int): Size of the input dimension to be normalized
eps (float, optional): Value to be added to variance for numerical stability while normalizing
"""
def __init__(self, in_features, eps=1e-05):
super(VirtualBatchNorm, self).__init__()
self.in_features = in_features
self.scale = nn.Parameter(torch.ones(in_features))
self.bias = nn.Parameter(torch.zeros(in_features))
self.ref_mu = None
self.ref_var = None
self.eps = eps
def _batch_stats(self, x):
"""Computes the statistics of the batch ``x``.
Args:
x (torch.Tensor): Tensor whose statistics need to be computed.
Returns:
A tuple of the mean and variance of the batch ``x``.
"""
mu = torch.mean(x, dim=0, keepdim=True)
var = torch.var(x, dim=0, keepdim=True)
return mu, var
def _normalize(self, x, mu, var):
"""Normalizes the tensor ``x`` using the statistics ``mu`` and ``var``.
Args:
x (torch.Tensor): The Tensor to be normalized.
mu (torch.Tensor): Mean using which the Tensor is to be normalized.
var (torch.Tensor): Variance used in the normalization of ``x``.
Returns:
Normalized Tensor ``x``.
"""
std = torch.sqrt(self.eps + var)
x = (x - mu) / std
sizes = list(x.size())
for dim, i in enumerate(x.size()):
if dim != 1:
sizes[dim] = 1
scale = self.scale.view(*sizes)
bias = self.bias.view(*sizes)
return x * scale + bias
def forward(self, x, clear=True):
"""Computes the output of the Virtual Batch Normalization
Args:
            x (torch.Tensor): A Torch Tensor of dimension at least 2 which is to be Normalized
            clear (bool, optional): If ``True``, the stored reference statistics are cleared once
                a non-reference batch has been normalized with them
Returns:
Torch Tensor of the same dimension after normalizing with respect to the statistics of the reference batch
"""
assert x.size(1) == self.in_features
if self.ref_mu is None or self.ref_var is None:
self.ref_mu, self.ref_var = self._batch_stats(x)
self.ref_mu = self.ref_mu.clone().detach()
self.ref_var = self.ref_var.clone().detach()
out = self._normalize(x, self.ref_mu, self.ref_var)
else:
out = self._normalize(x, self.ref_mu, self.ref_var)
if clear:
self.ref_mu = None
self.ref_var = None
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
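# Usage sketch (an illustrative addition): the first call fixes the reference
# statistics; the next call normalizes against them and, with the default
# ``clear=True``, resets them so a new reference batch can be chosen.
def _vbn_usage_sketch():
    vbn = VirtualBatchNorm(in_features=4)
    ref_batch = torch.rand(8, 4)
    batch = torch.rand(8, 4)
    _ = vbn(ref_batch)  # stores ref_mu / ref_var computed on the reference batch
    out = vbn(batch)    # normalized with the stored reference statistics
    assert vbn.ref_mu is None and vbn.ref_var is None  # cleared after use
    return out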
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_var_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = 3.0
tmp21 = tmp19 / tmp20
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1e-05
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tmp2 / tmp6
tmp9 = tmp7 * tmp8
tmp11 = tmp9 + tmp10
tl.store(out_ptr0 + x3, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_var_0[grid(64)](primals_1, buf0, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_sqrt_sub_1[grid(256)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf2, buf1, buf0, primals_1, buf0, buf1
class VirtualBatchNormNew(nn.Module):
"""Virtual Batch Normalization Module as proposed in the paper
`"Improved Techniques for Training GANs by Salimans et. al." <https://arxiv.org/abs/1805.08318>`_
Performs Normalizes the features of a batch based on the statistics collected on a reference
batch of samples that are chosen once and fixed from the start, as opposed to regular
batch normalization that uses the statistics of the batch being normalized
Virtual Batch Normalization requires that the size of the batch being normalized is at least
a multiple of (and ideally equal to) the size of the reference batch. Keep this in mind while
choosing the batch size in ```torch.utils.data.DataLoader``` or use ```drop_last=True```
.. math:: y = \\frac{x - \\mathrm{E}[x_{ref}]}{\\sqrt{\\mathrm{Var}[x_{ref}] + \\epsilon}} * \\gamma + \\beta
where
- :math:`x` : Batch Being Normalized
- :math:`x_{ref}` : Reference Batch
Args:
in_features (int): Size of the input dimension to be normalized
eps (float, optional): Value to be added to variance for numerical stability while normalizing
"""
def __init__(self, in_features, eps=1e-05):
super(VirtualBatchNormNew, self).__init__()
self.in_features = in_features
self.scale = nn.Parameter(torch.ones(in_features))
self.bias = nn.Parameter(torch.zeros(in_features))
self.ref_mu = None
self.ref_var = None
self.eps = eps
def _batch_stats(self, x):
"""Computes the statistics of the batch ``x``.
Args:
x (torch.Tensor): Tensor whose statistics need to be computed.
Returns:
A tuple of the mean and variance of the batch ``x``.
"""
mu = torch.mean(x, dim=0, keepdim=True)
var = torch.var(x, dim=0, keepdim=True)
return mu, var
def _normalize(self, x, mu, var):
"""Normalizes the tensor ``x`` using the statistics ``mu`` and ``var``.
Args:
x (torch.Tensor): The Tensor to be normalized.
mu (torch.Tensor): Mean using which the Tensor is to be normalized.
var (torch.Tensor): Variance used in the normalization of ``x``.
Returns:
Normalized Tensor ``x``.
"""
std = torch.sqrt(self.eps + var)
x = (x - mu) / std
sizes = list(x.size())
for dim, i in enumerate(x.size()):
if dim != 1:
sizes[dim] = 1
scale = self.scale.view(*sizes)
bias = self.bias.view(*sizes)
return x * scale + bias
def forward(self, input_0):
primals_2 = self.scale
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
torchgan/torchgan
|
VirtualBatchNorm
| false
| 16,610
|
[
"MIT"
] | 1,300
|
f4139537ac2d3d8609d5aecc859a6fb797b107a1
|
https://github.com/torchgan/torchgan/tree/f4139537ac2d3d8609d5aecc859a6fb797b107a1
|
CosineFastRCNNOutputLayers
|
import math
import torch
import numpy as np
import torch.nn as nn
import torch.utils.data
from itertools import product as product
from math import sqrt as sqrt
import torch.nn
class NormalizedLinear(torch.nn.Module):
"""
    An advanced Linear layer which supports weight normalization or cosine normalization.
"""
def __init__(self, in_features, out_features, bias=False, feat_norm=
True, scale_mode='learn', scale_init=1.0):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.feat_norm = feat_norm
self.scale_mode = scale_mode
self.scale_init = scale_init
self.weight = torch.nn.Parameter(torch.Tensor(out_features,
in_features))
if bias:
self.bias = torch.nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
if self.scale_mode == 'constant':
self.scale = scale_init
elif self.scale_mode == 'learn':
self.scale = torch.nn.Parameter(torch.ones(1) * scale_init)
else:
raise NotImplementedError
def reset_parameters(self):
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight
)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias, -bound, bound)
def forward(self, inputs):
"""
Args:
inputs (torch.Tensor): (N, C)
Return:
output (torch.Tensor): (N, D)
"""
if self.feat_norm:
inputs = torch.nn.functional.normalize(inputs, dim=1)
output = inputs.mm(torch.nn.functional.normalize(self.weight, dim=1
).t())
output = self.scale * output
return output
def extra_repr(self):
s = 'in_features={in_features}, out_features={out_features}'
if self.bias is None:
s += ', bias=False'
s += ', feat_norm={feat_norm}'
s += ', scale_mode={scale_mode}'
s += ', scale_init={scale_init}'
return s.format(**self.__dict__)
class CosineFastRCNNOutputLayers(nn.Module):
"""
Two linear layers for predicting Fast R-CNN outputs:
(1) proposal-to-detection box regression deltas
(2) classification scores
"""
def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg,
box_dim=4, scale_mode='learn', scale_init=20.0):
"""
Args:
input_size (int): channels, or (channels, height, width)
num_classes (int): number of foreground classes
cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
box_dim (int): the dimension of bounding boxes.
Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes
"""
super(CosineFastRCNNOutputLayers, self).__init__()
if not isinstance(input_size, int):
input_size = np.prod(input_size)
self.cls_score = NormalizedLinear(input_size, num_classes + 1,
scale_mode=scale_mode, scale_init=scale_init)
num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for layer in [self.cls_score, self.bbox_pred]:
if layer.bias is not None:
nn.init.constant_(layer.bias, 0)
def forward(self, x):
if x.dim() > 2:
x = torch.flatten(x, start_dim=1)
scores = self.cls_score(x)
proposal_deltas = self.bbox_pred(x)
return scores, proposal_deltas
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'num_classes': 4, 'cls_agnostic_bbox_reg': 4}
]
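# Shape sketch (an illustrative addition with hypothetical sizes): the head
# produces (num_classes + 1) cosine scores and per-class box deltas per RoI.
def _cosine_head_shape_sketch():
    head = CosineFastRCNNOutputLayers(input_size=256, num_classes=10,
        cls_agnostic_bbox_reg=False)
    feats = torch.rand(32, 256)  # 32 RoI feature vectors
    scores, deltas = head(feats)
    assert scores.shape == (32, 11)  # 10 foreground classes + background
    assert deltas.shape == (32, 40)  # 10 classes * box_dim (= 4)
    return scores, deltas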
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import numpy as np
import torch.nn as nn
import torch.utils.data
from itertools import product as product
from math import sqrt as sqrt
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (5, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((5, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(20)](primals_2, buf1, 20, XBLOCK=32,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 5), (1, 4), 0),
out=buf2)
buf3 = reinterpret_tensor(buf1, (4, 5), (5, 1), 0)
del buf1
triton_poi_fused_mul_2[grid(20)](primals_3, buf2, buf3, 20, XBLOCK=
32, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_4
del primals_5
return buf3, buf4, primals_1, primals_2, primals_3, buf0, buf2
class NormalizedLinear(torch.nn.Module):
"""
    An advanced Linear layer which supports weight normalization or cosine normalization.
"""
def __init__(self, in_features, out_features, bias=False, feat_norm=
True, scale_mode='learn', scale_init=1.0):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.feat_norm = feat_norm
self.scale_mode = scale_mode
self.scale_init = scale_init
self.weight = torch.nn.Parameter(torch.Tensor(out_features,
in_features))
if bias:
self.bias = torch.nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
if self.scale_mode == 'constant':
self.scale = scale_init
elif self.scale_mode == 'learn':
self.scale = torch.nn.Parameter(torch.ones(1) * scale_init)
else:
raise NotImplementedError
def reset_parameters(self):
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight
)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias, -bound, bound)
def forward(self, inputs):
"""
Args:
inputs (torch.Tensor): (N, C)
Return:
output (torch.Tensor): (N, D)
"""
if self.feat_norm:
inputs = torch.nn.functional.normalize(inputs, dim=1)
output = inputs.mm(torch.nn.functional.normalize(self.weight, dim=1
).t())
output = self.scale * output
return output
def extra_repr(self):
s = 'in_features={in_features}, out_features={out_features}'
if self.bias is None:
s += ', bias=False'
s += ', feat_norm={feat_norm}'
s += ', scale_mode={scale_mode}'
s += ', scale_init={scale_init}'
return s.format(**self.__dict__)
class CosineFastRCNNOutputLayersNew(nn.Module):
"""
Two linear layers for predicting Fast R-CNN outputs:
(1) proposal-to-detection box regression deltas
(2) classification scores
"""
def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg,
box_dim=4, scale_mode='learn', scale_init=20.0):
"""
Args:
input_size (int): channels, or (channels, height, width)
num_classes (int): number of foreground classes
cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
box_dim (int): the dimension of bounding boxes.
Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes
"""
super(CosineFastRCNNOutputLayersNew, self).__init__()
if not isinstance(input_size, int):
input_size = np.prod(input_size)
self.cls_score = NormalizedLinear(input_size, num_classes + 1,
scale_mode=scale_mode, scale_init=scale_init)
num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for layer in [self.cls_score, self.bbox_pred]:
if layer.bias is not None:
nn.init.constant_(layer.bias, 0)
def forward(self, input_0):
primals_2 = self.cls_score.weight
primals_3 = self.cls_score.scale
primals_1 = self.bbox_pred.weight
primals_5 = self.bbox_pred.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
|
tonysy/cvpods
|
CosineFastRCNNOutputLayers
| false
| 16,611
|
[
"Apache-2.0"
] | 548
|
e322d7842ca0e34b1ef6237ea6d350633efc793a
|
https://github.com/tonysy/cvpods/tree/e322d7842ca0e34b1ef6237ea6d350633efc793a
|
MinibatchDiscrimination1d
|
import torch
import torch.nn as nn
class MinibatchDiscrimination1d(nn.Module):
"""1D Minibatch Discrimination Module as proposed in the paper `"Improved Techniques for
    Training GANs by Salimans et. al." <https://arxiv.org/abs/1606.03498>`_
Allows the Discriminator to easily detect mode collapse by augmenting the activations to the succeeding
layer with side information that allows it to determine the 'closeness' of the minibatch examples
with each other
.. math :: M_i = T * f(x_{i})
.. math :: c_b(x_{i}, x_{j}) = \\exp(-||M_{i, b} - M_{j, b}||_1) \\in \\mathbb{R}.
    .. math :: o(x_{i})_b = \\sum_{j=1}^{n} c_b(x_{i},x_{j}) \\in \\mathbb{R}
    .. math :: o(x_{i}) = \\Big[ o(x_{i})_1, o(x_{i})_2, \\dots, o(x_{i})_B \\Big] \\in \\mathbb{R}^B
.. math :: o(X) \\in \\mathbb{R}^{n \\times B}
This is followed by concatenating :math:`o(x_{i})` and :math:`f(x_{i})`
where
- :math:`f(x_{i}) \\in \\mathbb{R}^A` : Activations from an intermediate layer
    - :math:`T \\in \\mathbb{R}^{A \\times B \\times C}` : Parameter Tensor for generating minibatch discrimination matrix
Args:
in_features (int): Features input corresponding to dimension :math:`A`
out_features (int): Number of output features that are to be concatenated corresponding to dimension :math:`B`
intermediate_features (int): Intermediate number of features corresponding to dimension :math:`C`
Returns:
A Tensor of size :math:`(N, in_features + out_features)` where :math:`N` is the batch size
"""
def __init__(self, in_features, out_features, intermediate_features=16):
super(MinibatchDiscrimination1d, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.intermediate_features = intermediate_features
self.T = nn.Parameter(torch.Tensor(in_features, out_features,
intermediate_features))
nn.init.normal_(self.T)
def forward(self, x):
"""Computes the output of the Minibatch Discrimination Layer
Args:
            x (torch.Tensor): A Torch Tensor of dimensions :math:`(N, in_features)`
        Returns:
            2D Torch Tensor of size :math:`(N, in_features + out_features)` after applying Minibatch Discrimination
"""
M = torch.mm(x, self.T.view(self.in_features, -1))
M = M.view(-1, self.out_features, self.intermediate_features
).unsqueeze(0)
M_t = M.permute(1, 0, 2, 3)
out = torch.sum(torch.exp(-torch.abs(M - M_t).sum(3)), dim=0) - 1
return torch.cat([x, out], 1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
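# Verification sketch (an illustrative addition): the vectorized forward matches
# a naive double loop over the batch, appending B = 3 similarity features.
def _minibatch_discrimination_naive_check():
    mbd = MinibatchDiscrimination1d(in_features=4, out_features=3,
        intermediate_features=5)
    x = torch.rand(6, 4)
    out = mbd(x)
    assert out.shape == (6, 7)  # original 4 features + 3 appended features
    M = x.mm(mbd.T.view(4, -1)).view(6, 3, 5).detach()
    naive = torch.zeros(6, 3)
    for i in range(6):
        for j in range(6):
            naive[i] += torch.exp(-(M[i] - M[j]).abs().sum(dim=1))
    # the module subtracts 1 to discard the i == j contribution (exp(0) = 1)
    assert torch.allclose(out[:, 4:], naive - 1, atol=1e-05)
    return out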
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_abs_exp_neg_sub_sum_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x5 = xindex % 16
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r3 + 16 * x5), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.load(in_ptr0 + (r3 + 16 * x0 + 64 * x2), xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (16 + 4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.load(in_ptr1 + (32 + 4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.load(in_ptr1 + (48 + 4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = 1.0
tmp17 = tmp15 - tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp6, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp5, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 16), (64, 16, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 64),
(64, 1), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_abs_exp_neg_sub_sum_0[grid(64)](buf2, buf0, 64, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_1[grid(32)](primals_2, buf2, buf3, 32, XBLOCK=
32, num_warps=1, num_stages=1)
return buf3, buf0, buf2, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)
class MinibatchDiscrimination1dNew(nn.Module):
"""1D Minibatch Discrimination Module as proposed in the paper `"Improved Techniques for
    Training GANs by Salimans et. al." <https://arxiv.org/abs/1606.03498>`_
Allows the Discriminator to easily detect mode collapse by augmenting the activations to the succeeding
layer with side information that allows it to determine the 'closeness' of the minibatch examples
with each other
.. math :: M_i = T * f(x_{i})
.. math :: c_b(x_{i}, x_{j}) = \\exp(-||M_{i, b} - M_{j, b}||_1) \\in \\mathbb{R}.
    .. math :: o(x_{i})_b = \\sum_{j=1}^{n} c_b(x_{i},x_{j}) \\in \\mathbb{R}
    .. math :: o(x_{i}) = \\Big[ o(x_{i})_1, o(x_{i})_2, \\dots, o(x_{i})_B \\Big] \\in \\mathbb{R}^B
.. math :: o(X) \\in \\mathbb{R}^{n \\times B}
This is followed by concatenating :math:`o(x_{i})` and :math:`f(x_{i})`
where
- :math:`f(x_{i}) \\in \\mathbb{R}^A` : Activations from an intermediate layer
    - :math:`T \\in \\mathbb{R}^{A \\times B \\times C}` : Parameter Tensor for generating minibatch discrimination matrix
Args:
in_features (int): Features input corresponding to dimension :math:`A`
out_features (int): Number of output features that are to be concatenated corresponding to dimension :math:`B`
intermediate_features (int): Intermediate number of features corresponding to dimension :math:`C`
Returns:
A Tensor of size :math:`(N, in_features + out_features)` where :math:`N` is the batch size
"""
def __init__(self, in_features, out_features, intermediate_features=16):
super(MinibatchDiscrimination1dNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.intermediate_features = intermediate_features
self.T = nn.Parameter(torch.Tensor(in_features, out_features,
intermediate_features))
nn.init.normal_(self.T)
def forward(self, input_0):
primals_1 = self.T
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
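# A minimal usage sketch (not part of the original source; the helper name is
# made up, and a CUDA device is assumed because the generated call() allocates
# CUDA buffers): the module appends out_features discrimination statistics to
# each sample's in_features activations.
def _demo_minibatch_discrimination():
    torch.manual_seed(0)
    module = MinibatchDiscrimination1dNew(in_features=4, out_features=4).cuda()
    x = torch.rand(4, 4, device='cuda')
    out = module(x)
    assert out.shape == (4, 8)  # (N, in_features + out_features)
    return out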
|
torchgan/torchgan
|
MinibatchDiscrimination1d
| false
| 16,612
|
[
"MIT"
] | 1,300
|
f4139537ac2d3d8609d5aecc859a6fb797b107a1
|
https://github.com/torchgan/torchgan/tree/f4139537ac2d3d8609d5aecc859a6fb797b107a1
|
GaussMembFunc
|
import torch
def _mk_param(val):
"""Make a torch parameter from a scalar value"""
if isinstance(val, torch.Tensor):
val = val.item()
return torch.nn.Parameter(torch.tensor(val, dtype=torch.float))
class GaussMembFunc(torch.nn.Module):
"""
Gaussian membership functions, defined by two parameters:
mu, the mean (center)
sigma, the standard deviation.
"""
def __init__(self, mu, sigma):
super(GaussMembFunc, self).__init__()
self.register_parameter('mu', _mk_param(mu))
self.register_parameter('sigma', _mk_param(sigma))
def forward(self, x):
val = torch.exp(-torch.pow(x - self.mu, 2) / (2 * self.sigma ** 2))
return val
def pretty(self):
return 'GaussMembFunc {} {}'.format(self.mu, self.sigma)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'mu': 4, 'sigma': 4}]
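# A quick numerical check (illustrative only; the helper name is an invention
# for this sketch): the forward pass should match the closed form
# exp(-(x - mu)^2 / (2 * sigma^2)) elementwise.
def _check_gauss_closed_form():
    mf = GaussMembFunc(mu=4, sigma=4)
    x = torch.rand(4, 4, 4, 4)
    expected = torch.exp(-(x - 4.0) ** 2 / (2 * 4.0 ** 2))
    assert torch.allclose(mf(x), expected)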
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_exp_mul_neg_pow_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp3 = tmp0 - tmp2
tmp4 = tmp3 * tmp3
tmp5 = -tmp4
tmp8 = tmp7 * tmp7
tmp9 = 2.0
tmp10 = tmp8 * tmp9
tmp11 = tmp5 / tmp10
tmp12 = tl_math.exp(tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_exp_mul_neg_pow_sub_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
return buf0, primals_1, primals_2, primals_3, buf0
def _mk_param(val):
"""Make a torch parameter from a scalar value"""
if isinstance(val, torch.Tensor):
val = val.item()
return torch.nn.Parameter(torch.tensor(val, dtype=torch.float))
class GaussMembFuncNew(torch.nn.Module):
"""
Gaussian membership functions, defined by two parameters:
mu, the mean (center)
sigma, the standard deviation.
"""
def __init__(self, mu, sigma):
super(GaussMembFuncNew, self).__init__()
self.register_parameter('mu', _mk_param(mu))
self.register_parameter('sigma', _mk_param(sigma))
def pretty(self):
return 'GaussMembFunc {} {}'.format(self.mu, self.sigma)
def forward(self, input_0):
primals_1 = self.mu
primals_3 = self.sigma
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
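# Agreement sketch (assumes a CUDA device, since call() builds CUDA buffers;
# the tolerance is a guess): the Triton-compiled module should reproduce the
# eager Gaussian membership values.
def _compare_eager_vs_compiled():
    new = GaussMembFuncNew(mu=4, sigma=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = torch.exp(-(x - new.mu) ** 2 / (2 * new.sigma ** 2))
    assert torch.allclose(new(x), eager, atol=1e-06)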
|
trituenhantaoio/anfis-pytorch
|
GaussMembFunc
| false
| 16,613
|
[
"MIT"
] | 66
|
7a6bf123d69b550e46abeddd5b4a776243d43aa6
|
https://github.com/trituenhantaoio/anfis-pytorch/tree/7a6bf123d69b550e46abeddd5b4a776243d43aa6
|
DisAlignCosineFastRCNNOutputLayers
|
import math
import torch
import numpy as np
import torch.nn as nn
import torch.utils.data
from itertools import product as product
from math import sqrt as sqrt
import torch.nn
def cat(tensors, dim=0):
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
class NormalizedLinear(torch.nn.Module):
"""
    An advanced Linear layer which supports weight normalization or cosine normalization.
"""
def __init__(self, in_features, out_features, bias=False, feat_norm=
True, scale_mode='learn', scale_init=1.0):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.feat_norm = feat_norm
self.scale_mode = scale_mode
self.scale_init = scale_init
self.weight = torch.nn.Parameter(torch.Tensor(out_features,
in_features))
if bias:
self.bias = torch.nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
if self.scale_mode == 'constant':
self.scale = scale_init
elif self.scale_mode == 'learn':
self.scale = torch.nn.Parameter(torch.ones(1) * scale_init)
else:
raise NotImplementedError
def reset_parameters(self):
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight
)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias, -bound, bound)
def forward(self, inputs):
"""
Args:
inputs (torch.Tensor): (N, C)
Return:
output (torch.Tensor): (N, D)
"""
if self.feat_norm:
inputs = torch.nn.functional.normalize(inputs, dim=1)
output = inputs.mm(torch.nn.functional.normalize(self.weight, dim=1
).t())
output = self.scale * output
return output
def extra_repr(self):
s = 'in_features={in_features}, out_features={out_features}'
if self.bias is None:
s += ', bias=False'
s += ', feat_norm={feat_norm}'
s += ', scale_mode={scale_mode}'
s += ', scale_init={scale_init}'
return s.format(**self.__dict__)
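# Worked check (a sketch; the seed, sizes and tolerance are assumptions): with
# feat_norm enabled each output entry is scale * cos(input, weight row), so
# its magnitude is bounded by |scale|.
def _check_cosine_bound():
    torch.manual_seed(0)
    lin = NormalizedLinear(8, 5, scale_mode='constant', scale_init=2.0)
    lin.reset_parameters()  # the constructor leaves the weight uninitialized
    out = lin(torch.randn(16, 8))
    assert out.abs().max() <= 2.0 + 1e-05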
class DisAlignCosineFastRCNNOutputLayers(nn.Module):
"""
Two linear layers for predicting Fast R-CNN outputs:
(1) proposal-to-detection box regression deltas
(2) classification scores
"""
def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg,
box_dim=4, scale_mode='learn', scale_init=20.0):
"""
Args:
input_size (int): channels, or (channels, height, width)
num_classes (int): number of foreground classes
            cls_agnostic_bbox_reg (bool): whether to use class-agnostic bbox regression
box_dim (int): the dimension of bounding boxes.
Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes
"""
super(DisAlignCosineFastRCNNOutputLayers, self).__init__()
if not isinstance(input_size, int):
input_size = np.prod(input_size)
self.cls_score = NormalizedLinear(input_size, num_classes + 1,
scale_mode=scale_mode, scale_init=scale_init)
num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
self.logit_scale = nn.Parameter(torch.ones(num_classes))
self.logit_bias = nn.Parameter(torch.zeros(num_classes))
self.confidence_layer = nn.Linear(input_size, 1)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.confidence_layer.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for layer in [self.cls_score, self.confidence_layer, self.bbox_pred]:
if layer.bias is not None:
nn.init.constant_(layer.bias, 0)
def forward(self, x):
if x.dim() > 2:
x = torch.flatten(x, start_dim=1)
scores = self.cls_score(x)
confidence = self.confidence_layer(x).sigmoid()
scores_tmp = confidence * (scores[:, :-1] * self.logit_scale + self
.logit_bias)
scores_tmp = scores_tmp + (1 - confidence) * scores[:, :-1]
aligned_scores = cat([scores_tmp, scores[:, -1].view(-1, 1)], dim=1)
proposal_deltas = self.bbox_pred(x)
return aligned_scores, proposal_deltas
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'num_classes': 4, 'cls_agnostic_bbox_reg': 4}
]
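# A minimal usage sketch (illustrative; here cls_agnostic_bbox_reg=False, so
# the regressor emits num_classes * box_dim deltas): the head returns
# confidence-calibrated classification scores plus box regression deltas.
def _demo_disalign_head():
    head = DisAlignCosineFastRCNNOutputLayers(input_size=4, num_classes=4,
        cls_agnostic_bbox_reg=False)
    scores, deltas = head(torch.rand(4, 4))
    assert scores.shape == (4, 5)   # num_classes + 1 (background)
    assert deltas.shape == (4, 16)  # num_classes * box_dim
    return scores, deltas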
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import numpy as np
import torch.nn as nn
import torch.utils.data
from itertools import product as product
from math import sqrt as sqrt
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp7 = tl.load(in_ptr1 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.sigmoid(tmp5)
tmp9 = tl.load(in_ptr2 + (5 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tmp8 * tmp9
tmp11 = tl.load(in_ptr3 + x0, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 * tmp11
tmp13 = tl.load(in_ptr4 + x0, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 + tmp13
tmp15 = tmp6 * tmp14
tmp16 = 1.0
tmp17 = tmp16 - tmp6
tmp18 = tmp17 * tmp10
tmp19 = tmp15 + tmp18
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp4, tmp19, tmp20)
tmp22 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp25 = tl.load(in_ptr2 + (4 + 5 * x1), tmp22 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp8 * tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp22, tmp26, tmp27)
tmp29 = tl.where(tmp4, tmp21, tmp28)
tl.store(out_ptr0 + x2, tmp29, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (5, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((5, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(20)](primals_2, buf1, 20, XBLOCK=32,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 5), (1, 4), 0),
out=buf2)
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(
primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_4
del primals_5
buf5 = reinterpret_tensor(buf1, (4, 5), (5, 1), 0)
del buf1
triton_poi_fused_cat_2[grid(20)](buf4, primals_3, buf2, primals_6,
primals_7, buf5, 20, XBLOCK=32, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, primals_1, reinterpret_tensor(
primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_8
del primals_9
return (buf5, buf6, primals_1, primals_2, primals_3, primals_6,
primals_7, buf0, buf2, buf4)
def cat(tensors, dim=0):
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
class NormalizedLinear(torch.nn.Module):
"""
    An advanced Linear layer which supports weight normalization or cosine normalization.
"""
def __init__(self, in_features, out_features, bias=False, feat_norm=
True, scale_mode='learn', scale_init=1.0):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.feat_norm = feat_norm
self.scale_mode = scale_mode
self.scale_init = scale_init
self.weight = torch.nn.Parameter(torch.Tensor(out_features,
in_features))
if bias:
self.bias = torch.nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
if self.scale_mode == 'constant':
self.scale = scale_init
elif self.scale_mode == 'learn':
self.scale = torch.nn.Parameter(torch.ones(1) * scale_init)
else:
raise NotImplementedError
def reset_parameters(self):
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight
)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias, -bound, bound)
def forward(self, inputs):
"""
Args:
inputs (torch.Tensor): (N, C)
Return:
output (torch.Tensor): (N, D)
"""
if self.feat_norm:
inputs = torch.nn.functional.normalize(inputs, dim=1)
output = inputs.mm(torch.nn.functional.normalize(self.weight, dim=1
).t())
output = self.scale * output
return output
def extra_repr(self):
s = 'in_features={in_features}, out_features={out_features}'
if self.bias is None:
s += ', bias=False'
s += ', feat_norm={feat_norm}'
s += ', scale_mode={scale_mode}'
s += ', scale_init={scale_init}'
return s.format(**self.__dict__)
class DisAlignCosineFastRCNNOutputLayersNew(nn.Module):
"""
Two linear layers for predicting Fast R-CNN outputs:
(1) proposal-to-detection box regression deltas
(2) classification scores
"""
def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg,
box_dim=4, scale_mode='learn', scale_init=20.0):
"""
Args:
input_size (int): channels, or (channels, height, width)
num_classes (int): number of foreground classes
            cls_agnostic_bbox_reg (bool): whether to use class-agnostic bbox regression
box_dim (int): the dimension of bounding boxes.
Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes
"""
super(DisAlignCosineFastRCNNOutputLayersNew, self).__init__()
if not isinstance(input_size, int):
input_size = np.prod(input_size)
self.cls_score = NormalizedLinear(input_size, num_classes + 1,
scale_mode=scale_mode, scale_init=scale_init)
num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
self.logit_scale = nn.Parameter(torch.ones(num_classes))
self.logit_bias = nn.Parameter(torch.zeros(num_classes))
self.confidence_layer = nn.Linear(input_size, 1)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.confidence_layer.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for layer in [self.cls_score, self.confidence_layer, self.bbox_pred]:
if layer.bias is not None:
nn.init.constant_(layer.bias, 0)
def forward(self, input_0):
primals_6 = self.logit_scale
primals_7 = self.logit_bias
primals_2 = self.cls_score.weight
primals_3 = self.cls_score.scale
primals_1 = self.bbox_pred.weight
primals_9 = self.bbox_pred.bias
primals_4 = self.confidence_layer.weight
primals_5 = self.confidence_layer.bias
primals_8 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
|
tonysy/cvpods
|
DisAlignCosineFastRCNNOutputLayers
| false
| 16,615
|
[
"Apache-2.0"
] | 548
|
e322d7842ca0e34b1ef6237ea6d350633efc793a
|
https://github.com/tonysy/cvpods/tree/e322d7842ca0e34b1ef6237ea6d350633efc793a
|
Policy
|
import copy
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def square(a):
return torch.pow(a, 2.0)
class Policy(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(Policy, self).__init__()
self.affine1 = nn.Linear(num_inputs, 64)
self.affine2 = nn.Linear(64, 64)
self.action_mean = nn.Linear(64, num_outputs)
self.action_mean.weight.data.mul_(0.1)
self.action_mean.bias.data.mul_(0.0)
self.action_log_std = nn.Parameter(torch.zeros(1, num_outputs))
self.module_list_current = [self.affine1, self.affine2, self.
action_mean, self.action_log_std]
self.module_list_old = [None] * len(self.module_list_current)
self.backup()
def backup(self):
for i in range(len(self.module_list_current)):
self.module_list_old[i] = copy.deepcopy(self.module_list_current[i]
)
def kl_div_p_q(self, p_mean, p_std, q_mean, q_std):
"""KL divergence D_{KL}[p(x)||q(x)] for a fully factorized Gaussian"""
numerator = square(p_mean - q_mean) + square(p_std) - square(q_std)
denominator = 2.0 * square(q_std) + eps
return torch.sum(numerator / denominator + torch.log(q_std) - torch
.log(p_std))
def kl_old_new(self):
"""Gives kld from old params to new params"""
kl_div = self.kl_div_p_q(self.module_list_old[-2], self.
module_list_old[-1], self.action_mean, self.action_log_std)
return kl_div
def entropy(self):
"""Gives entropy of current defined prob dist"""
ent = torch.sum(self.action_log_std + 0.5 * torch.log(2.0 * np.pi *
np.e))
return ent
def forward(self, x, old=False):
if old:
x = F.tanh(self.module_list_old[0](x))
x = F.tanh(self.module_list_old[1](x))
action_mean = self.module_list_old[2](x)
action_log_std = self.module_list_old[3].expand_as(action_mean)
action_std = torch.exp(action_log_std)
else:
x = F.tanh(self.affine1(x))
x = F.tanh(self.affine2(x))
action_mean = self.action_mean(x)
action_log_std = self.action_log_std.expand_as(action_mean)
action_std = torch.exp(action_log_std)
return action_mean, action_log_std, action_std
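# Numerical check for kl_div_p_q (a sketch; the helper name and tolerance are
# assumptions): the hand-written fully factorized Gaussian KL should agree
# with torch.distributions summed over dimensions.
def _check_gaussian_kl():
    from torch.distributions import Normal, kl_divergence
    policy = Policy(num_inputs=4, num_outputs=4)
    p_mean, q_mean = torch.randn(8), torch.randn(8)
    p_std, q_std = torch.rand(8) + 0.5, torch.rand(8) + 0.5
    manual = policy.kl_div_p_q(p_mean, p_std, q_mean, q_std)
    reference = kl_divergence(Normal(p_mean, p_std), Normal(q_mean, q_std)
        ).sum()
    assert torch.allclose(manual, reference, atol=1e-05)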
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_outputs': 4}]
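# A minimal usage sketch (the helper is an illustration, not part of the
# original source): draw a stochastic action from the diagonal-Gaussian
# policy head.
def _sample_action(policy, obs):
    action_mean, _, action_std = policy(obs)
    return torch.normal(action_mean, action_std)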
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import copy
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
@triton.jit
def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tl.store(out_ptr0 + x2, tmp1, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_exp_1[grid(256)](primals_8, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 1), 0
), buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, buf5, primals_6, primals_4
def square(a):
return torch.pow(a, 2.0)
class PolicyNew(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(PolicyNew, self).__init__()
self.affine1 = nn.Linear(num_inputs, 64)
self.affine2 = nn.Linear(64, 64)
self.action_mean = nn.Linear(64, num_outputs)
self.action_mean.weight.data.mul_(0.1)
self.action_mean.bias.data.mul_(0.0)
self.action_log_std = nn.Parameter(torch.zeros(1, num_outputs))
self.module_list_current = [self.affine1, self.affine2, self.
action_mean, self.action_log_std]
self.module_list_old = [None] * len(self.module_list_current)
self.backup()
def backup(self):
for i in range(len(self.module_list_current)):
self.module_list_old[i] = copy.deepcopy(self.module_list_current[i]
)
def kl_div_p_q(self, p_mean, p_std, q_mean, q_std):
"""KL divergence D_{KL}[p(x)||q(x)] for a fully factorized Gaussian"""
        numerator = square(p_mean - q_mean) + square(p_std) - square(q_std)
        eps = 1e-08  # small stabilizer; `eps` was undefined in the original source
        denominator = 2.0 * square(q_std) + eps
return torch.sum(numerator / denominator + torch.log(q_std) - torch
.log(p_std))
def kl_old_new(self):
"""Gives kld from old params to new params"""
kl_div = self.kl_div_p_q(self.module_list_old[-2], self.
module_list_old[-1], self.action_mean, self.action_log_std)
return kl_div
def entropy(self):
"""Gives entropy of current defined prob dist"""
ent = torch.sum(self.action_log_std + 0.5 * torch.log(2.0 * np.pi *
np.e))
return ent
def forward(self, input_0):
primals_8 = self.action_log_std
primals_1 = self.affine1.weight
primals_2 = self.affine1.bias
primals_4 = self.affine2.weight
primals_5 = self.affine2.bias
primals_6 = self.action_mean.weight
primals_7 = self.action_mean.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1], output[2]
|
tpbarron/pytorch-ppo
|
Policy
| false
| 16,616
|
[
"MIT"
] | 47
|
f73226865e34443f93dbec58939329c9278828e8
|
https://github.com/tpbarron/pytorch-ppo/tree/f73226865e34443f93dbec58939329c9278828e8
|
ActorCritic
|
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
class ActorCritic(nn.Module):
def __init__(self, num_inputs, num_outputs, hidden=64):
super(ActorCritic, self).__init__()
self.affine1 = nn.Linear(num_inputs, hidden)
self.affine2 = nn.Linear(hidden, hidden)
self.action_mean = nn.Linear(hidden, num_outputs)
self.action_mean.weight.data.mul_(0.1)
self.action_mean.bias.data.mul_(0.0)
self.action_log_std = nn.Parameter(torch.zeros(1, num_outputs))
self.value_head = nn.Linear(hidden, 1)
self.module_list_current = [self.affine1, self.affine2, self.
action_mean, self.action_log_std, self.value_head]
self.module_list_old = [None] * len(self.module_list_current)
self.backup()
def backup(self):
for i in range(len(self.module_list_current)):
self.module_list_old[i] = copy.deepcopy(self.module_list_current[i]
)
def forward(self, x, old=False):
if old:
x = F.tanh(self.module_list_old[0](x))
x = F.tanh(self.module_list_old[1](x))
action_mean = self.module_list_old[2](x)
action_log_std = self.module_list_old[3].expand_as(action_mean)
action_std = torch.exp(action_log_std)
value = self.module_list_old[4](x)
else:
x = F.tanh(self.affine1(x))
x = F.tanh(self.affine2(x))
action_mean = self.action_mean(x)
action_log_std = self.action_log_std.expand_as(action_mean)
action_std = torch.exp(action_log_std)
value = self.value_head(x)
return action_mean, action_log_std, action_std, value
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_outputs': 4}]
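# Sanity sketch (assumes no parameter updates have happened since
# construction): backup() deep-copies the current modules, so the old=True
# path should match the fresh path exactly.
def _check_backup_consistency():
    ac = ActorCritic(num_inputs=4, num_outputs=4)
    x = torch.rand(4, 4, 4, 4)
    mean_new, _, _, value_new = ac(x)
    mean_old, _, _, value_old = ac(x, old=True)
    assert torch.allclose(mean_new, mean_old)
    assert torch.allclose(value_new, value_old)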
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import copy
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
@triton.jit
def triton_poi_fused_exp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tl.store(out_ptr0 + x2, tmp1, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1, 64), (64, 1))
assert_size_stride(primals_10, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_exp_1[grid(256)](primals_8, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_10, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_9, (64, 1), (1, 64), 0),
alpha=1, beta=1, out=buf7)
del primals_10
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_8, (4, 4, 4, 4), (0, 0, 0, 1), 0
), buf5, reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, buf5, primals_9, primals_6, primals_4
class ActorCriticNew(nn.Module):
def __init__(self, num_inputs, num_outputs, hidden=64):
super(ActorCriticNew, self).__init__()
self.affine1 = nn.Linear(num_inputs, hidden)
self.affine2 = nn.Linear(hidden, hidden)
self.action_mean = nn.Linear(hidden, num_outputs)
self.action_mean.weight.data.mul_(0.1)
self.action_mean.bias.data.mul_(0.0)
self.action_log_std = nn.Parameter(torch.zeros(1, num_outputs))
self.value_head = nn.Linear(hidden, 1)
self.module_list_current = [self.affine1, self.affine2, self.
action_mean, self.action_log_std, self.value_head]
self.module_list_old = [None] * len(self.module_list_current)
self.backup()
def backup(self):
for i in range(len(self.module_list_current)):
self.module_list_old[i] = copy.deepcopy(self.module_list_current[i]
)
def forward(self, input_0):
primals_8 = self.action_log_std
primals_1 = self.affine1.weight
primals_2 = self.affine1.bias
primals_4 = self.affine2.weight
primals_5 = self.affine2.bias
primals_6 = self.action_mean.weight
primals_7 = self.action_mean.bias
primals_9 = self.value_head.weight
primals_10 = self.value_head.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1], output[2], output[3]
|
tpbarron/pytorch-ppo
|
ActorCritic
| false
| 16,617
|
[
"MIT"
] | 47
|
f73226865e34443f93dbec58939329c9278828e8
|
https://github.com/tpbarron/pytorch-ppo/tree/f73226865e34443f93dbec58939329c9278828e8
|
BellMembFunc
|
import torch
def _mk_param(val):
"""Make a torch parameter from a scalar value"""
if isinstance(val, torch.Tensor):
val = val.item()
return torch.nn.Parameter(torch.tensor(val, dtype=torch.float))
class BellMembFunc(torch.nn.Module):
"""
Generalised Bell membership function; defined by three parameters:
a, the half-width (at the crossover point)
b, controls the slope at the crossover point (which is -b/2a)
c, the center point
"""
def __init__(self, a, b, c):
super(BellMembFunc, self).__init__()
self.register_parameter('a', _mk_param(a))
self.register_parameter('b', _mk_param(b))
self.register_parameter('c', _mk_param(c))
self.b.register_hook(BellMembFunc.b_log_hook)
@staticmethod
def b_log_hook(grad):
"""
Possibility of a log(0) in the grad for b, giving a nan.
Fix this by replacing any nan in the grad with ~0.
"""
grad[torch.isnan(grad)] = 1e-09
return grad
def forward(self, x):
dist = torch.pow((x - self.c) / self.a, 2)
return torch.reciprocal(1 + torch.pow(dist, self.b))
def pretty(self):
return 'BellMembFunc {} {} {}'.format(self.a, self.b, self.c)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'a': 4, 'b': 4, 'c': 4}]
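# Worked check (illustrative): at the crossover points x = c - a and x = c + a
# the distance term ((x - c) / a)^2 equals 1, so the membership value is
# 1 / (1 + 1^b) = 0.5 for any b.
def _check_bell_crossover():
    mf = BellMembFunc(a=4, b=4, c=4)
    x = torch.tensor([0.0, 8.0])  # c - a and c + a
    assert torch.allclose(mf(x), torch.tensor([0.5, 0.5]))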
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_pow_reciprocal_sub_0(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp8 = tl.load(in_ptr3 + 0)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp3 = tmp0 - tmp2
tmp6 = tmp3 / tmp5
tmp7 = tmp6 * tmp6
tmp10 = libdevice.pow(tmp7, tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 1, tl.int32)
tmp14 = tmp13 / tmp12
tl.store(out_ptr0 + x0, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (), ())
assert_size_stride(primals_4, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_pow_reciprocal_sub_0[grid(256)](primals_2,
primals_1, primals_3, primals_4, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf0, primals_1, primals_2, primals_3, primals_4, buf0
def _mk_param(val):
"""Make a torch parameter from a scalar value"""
if isinstance(val, torch.Tensor):
val = val.item()
return torch.nn.Parameter(torch.tensor(val, dtype=torch.float))
class BellMembFuncNew(torch.nn.Module):
"""
Generalised Bell membership function; defined by three parameters:
a, the half-width (at the crossover point)
b, controls the slope at the crossover point (which is -b/2a)
c, the center point
"""
def __init__(self, a, b, c):
super(BellMembFuncNew, self).__init__()
self.register_parameter('a', _mk_param(a))
self.register_parameter('b', _mk_param(b))
self.register_parameter('c', _mk_param(c))
self.b.register_hook(BellMembFuncNew.b_log_hook)
@staticmethod
def b_log_hook(grad):
"""
Possibility of a log(0) in the grad for b, giving a nan.
Fix this by replacing any nan in the grad with ~0.
"""
grad[torch.isnan(grad)] = 1e-09
return grad
def pretty(self):
return 'BellMembFunc {} {} {}'.format(self.a, self.b, self.c)
def forward(self, input_0):
primals_1 = self.a
primals_3 = self.b
primals_4 = self.c
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
trituenhantaoio/anfis-pytorch
|
BellMembFunc
| false
| 16,618
|
[
"MIT"
] | 66
|
7a6bf123d69b550e46abeddd5b4a776243d43aa6
|
https://github.com/trituenhantaoio/anfis-pytorch/tree/7a6bf123d69b550e46abeddd5b4a776243d43aa6
|
DataEmbedding_wo_pos
|
import math
import torch
import torch.nn as nn
class PositionalEmbedding(nn.Module):
def __init__(self, d_model, max_len=5000):
super(PositionalEmbedding, self).__init__()
pe = torch.zeros(max_len, d_model).float()
        pe.requires_grad = False
position = torch.arange(0, max_len).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(
10000.0) / d_model)).exp()
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
return self.pe[:, :x.size(1)]
class TokenEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(TokenEmbedding, self).__init__()
        padding = 1 if tuple(int(v) for v in torch.__version__.split('.')[:2]) >= (1, 5) else 2  # numeric compare; the original string compare misorders e.g. '1.10' < '1.5.0'
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
kernel_size=3, padding=padding, padding_mode='circular', bias=False
)
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_in',
nonlinearity='leaky_relu')
def forward(self, x):
x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)
return x
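# Equivalence sketch for the circular padding (the helper name and tolerance
# are assumptions, and padding=1, i.e. torch >= 1.5, is assumed):
# padding_mode='circular' pads by wrapping the sequence, so an explicit
# circular F.pad followed by an unpadded conv gives the same result.
def _check_circular_pad_equivalence():
    import torch.nn.functional as F
    tok = TokenEmbedding(c_in=4, d_model=4)
    x = torch.rand(4, 4, 4)
    padded = F.pad(x.permute(0, 2, 1), (1, 1), mode='circular')
    manual = F.conv1d(padded, tok.tokenConv.weight).transpose(1, 2)
    assert torch.allclose(tok(x), manual, atol=1e-06)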
class FixedEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(FixedEmbedding, self).__init__()
w = torch.zeros(c_in, d_model).float()
        w.requires_grad = False
position = torch.arange(0, c_in).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(
10000.0) / d_model)).exp()
w[:, 0::2] = torch.sin(position * div_term)
w[:, 1::2] = torch.cos(position * div_term)
self.emb = nn.Embedding(c_in, d_model)
self.emb.weight = nn.Parameter(w, requires_grad=False)
def forward(self, x):
return self.emb(x).detach()
class TemporalEmbedding(nn.Module):
def __init__(self, d_model, embed_type='fixed', freq='h'):
super(TemporalEmbedding, self).__init__()
minute_size = 4
hour_size = 24
weekday_size = 7
day_size = 32
month_size = 13
Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding
if freq == 't':
self.minute_embed = Embed(minute_size, d_model)
self.hour_embed = Embed(hour_size, d_model)
self.weekday_embed = Embed(weekday_size, d_model)
self.day_embed = Embed(day_size, d_model)
self.month_embed = Embed(month_size, d_model)
def forward(self, x):
x = x.long()
minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self,
'minute_embed') else 0.0
hour_x = self.hour_embed(x[:, :, 3])
weekday_x = self.weekday_embed(x[:, :, 2])
day_x = self.day_embed(x[:, :, 1])
month_x = self.month_embed(x[:, :, 0])
return hour_x + weekday_x + day_x + month_x + minute_x
class TimeFeatureEmbedding(nn.Module):
def __init__(self, d_model, embed_type='timeF', freq='h'):
super(TimeFeatureEmbedding, self).__init__()
freq_map = {'h': 4, 't': 5, 's': 6, 'm': 1, 'a': 1, 'w': 2, 'd': 3,
'b': 3}
d_inp = freq_map[freq]
self.embed = nn.Linear(d_inp, d_model, bias=False)
def forward(self, x):
return self.embed(x)
class DataEmbedding_wo_pos(nn.Module):
def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1
):
super(DataEmbedding_wo_pos, self).__init__()
self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)
self.position_embedding = PositionalEmbedding(d_model=d_model)
self.temporal_embedding = TemporalEmbedding(d_model=d_model,
embed_type=embed_type, freq=freq
) if embed_type != 'timeF' else TimeFeatureEmbedding(d_model=
d_model, embed_type=embed_type, freq=freq)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x, x_mark):
x = self.value_embedding(x) + self.temporal_embedding(x_mark)
return self.dropout(x)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'c_in': 4, 'd_model': 4}]
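# A minimal usage sketch (index ranges are inferred from the embedding sizes
# above: month < 13, day < 32, weekday < 7, hour < 24): with the default
# freq='h' the time-mark tensor carries [month, day, weekday, hour] per step.
def _demo_data_embedding():
    emb = DataEmbedding_wo_pos(c_in=4, d_model=4)
    x = torch.rand(4, 4, 4)  # (batch, seq_len, c_in)
    x_mark = torch.stack([
        torch.randint(0, 13, (4, 4)),  # month index
        torch.randint(0, 32, (4, 4)),  # day index
        torch.randint(0, 7, (4, 4)),   # weekday index
        torch.randint(0, 24, (4, 4)),  # hour index
    ], dim=-1).float()
    out = emb(x, x_mark)
    assert out.shape == (4, 4, 4)  # (batch, seq_len, d_model)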
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 24
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex % 6
x2 = xindex
y1 = yindex // 6
tmp0 = y0
tmp1 = tl.full([1, 1], 5, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK])
tmp4 = tl.full([1, 1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tmp5 & tmp2
tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK])
tmp8 = tmp7 >= tmp4
tmp9 = tmp7 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tmp10 & tmp6
tmp12 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp11 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tmp13 = float('nan')
tmp14 = tl.where(tmp10, tmp12, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp6, tmp14, tmp15)
tmp17 = tmp3 >= tmp4
tmp18 = tmp3 < tmp1
tmp19 = tmp17 & tmp18
tmp20 = tmp19 & tmp2
tmp21 = tl.load(in_ptr0 + (-20 + x2 + 4 * y0 + 16 * y1), tmp20 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tmp22 = tl.where(tmp19, tmp21, tmp13)
tmp23 = tl.where(tmp5, tmp16, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp2, tmp23, tmp24)
tmp26 = tmp0 < tmp4
tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK])
tmp28 = tmp27 >= tmp4
tmp29 = tmp27 < tmp1
tmp30 = tmp28 & tmp29
tmp31 = tmp30 & tmp26
tmp32 = tl.load(in_ptr0 + (12 + x2 + 4 * y0 + 16 * y1), tmp31 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tmp33 = tl.where(tmp30, tmp32, tmp13)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp26, tmp33, tmp34)
tmp36 = tmp0 >= tmp4
tmp37 = tmp0 < tmp1
tmp38 = tmp36 & tmp37
tmp39 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp38 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tmp40 = tl.where(tmp38, tmp39, tmp13)
tmp41 = tl.where(tmp26, tmp35, tmp40)
tmp42 = tl.where(tmp2, tmp25, tmp41)
tl.store(out_ptr0 + (y0 + 6 * x2 + 24 * y1), tmp42, xmask & ymask)
@triton.jit
def triton_poi_fused_add_embedding_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last')
tmp18 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1.to(tl.int64)
tmp3 = tl.full([XBLOCK], 24, tl.int32)
tmp4 = tmp2 + tmp3
tmp5 = tmp2 < 0
tmp6 = tl.where(tmp5, tmp4, tmp2)
tl.device_assert((0 <= tmp6) & (tmp6 < 24) | ~xmask,
'index out of bounds: 0 <= tmp6 < 24')
tmp8 = tl.load(in_ptr1 + (x1 + 4 * tmp6), xmask, eviction_policy=
'evict_last')
tmp10 = tmp9.to(tl.int64)
tmp11 = tl.full([XBLOCK], 7, tl.int32)
tmp12 = tmp10 + tmp11
tmp13 = tmp10 < 0
tmp14 = tl.where(tmp13, tmp12, tmp10)
tl.device_assert((0 <= tmp14) & (tmp14 < 7) | ~xmask,
'index out of bounds: 0 <= tmp14 < 7')
tmp16 = tl.load(in_ptr2 + (x1 + 4 * tmp14), xmask, eviction_policy=
'evict_last')
tmp17 = tmp8 + tmp16
tmp19 = tmp18.to(tl.int64)
tmp20 = tl.full([XBLOCK], 32, tl.int32)
tmp21 = tmp19 + tmp20
tmp22 = tmp19 < 0
tmp23 = tl.where(tmp22, tmp21, tmp19)
tl.device_assert((0 <= tmp23) & (tmp23 < 32) | ~xmask,
'index out of bounds: 0 <= tmp23 < 32')
tmp25 = tl.load(in_ptr3 + (x1 + 4 * tmp23), xmask, eviction_policy=
'evict_last')
tmp26 = tmp17 + tmp25
tmp28 = tmp27.to(tl.int64)
tmp29 = tl.full([XBLOCK], 13, tl.int32)
tmp30 = tmp28 + tmp29
tmp31 = tmp28 < 0
tmp32 = tl.where(tmp31, tmp30, tmp28)
tl.device_assert((0 <= tmp32) & (tmp32 < 13) | ~xmask,
'index out of bounds: 0 <= tmp32 < 13')
tmp34 = tl.load(in_ptr4 + (x1 + 4 * tmp32), xmask, eviction_policy=
'evict_last')
tmp35 = tmp26 + tmp34
tmp36 = 0.0
tmp37 = tmp35 + tmp36
tmp38 = tmp0 + tmp37
tl.store(in_out_ptr0 + x3, tmp38, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (24, 4), (4, 1))
assert_size_stride(primals_5, (7, 4), (4, 1))
assert_size_stride(primals_6, (32, 4), (4, 1))
assert_size_stride(primals_7, (13, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_copy_0[grid(24, 4)](primals_1, buf1, 24, 4, XBLOCK
=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0)
del buf2
triton_poi_fused_add_embedding_1[grid(64)](buf3, primals_3,
primals_4, primals_5, primals_6, primals_7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
del primals_4
del primals_5
del primals_6
del primals_7
return buf3, primals_2, buf1
class PositionalEmbedding(nn.Module):
def __init__(self, d_model, max_len=5000):
super(PositionalEmbedding, self).__init__()
pe = torch.zeros(max_len, d_model).float()
        pe.requires_grad = False
position = torch.arange(0, max_len).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(
10000.0) / d_model)).exp()
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
return self.pe[:, :x.size(1)]
class TokenEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(TokenEmbedding, self).__init__()
        padding = 1 if tuple(int(v) for v in torch.__version__.split('.')[:2]) >= (1, 5) else 2  # numeric compare; the original string compare misorders e.g. '1.10' < '1.5.0'
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
kernel_size=3, padding=padding, padding_mode='circular', bias=False
)
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_in',
nonlinearity='leaky_relu')
def forward(self, x):
x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)
return x
class FixedEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(FixedEmbedding, self).__init__()
w = torch.zeros(c_in, d_model).float()
        w.requires_grad = False
position = torch.arange(0, c_in).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(
10000.0) / d_model)).exp()
w[:, 0::2] = torch.sin(position * div_term)
w[:, 1::2] = torch.cos(position * div_term)
self.emb = nn.Embedding(c_in, d_model)
self.emb.weight = nn.Parameter(w, requires_grad=False)
def forward(self, x):
return self.emb(x).detach()
class TemporalEmbedding(nn.Module):
def __init__(self, d_model, embed_type='fixed', freq='h'):
super(TemporalEmbedding, self).__init__()
minute_size = 4
hour_size = 24
weekday_size = 7
day_size = 32
month_size = 13
Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding
if freq == 't':
self.minute_embed = Embed(minute_size, d_model)
self.hour_embed = Embed(hour_size, d_model)
self.weekday_embed = Embed(weekday_size, d_model)
self.day_embed = Embed(day_size, d_model)
self.month_embed = Embed(month_size, d_model)
def forward(self, x):
x = x.long()
minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self,
'minute_embed') else 0.0
hour_x = self.hour_embed(x[:, :, 3])
weekday_x = self.weekday_embed(x[:, :, 2])
day_x = self.day_embed(x[:, :, 1])
month_x = self.month_embed(x[:, :, 0])
return hour_x + weekday_x + day_x + month_x + minute_x
class TimeFeatureEmbedding(nn.Module):
def __init__(self, d_model, embed_type='timeF', freq='h'):
super(TimeFeatureEmbedding, self).__init__()
freq_map = {'h': 4, 't': 5, 's': 6, 'm': 1, 'a': 1, 'w': 2, 'd': 3,
'b': 3}
d_inp = freq_map[freq]
self.embed = nn.Linear(d_inp, d_model, bias=False)
def forward(self, x):
return self.embed(x)
class DataEmbedding_wo_posNew(nn.Module):
def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1
):
super(DataEmbedding_wo_posNew, self).__init__()
self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)
self.position_embedding = PositionalEmbedding(d_model=d_model)
self.temporal_embedding = TemporalEmbedding(d_model=d_model,
embed_type=embed_type, freq=freq
) if embed_type != 'timeF' else TimeFeatureEmbedding(d_model=
d_model, embed_type=embed_type, freq=freq)
self.dropout = nn.Dropout(p=dropout)
def forward(self, input_0, input_1):
primals_2 = self.value_embedding.tokenConv.weight
primals_4 = self.temporal_embedding.hour_embed.emb.weight
primals_5 = self.temporal_embedding.weekday_embed.emb.weight
primals_6 = self.temporal_embedding.day_embed.emb.weight
primals_7 = self.temporal_embedding.month_embed.emb.weight
primals_1 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
thuml/Autoformer
|
DataEmbedding_wo_pos
| false
| 16,619
|
[
"MIT"
] | 263
|
6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
|
https://github.com/thuml/Autoformer/tree/6bf300d0bf3e7f3cb4d795dd8ed14ede2000a9ab
|
UpsampleConv
|
import torch
import torch.nn.functional as F
import torch.nn as nn
def l2normalize(v, eps=1e-08):
    return v / (v.norm() + eps)
def sn_weight(weight, u, height, n_power_iterations):
weight.requires_grad_(False)
for _ in range(n_power_iterations):
v = l2normalize(torch.mv(weight.view(height, -1).t(), u))
u = l2normalize(torch.mv(weight.view(height, -1), v))
weight.requires_grad_(True)
sigma = u.dot(weight.view(height, -1).mv(v))
return torch.div(weight, sigma), u
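# Worked check (a sketch; seed, iteration count and tolerance are assumptions,
# and torch.linalg.svdvals is assumed available in the installed PyTorch):
# with enough power iterations sigma approaches the largest singular value,
# so the returned weight has spectral norm close to 1.
def _check_spectral_norm():
    torch.manual_seed(0)
    w = torch.randn(8, 8)
    u = l2normalize(torch.randn(8))
    w_sn, u = sn_weight(w, u, height=8, n_power_iterations=50)
    top_sv = torch.linalg.svdvals(w_sn.view(8, -1))[0]
    assert torch.allclose(top_sv, torch.tensor(1.0), atol=0.01)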
def get_nonlinearity(nonlinearity=None):
if not nonlinearity:
pass
elif callable(nonlinearity):
if nonlinearity == nn.LeakyReLU:
nonlinearity = nonlinearity(0.02, inplace=True)
elif hasattr(nn, nonlinearity):
nonlinearity = getattr(nn, nonlinearity)
        if nonlinearity is nn.LeakyReLU:  # compare to the class; the original compared it to the string 'LeakyReLU', which never matched after getattr
nonlinearity = nonlinearity(0.02, inplace=True)
else:
nonlinearity = nonlinearity()
elif hasattr(nn.functional, nonlinearity):
nonlinearity = getattr(nn.functional, nonlinearity)
else:
raise ValueError(nonlinearity)
return nonlinearity
class SNConv2d(nn.Conv2d):
def __init__(self, *args, n_power_iterations=1, **kwargs):
super(SNConv2d, self).__init__(*args, **kwargs)
self.n_power_iterations = n_power_iterations
self.height = self.weight.shape[0]
self.register_buffer('u', l2normalize(self.weight.new_empty(self.
height).normal_(0, 1)))
def forward(self, input):
w_sn, self.u = sn_weight(self.weight, self.u, self.height, self.
n_power_iterations)
return F.conv2d(input, w_sn, self.bias, self.stride, self.padding,
self.dilation, self.groups)
class UpsampleConv(nn.Module):
def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix=
'', spectral_norm=False):
super(UpsampleConv, self).__init__()
Conv2d = SNConv2d if spectral_norm else nn.Conv2d
models = nn.Sequential()
nonlinearity = get_nonlinearity(nonlinearity)
name = prefix + '_usc'
models.add_module(name + '_up', nn.Upsample(scale_factor=2))
models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias=
False))
if nonlinearity:
models.add_module('{}_{}'.format(name, nonlinearity.__class__.
__name__), nonlinearity)
self.models = models
def forward(self, x):
x = self.models(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4, 'f_size': 4}]
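# Shape sketch (illustrative): Upsample(scale_factor=2) doubles H and W, and
# the conv with kernel f_size=4, stride 1, padding 1 then maps size S to
# S - 1, so a 4x4 input comes out 7x7 -- matching the (4, 4, 7, 7) assert in
# the compiled version below.
def _demo_upsample_conv():
    m = UpsampleConv(dim_in=4, dim_out=4, f_size=4)
    y = m(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 7, 7)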
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp9, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 7, 7), (196, 49, 7, 1))
return buf1, primals_2, buf0
def l2normalize(v, eps=1e-08):
    return v / (v.norm() + eps)
def sn_weight(weight, u, height, n_power_iterations):
weight.requires_grad_(False)
for _ in range(n_power_iterations):
v = l2normalize(torch.mv(weight.view(height, -1).t(), u))
u = l2normalize(torch.mv(weight.view(height, -1), v))
weight.requires_grad_(True)
sigma = u.dot(weight.view(height, -1).mv(v))
return torch.div(weight, sigma), u
def get_nonlinearity(nonlinearity=None):
if not nonlinearity:
pass
elif callable(nonlinearity):
if nonlinearity == nn.LeakyReLU:
nonlinearity = nonlinearity(0.02, inplace=True)
elif hasattr(nn, nonlinearity):
nonlinearity = getattr(nn, nonlinearity)
        if nonlinearity is nn.LeakyReLU:  # compare to the class; the original compared it to the string 'LeakyReLU', which never matched after getattr
nonlinearity = nonlinearity(0.02, inplace=True)
else:
nonlinearity = nonlinearity()
elif hasattr(nn.functional, nonlinearity):
nonlinearity = getattr(nn.functional, nonlinearity)
else:
raise ValueError(nonlinearity)
return nonlinearity
class SNConv2d(nn.Conv2d):
def __init__(self, *args, n_power_iterations=1, **kwargs):
super(SNConv2d, self).__init__(*args, **kwargs)
self.n_power_iterations = n_power_iterations
self.height = self.weight.shape[0]
self.register_buffer('u', l2normalize(self.weight.new_empty(self.
height).normal_(0, 1)))
def forward(self, input):
w_sn, self.u = sn_weight(self.weight, self.u, self.height, self.
n_power_iterations)
return F.conv2d(input, w_sn, self.bias, self.stride, self.padding,
self.dilation, self.groups)
class UpsampleConvNew(nn.Module):
def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix=
'', spectral_norm=False):
super(UpsampleConvNew, self).__init__()
Conv2d = SNConv2d if spectral_norm else nn.Conv2d
models = nn.Sequential()
nonlinearity = get_nonlinearity(nonlinearity)
name = prefix + '_usc'
models.add_module(name + '_up', nn.Upsample(scale_factor=2))
models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias=
False))
if nonlinearity:
models.add_module('{}_{}'.format(name, nonlinearity.__class__.
__name__), nonlinearity)
self.models = models
def forward(self, input_0):
primals_1 = self.models._usc.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| tsirif/cortex | UpsampleConv | false | 16620 | ["BSD-3-Clause"] | 109 | 2837b220f9fb73279df3815bb18b274106412c08 | https://github.com/tsirif/cortex/tree/2837b220f9fb73279df3815bb18b274106412c08 |
DQN_RAM
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DQN_RAM(nn.Module):
def __init__(self, in_features=4, num_actions=18):
"""
        Initialize a deep Q-learning network for testing the algorithm.
        in_features: number of input features.
        num_actions: number of action-values to output, in one-to-one correspondence with the game's actions.
"""
super(DQN_RAM, self).__init__()
self.fc1 = nn.Linear(in_features, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, num_actions)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
return self.fc4(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
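A minimal usage sketch for the eager module above, reusing the record's get_inputs();
nn.Linear acts on the trailing axis, so a [4, 4, 4, 4] input yields [4, 4, 4, 18] Q-values.

net = DQN_RAM(in_features=4, num_actions=18)
q = net(*get_inputs())
assert q.shape == (4, 4, 4, 18)  # batch-like leading dims pass through untouched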
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (64, 128), (128, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (18, 64), (64, 1))
assert_size_stride(primals_9, (18,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf9, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3,
primals_5, buf8, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_6, (128, 64), (1, 128), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_2[grid(4096)](buf5,
primals_7, buf7, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 18), (18, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_8, (64, 18), (1, 64), 0
), alpha=1, beta=1, out=buf6)
del primals_9
return reinterpret_tensor(buf6, (4, 4, 4, 18), (288, 72, 18, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 128), (128, 1), 0
), reinterpret_tensor(buf5, (64, 64), (64, 1), 0
), primals_8, buf7, primals_6, buf8, primals_4, buf9
class DQN_RAMNew(nn.Module):
def __init__(self, in_features=4, num_actions=18):
"""
        Initialize a deep Q-learning network for testing the algorithm.
        in_features: number of input features.
        num_actions: number of action-values to output, in one-to-one correspondence with the game's actions.
"""
super(DQN_RAMNew, self).__init__()
self.fc1 = nn.Linear(in_features, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, num_actions)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
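A parity sketch between the eager and compiled modules, assuming a CUDA device and
both classes above in scope; the tolerance is an assumption sized to absorb
accumulation-order differences between the mm/addmm path and the eager path.

import torch

ref = DQN_RAM().cuda()
new = DQN_RAMNew().cuda()
new.load_state_dict(ref.state_dict())  # identical parameter names, so weights transfer directly
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(new(x), ref(x), atol=1e-5)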
| transedward/pytoch-dqn | DQN_RAM | false | 16621 | ["MIT"] | 358 | 1ffda6f3724b3bb37c3195b09b651b1682d4d4fd | https://github.com/transedward/pytoch-dqn/tree/1ffda6f3724b3bb37c3195b09b651b1682d4d4fd |
MySimpleNet
|
import torch
import torch.nn.functional as F
from torch import nn
class MySimpleNet(nn.Module):
"""
Very simple 2-layer net, slightly adapted from the docs:
https://skorch.readthedocs.io/en/stable/user/quickstart.html
"""
def __init__(self, num_in, num_feat, num_hidden=10, nonlin=F.relu):
super(MySimpleNet, self).__init__()
self.dense0 = nn.Linear(num_in, num_hidden)
self.nonlin = nonlin
self.dropout = nn.Dropout(0.5)
self.dense1 = nn.Linear(num_hidden, num_feat)
self.output = nn.Linear(num_feat, 2)
def forward(self, X, **kwargs):
X = self.nonlin(self.dense0(X))
X = self.dropout(X)
X = F.relu(self.dense1(X))
X = F.softmax(self.output(X))
return X
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_in': 4, 'num_feat': 4}]
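A minimal usage sketch; eval() disables dropout for a deterministic pass, and since
F.softmax is called without an explicit dim, a 4-D input is normalized over dim 1, so
summing probabilities over that axis should give ones.

import torch

net = MySimpleNet(num_in=4, num_feat=4).eval()
probs = net(*get_inputs())
assert torch.allclose(probs.sum(dim=1), torch.ones(4, 4, 2))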
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 8
x2 = xindex // 32
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 8
x2 = xindex // 32
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (10, 4), (4, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 10), (10, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (2, 4), (4, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(640)](buf1,
primals_2, buf8, 640, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0),
reinterpret_tensor(primals_4, (10, 4), (1, 10), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3,
primals_5, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 2), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused__softmax_2[grid(128)](buf4, buf5, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf4
triton_poi_fused__softmax_3[grid(128)](buf5, buf6, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class MySimpleNetNew(nn.Module):
"""
Very simple 2-layer net, slightly adapted from the docs:
https://skorch.readthedocs.io/en/stable/user/quickstart.html
"""
def __init__(self, num_in, num_feat, num_hidden=10, nonlin=F.relu):
super(MySimpleNetNew, self).__init__()
self.dense0 = nn.Linear(num_in, num_hidden)
self.nonlin = nonlin
self.dropout = nn.Dropout(0.5)
self.dense1 = nn.Linear(num_hidden, num_feat)
self.output = nn.Linear(num_feat, 2)
def forward(self, input_0):
primals_1 = self.dense0.weight
primals_2 = self.dense0.bias
primals_4 = self.dense1.weight
primals_5 = self.dense1.bias
primals_6 = self.output.weight
primals_7 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
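The compiled call above contains no dropout kernel, which suggests an eval-mode trace;
a parity sketch (an assumption, CUDA required) therefore puts the eager reference in
eval() before comparing.

import torch

ref = MySimpleNet(num_in=4, num_feat=4).cuda().eval()
new = MySimpleNetNew(num_in=4, num_feat=4).cuda()
new.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(new(x), ref(x), atol=1e-5)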
| trituenhantaoio/anfis-pytorch | MySimpleNet | false | 16622 | ["MIT"] | 66 | 7a6bf123d69b550e46abeddd5b4a776243d43aa6 | https://github.com/trituenhantaoio/anfis-pytorch/tree/7a6bf123d69b550e46abeddd5b4a776243d43aa6 |
MeanPoolConv
|
import torch
import torch.nn.functional as F
import torch.nn as nn
def l2normalize(v, esp=1e-08):
return v / (v.norm() + esp)
def sn_weight(weight, u, height, n_power_iterations):
weight.requires_grad_(False)
for _ in range(n_power_iterations):
v = l2normalize(torch.mv(weight.view(height, -1).t(), u))
u = l2normalize(torch.mv(weight.view(height, -1), v))
weight.requires_grad_(True)
sigma = u.dot(weight.view(height, -1).mv(v))
return torch.div(weight, sigma), u
def get_nonlinearity(nonlinearity=None):
if not nonlinearity:
pass
elif callable(nonlinearity):
if nonlinearity == nn.LeakyReLU:
nonlinearity = nonlinearity(0.02, inplace=True)
elif hasattr(nn, nonlinearity):
nonlinearity = getattr(nn, nonlinearity)
        if nonlinearity == nn.LeakyReLU:  # compare the resolved class; the string was already consumed by getattr
nonlinearity = nonlinearity(0.02, inplace=True)
else:
nonlinearity = nonlinearity()
elif hasattr(nn.functional, nonlinearity):
nonlinearity = getattr(nn.functional, nonlinearity)
else:
raise ValueError(nonlinearity)
return nonlinearity
class SNConv2d(nn.Conv2d):
def __init__(self, *args, n_power_iterations=1, **kwargs):
super(SNConv2d, self).__init__(*args, **kwargs)
self.n_power_iterations = n_power_iterations
self.height = self.weight.shape[0]
self.register_buffer('u', l2normalize(self.weight.new_empty(self.
height).normal_(0, 1)))
def forward(self, input):
w_sn, self.u = sn_weight(self.weight, self.u, self.height, self.
n_power_iterations)
return F.conv2d(input, w_sn, self.bias, self.stride, self.padding,
self.dilation, self.groups)
class MeanPoolConv(nn.Module):
def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix=
'', spectral_norm=False):
super(MeanPoolConv, self).__init__()
Conv2d = SNConv2d if spectral_norm else nn.Conv2d
models = nn.Sequential()
nonlinearity = get_nonlinearity(nonlinearity)
name = 'mpc' + prefix
models.add_module(name + '_pool', nn.AvgPool2d(2, count_include_pad
=False))
models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias=
False))
if nonlinearity:
models.add_module('{}_{}'.format(name, nonlinearity.__class__.
__name__), nonlinearity)
self.models = models
def forward(self, x):
x = self.models(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4, 'f_size': 4}]
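A minimal shape sketch for the eager module: AvgPool2d(2) halves the 4x4 input to 2x2,
and the 4x4 conv with padding 1 then reduces 2x2 to 1x1.

net = MeanPoolConv(dim_in=4, dim_out=4, f_size=4)
y = net(*get_inputs())
assert y.shape == (4, 4, 1, 1)  # 2 + 2*1 - 4 + 1 = 1 along each spatial axis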
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(64)](primals_1, buf0, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
return buf1, primals_2, buf0
def l2normalize(v, esp=1e-08):
return v / (v.norm() + esp)
def sn_weight(weight, u, height, n_power_iterations):
weight.requires_grad_(False)
for _ in range(n_power_iterations):
v = l2normalize(torch.mv(weight.view(height, -1).t(), u))
u = l2normalize(torch.mv(weight.view(height, -1), v))
weight.requires_grad_(True)
sigma = u.dot(weight.view(height, -1).mv(v))
return torch.div(weight, sigma), u
def get_nonlinearity(nonlinearity=None):
if not nonlinearity:
pass
elif callable(nonlinearity):
if nonlinearity == nn.LeakyReLU:
nonlinearity = nonlinearity(0.02, inplace=True)
elif hasattr(nn, nonlinearity):
nonlinearity = getattr(nn, nonlinearity)
        if nonlinearity == nn.LeakyReLU:  # compare the resolved class; the string was already consumed by getattr
nonlinearity = nonlinearity(0.02, inplace=True)
else:
nonlinearity = nonlinearity()
elif hasattr(nn.functional, nonlinearity):
nonlinearity = getattr(nn.functional, nonlinearity)
else:
raise ValueError(nonlinearity)
return nonlinearity
class SNConv2d(nn.Conv2d):
def __init__(self, *args, n_power_iterations=1, **kwargs):
super(SNConv2d, self).__init__(*args, **kwargs)
self.n_power_iterations = n_power_iterations
self.height = self.weight.shape[0]
self.register_buffer('u', l2normalize(self.weight.new_empty(self.
height).normal_(0, 1)))
def forward(self, input):
w_sn, self.u = sn_weight(self.weight, self.u, self.height, self.
n_power_iterations)
return F.conv2d(input, w_sn, self.bias, self.stride, self.padding,
self.dilation, self.groups)
class MeanPoolConvNew(nn.Module):
def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix=
'', spectral_norm=False):
super(MeanPoolConvNew, self).__init__()
Conv2d = SNConv2d if spectral_norm else nn.Conv2d
models = nn.Sequential()
nonlinearity = get_nonlinearity(nonlinearity)
name = 'mpc' + prefix
models.add_module(name + '_pool', nn.AvgPool2d(2, count_include_pad
=False))
models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias=
False))
if nonlinearity:
models.add_module('{}_{}'.format(name, nonlinearity.__class__.
__name__), nonlinearity)
self.models = models
def forward(self, input_0):
primals_1 = self.models.mpc.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| tsirif/cortex | MeanPoolConv | false | 16623 | ["BSD-3-Clause"] | 109 | 2837b220f9fb73279df3815bb18b274106412c08 | https://github.com/tsirif/cortex/tree/2837b220f9fb73279df3815bb18b274106412c08 |
ECB
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SeqConv3x3(nn.Module):
def __init__(self, seq_type, inp_planes, out_planes, depth_multiplier):
super(SeqConv3x3, self).__init__()
self.type = seq_type
self.inp_planes = inp_planes
self.out_planes = out_planes
if self.type == 'conv1x1-conv3x3':
self.mid_planes = int(out_planes * depth_multiplier)
conv0 = torch.nn.Conv2d(self.inp_planes, self.mid_planes,
kernel_size=1, padding=0)
self.k0 = conv0.weight
self.b0 = conv0.bias
conv1 = torch.nn.Conv2d(self.mid_planes, self.out_planes,
kernel_size=3)
self.k1 = conv1.weight
self.b1 = conv1.bias
elif self.type == 'conv1x1-sobelx':
conv0 = torch.nn.Conv2d(self.inp_planes, self.out_planes,
kernel_size=1, padding=0)
self.k0 = conv0.weight
self.b0 = conv0.bias
scale = torch.randn(size=(self.out_planes, 1, 1, 1)) * 0.001
self.scale = nn.Parameter(scale)
bias = torch.randn(self.out_planes) * 0.001
bias = torch.reshape(bias, (self.out_planes,))
self.bias = nn.Parameter(bias)
self.mask = torch.zeros((self.out_planes, 1, 3, 3), dtype=torch
.float32)
for i in range(self.out_planes):
self.mask[i, 0, 0, 0] = 1.0
self.mask[i, 0, 1, 0] = 2.0
self.mask[i, 0, 2, 0] = 1.0
self.mask[i, 0, 0, 2] = -1.0
self.mask[i, 0, 1, 2] = -2.0
self.mask[i, 0, 2, 2] = -1.0
self.mask = nn.Parameter(data=self.mask, requires_grad=False)
elif self.type == 'conv1x1-sobely':
conv0 = torch.nn.Conv2d(self.inp_planes, self.out_planes,
kernel_size=1, padding=0)
self.k0 = conv0.weight
self.b0 = conv0.bias
scale = torch.randn(size=(self.out_planes, 1, 1, 1)) * 0.001
self.scale = nn.Parameter(torch.FloatTensor(scale))
bias = torch.randn(self.out_planes) * 0.001
bias = torch.reshape(bias, (self.out_planes,))
self.bias = nn.Parameter(torch.FloatTensor(bias))
self.mask = torch.zeros((self.out_planes, 1, 3, 3), dtype=torch
.float32)
for i in range(self.out_planes):
self.mask[i, 0, 0, 0] = 1.0
self.mask[i, 0, 0, 1] = 2.0
self.mask[i, 0, 0, 2] = 1.0
self.mask[i, 0, 2, 0] = -1.0
self.mask[i, 0, 2, 1] = -2.0
self.mask[i, 0, 2, 2] = -1.0
self.mask = nn.Parameter(data=self.mask, requires_grad=False)
elif self.type == 'conv1x1-laplacian':
conv0 = torch.nn.Conv2d(self.inp_planes, self.out_planes,
kernel_size=1, padding=0)
self.k0 = conv0.weight
self.b0 = conv0.bias
scale = torch.randn(size=(self.out_planes, 1, 1, 1)) * 0.001
self.scale = nn.Parameter(torch.FloatTensor(scale))
bias = torch.randn(self.out_planes) * 0.001
bias = torch.reshape(bias, (self.out_planes,))
self.bias = nn.Parameter(torch.FloatTensor(bias))
self.mask = torch.zeros((self.out_planes, 1, 3, 3), dtype=torch
.float32)
for i in range(self.out_planes):
self.mask[i, 0, 0, 1] = 1.0
self.mask[i, 0, 1, 0] = 1.0
self.mask[i, 0, 1, 2] = 1.0
self.mask[i, 0, 2, 1] = 1.0
self.mask[i, 0, 1, 1] = -4.0
self.mask = nn.Parameter(data=self.mask, requires_grad=False)
else:
raise ValueError('the type of seqconv is not supported!')
def forward(self, x):
if self.type == 'conv1x1-conv3x3':
y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1)
y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0)
b0_pad = self.b0.view(1, -1, 1, 1)
y0[:, :, 0:1, :] = b0_pad
y0[:, :, -1:, :] = b0_pad
y0[:, :, :, 0:1] = b0_pad
y0[:, :, :, -1:] = b0_pad
y1 = F.conv2d(input=y0, weight=self.k1, bias=self.b1, stride=1)
else:
y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1)
y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0)
b0_pad = self.b0.view(1, -1, 1, 1)
y0[:, :, 0:1, :] = b0_pad
y0[:, :, -1:, :] = b0_pad
y0[:, :, :, 0:1] = b0_pad
y0[:, :, :, -1:] = b0_pad
y1 = F.conv2d(input=y0, weight=self.scale * self.mask, bias=
self.bias, stride=1, groups=self.out_planes)
return y1
def rep_params(self):
device = self.k0.get_device()
if device < 0:
device = None
if self.type == 'conv1x1-conv3x3':
RK = F.conv2d(input=self.k1, weight=self.k0.permute(1, 0, 2, 3))
RB = torch.ones(1, self.mid_planes, 3, 3, device=device
) * self.b0.view(1, -1, 1, 1)
RB = F.conv2d(input=RB, weight=self.k1).view(-1) + self.b1
else:
tmp = self.scale * self.mask
k1 = torch.zeros((self.out_planes, self.out_planes, 3, 3),
device=device)
for i in range(self.out_planes):
k1[i, i, :, :] = tmp[i, 0, :, :]
b1 = self.bias
RK = F.conv2d(input=k1, weight=self.k0.permute(1, 0, 2, 3))
RB = torch.ones(1, self.out_planes, 3, 3, device=device
) * self.b0.view(1, -1, 1, 1)
RB = F.conv2d(input=RB, weight=k1).view(-1) + b1
return RK, RB
class ECB(nn.Module):
def __init__(self, inp_planes, out_planes, depth_multiplier, act_type=
'prelu', with_idt=False):
super(ECB, self).__init__()
self.depth_multiplier = depth_multiplier
self.inp_planes = inp_planes
self.out_planes = out_planes
self.act_type = act_type
if with_idt and self.inp_planes == self.out_planes:
self.with_idt = True
else:
self.with_idt = False
self.conv3x3 = torch.nn.Conv2d(self.inp_planes, self.out_planes,
kernel_size=3, padding=1)
self.conv1x1_3x3 = SeqConv3x3('conv1x1-conv3x3', self.inp_planes,
self.out_planes, self.depth_multiplier)
self.conv1x1_sbx = SeqConv3x3('conv1x1-sobelx', self.inp_planes,
self.out_planes, -1)
self.conv1x1_sby = SeqConv3x3('conv1x1-sobely', self.inp_planes,
self.out_planes, -1)
self.conv1x1_lpl = SeqConv3x3('conv1x1-laplacian', self.inp_planes,
self.out_planes, -1)
if self.act_type == 'prelu':
self.act = nn.PReLU(num_parameters=self.out_planes)
elif self.act_type == 'relu':
self.act = nn.ReLU(inplace=True)
elif self.act_type == 'rrelu':
self.act = nn.RReLU(lower=-0.05, upper=0.05)
elif self.act_type == 'softplus':
self.act = nn.Softplus()
elif self.act_type == 'linear':
pass
else:
            raise ValueError('The type of activation is not supported!')
def forward(self, x):
if self.training:
y = self.conv3x3(x) + self.conv1x1_3x3(x) + self.conv1x1_sbx(x
) + self.conv1x1_sby(x) + self.conv1x1_lpl(x)
if self.with_idt:
y += x
else:
RK, RB = self.rep_params()
y = F.conv2d(input=x, weight=RK, bias=RB, stride=1, padding=1)
if self.act_type != 'linear':
y = self.act(y)
return y
def rep_params(self):
K0, B0 = self.conv3x3.weight, self.conv3x3.bias
K1, B1 = self.conv1x1_3x3.rep_params()
K2, B2 = self.conv1x1_sbx.rep_params()
K3, B3 = self.conv1x1_sby.rep_params()
K4, B4 = self.conv1x1_lpl.rep_params()
RK, RB = K0 + K1 + K2 + K3 + K4, B0 + B1 + B2 + B3 + B4
if self.with_idt:
device = RK.get_device()
if device < 0:
device = None
K_idt = torch.zeros(self.out_planes, self.out_planes, 3, 3,
device=device)
for i in range(self.out_planes):
K_idt[i, i, 1, 1] = 1.0
B_idt = 0.0
RK, RB = RK + K_idt, RB + B_idt
return RK, RB
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inp_planes': 4, 'out_planes': 4, 'depth_multiplier': 1}]
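A sketch of ECB's re-parameterization contract: the train-mode five-branch sum and the
eval-mode single convolution built from rep_params() should agree; the atol below is
an assumption sized for float32 folding error.

import torch

ecb = ECB(inp_planes=4, out_planes=4, depth_multiplier=1)
x = torch.rand(4, 4, 4, 4)
ecb.train()
y_train = ecb(x)
ecb.eval()
with torch.no_grad():
    y_eval = ecb(x)  # forward now runs the folded conv2d(x, RK, RB)
assert torch.allclose(y_train, y_eval, atol=1e-4)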
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_ones_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 9
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 * tmp0
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 9
x0 = xindex % 9
x2 = xindex
tmp3 = tl.load(in_ptr0 + 2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp5 = tl.load(in_ptr1 + (18 + x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp12 = tl.load(in_ptr1 + (9 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + 0)
tmp18 = tl.broadcast_to(tmp17, [XBLOCK])
tmp19 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp6 = tmp4 * tmp5
tmp7 = tl.full([1], 1, tl.int32)
tmp8 = tmp1 == tmp7
tmp9 = tmp0 == tmp7
tmp13 = tmp11 * tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = tmp7 == tmp14
tmp16 = tmp0 == tmp14
tmp20 = tmp18 * tmp19
tmp21 = 0.0
tmp22 = tl.where(tmp16, tmp20, tmp21)
tmp23 = tl.where(tmp15, tmp22, tmp21)
tmp24 = tl.where(tmp9, tmp13, tmp23)
tmp25 = tmp1 == tmp14
tmp26 = tl.where(tmp25, tmp22, tmp21)
tmp27 = tl.where(tmp8, tmp24, tmp26)
tmp28 = tl.where(tmp2, tmp6, tmp27)
tl.store(out_ptr0 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_zeros_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 36
x3 = xindex % 36
x1 = xindex // 9 % 4
x0 = xindex % 9
x5 = xindex
tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (9 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + 0)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp17 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 1, tl.int32)
tmp5 = tmp0 == tmp4
tmp6 = x1
tmp7 = tmp6 == tmp4
tmp11 = tmp9 * tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = tmp4 == tmp12
tmp14 = tmp6 == tmp12
tmp18 = tmp16 * tmp17
tmp19 = 0.0
tmp20 = tl.where(tmp14, tmp18, tmp19)
tmp21 = tl.where(tmp13, tmp20, tmp19)
tmp22 = tl.where(tmp7, tmp11, tmp21)
tmp23 = tmp0 == tmp12
tmp24 = tl.where(tmp23, tmp20, tmp19)
tmp25 = tl.where(tmp5, tmp22, tmp24)
tmp26 = tl.where(tmp2, tmp3, tmp25)
tl.store(out_ptr0 + x5, tmp26, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 36
x1 = xindex // 9 % 4
x0 = xindex % 9
x4 = xindex % 36
x5 = xindex
tmp5 = tl.load(in_ptr0 + 3)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp7 = tl.load(in_ptr1 + (27 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (108 + x4), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + x5, xmask)
tmp0 = x2
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp8 = tmp6 * tmp7
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp12 = tl.where(tmp2, tmp10, tmp11)
tl.store(out_ptr0 + x5, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr2 + x0, xmask)
tmp7 = tl.load(in_ptr3 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tl.store(in_out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask)
tmp5 = tl.load(in_out_ptr0 + x0, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask)
tmp9 = tl.load(in_ptr4 + x0, xmask)
tmp10 = tl.load(in_ptr5 + x0, xmask)
tmp13 = tl.load(in_ptr6 + x0, xmask)
tmp14 = tl.load(in_ptr7 + x0, xmask)
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tmp11 = tmp9 + tmp10
tmp12 = tmp8 + tmp11
tmp15 = tmp13 + tmp14
tmp16 = tmp12 + tmp15
tl.store(in_out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_7(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = tmp5 * tmp2
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22, primals_23
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_9, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_13, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_14, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_18, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_19, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_20, (4,), (1,))
assert_size_stride(primals_21, (4,), (1,))
assert_size_stride(primals_22, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_23, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(4, 4)](primals_3, buf0, 4, 4,
XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(primals_4, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 3, 3), (36, 9, 3, 1))
buf2 = empty_strided_cuda((1, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused_mul_ones_1[grid(36)](primals_5, buf2, 36, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_5
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 1, 1), (4, 1, 1, 1))
buf4 = empty_strided_cuda((4, 3, 3), (9, 3, 1), torch.float32)
triton_poi_fused_2[grid(36)](primals_8, primals_9, buf4, 36, XBLOCK
=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused_zeros_3[grid(144)](buf4, primals_8, primals_9,
buf5, 144, XBLOCK=256, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused_4[grid(144)](primals_8, primals_9, buf5, buf6, 144,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_8
buf7 = buf0
del buf0
triton_poi_fused_convolution_0[grid(4, 4)](primals_7, buf7, 4, 4,
XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
buf8 = extern_kernels.convolution(buf6, buf7, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 3, 3), (36, 9, 3, 1))
buf9 = reinterpret_tensor(buf4, (1, 4, 3, 3), (36, 9, 3, 1), 0)
del buf4
triton_poi_fused_mul_ones_1[grid(36)](primals_11, buf9, 36, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_11
buf10 = extern_kernels.convolution(buf9, buf6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (1, 4, 1, 1), (4, 1, 1, 1))
buf11 = empty_strided_cuda((4, 3, 3), (9, 3, 1), torch.float32)
triton_poi_fused_2[grid(36)](primals_13, primals_14, buf11, 36,
XBLOCK=64, num_warps=1, num_stages=1)
buf12 = buf5
del buf5
triton_poi_fused_zeros_3[grid(144)](buf11, primals_13, primals_14,
buf12, 144, XBLOCK=256, num_warps=4, num_stages=1)
buf13 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused_4[grid(144)](primals_13, primals_14, buf12, buf13,
144, XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf14 = buf7
del buf7
triton_poi_fused_convolution_0[grid(4, 4)](primals_12, buf14, 4, 4,
XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
buf15 = extern_kernels.convolution(buf13, buf14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 4, 3, 3), (36, 9, 3, 1))
buf16 = reinterpret_tensor(buf11, (1, 4, 3, 3), (36, 9, 3, 1), 0)
del buf11
triton_poi_fused_mul_ones_1[grid(36)](primals_16, buf16, 36, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_16
buf17 = extern_kernels.convolution(buf16, buf13, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (1, 4, 1, 1), (4, 1, 1, 1))
buf18 = empty_strided_cuda((4, 3, 3), (9, 3, 1), torch.float32)
triton_poi_fused_2[grid(36)](primals_18, primals_19, buf18, 36,
XBLOCK=64, num_warps=1, num_stages=1)
buf19 = buf12
del buf12
triton_poi_fused_zeros_3[grid(144)](buf18, primals_18, primals_19,
buf19, 144, XBLOCK=256, num_warps=4, num_stages=1)
buf20 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused_4[grid(144)](primals_18, primals_19, buf19, buf20,
144, XBLOCK=256, num_warps=4, num_stages=1)
del buf19
del primals_18
buf21 = buf14
del buf14
triton_poi_fused_convolution_0[grid(4, 4)](primals_17, buf21, 4, 4,
XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
buf22 = extern_kernels.convolution(buf20, buf21, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 4, 3, 3), (36, 9, 3, 1))
del buf21
buf23 = reinterpret_tensor(buf18, (1, 4, 3, 3), (36, 9, 3, 1), 0)
del buf18
triton_poi_fused_mul_ones_1[grid(36)](primals_21, buf23, 36, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_21
buf24 = extern_kernels.convolution(buf23, buf20, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (1, 4, 1, 1), (4, 1, 1, 1))
buf25 = buf1
del buf1
triton_poi_fused_add_5[grid(144)](buf25, primals_1, buf8, buf15,
buf22, 144, XBLOCK=256, num_warps=4, num_stages=1)
del buf15
del buf22
del buf8
del primals_1
buf26 = reinterpret_tensor(buf10, (4,), (1,), 0)
del buf10
triton_poi_fused_add_6[grid(4)](buf26, primals_2, buf3, primals_6,
primals_10, buf17, primals_15, buf24, primals_20, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del buf17
del buf24
del buf3
del primals_10
del primals_15
del primals_2
del primals_20
del primals_6
buf27 = extern_kernels.convolution(primals_22, buf25, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 4, 4, 4), (64, 16, 4, 1))
buf28 = buf27
del buf27
buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__prelu_kernel_convolution_7[grid(256)](buf28,
buf26, primals_23, buf29, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf26
return (buf29, primals_4, primals_9, primals_14, primals_19, primals_22,
primals_23, reinterpret_tensor(primals_3, (4, 4, 1, 1), (1, 4, 1, 1
), 0), buf2, buf6, reinterpret_tensor(primals_7, (4, 4, 1, 1), (1,
4, 1, 1), 0), buf9, buf13, reinterpret_tensor(primals_12, (4, 4, 1,
1), (1, 4, 1, 1), 0), buf16, buf20, reinterpret_tensor(primals_17,
(4, 4, 1, 1), (1, 4, 1, 1), 0), buf23, buf25, buf28)
class SeqConv3x3(nn.Module):
def __init__(self, seq_type, inp_planes, out_planes, depth_multiplier):
super(SeqConv3x3, self).__init__()
self.type = seq_type
self.inp_planes = inp_planes
self.out_planes = out_planes
if self.type == 'conv1x1-conv3x3':
self.mid_planes = int(out_planes * depth_multiplier)
conv0 = torch.nn.Conv2d(self.inp_planes, self.mid_planes,
kernel_size=1, padding=0)
self.k0 = conv0.weight
self.b0 = conv0.bias
conv1 = torch.nn.Conv2d(self.mid_planes, self.out_planes,
kernel_size=3)
self.k1 = conv1.weight
self.b1 = conv1.bias
elif self.type == 'conv1x1-sobelx':
conv0 = torch.nn.Conv2d(self.inp_planes, self.out_planes,
kernel_size=1, padding=0)
self.k0 = conv0.weight
self.b0 = conv0.bias
scale = torch.randn(size=(self.out_planes, 1, 1, 1)) * 0.001
self.scale = nn.Parameter(scale)
bias = torch.randn(self.out_planes) * 0.001
bias = torch.reshape(bias, (self.out_planes,))
self.bias = nn.Parameter(bias)
self.mask = torch.zeros((self.out_planes, 1, 3, 3), dtype=torch
.float32)
for i in range(self.out_planes):
self.mask[i, 0, 0, 0] = 1.0
self.mask[i, 0, 1, 0] = 2.0
self.mask[i, 0, 2, 0] = 1.0
self.mask[i, 0, 0, 2] = -1.0
self.mask[i, 0, 1, 2] = -2.0
self.mask[i, 0, 2, 2] = -1.0
self.mask = nn.Parameter(data=self.mask, requires_grad=False)
elif self.type == 'conv1x1-sobely':
conv0 = torch.nn.Conv2d(self.inp_planes, self.out_planes,
kernel_size=1, padding=0)
self.k0 = conv0.weight
self.b0 = conv0.bias
scale = torch.randn(size=(self.out_planes, 1, 1, 1)) * 0.001
self.scale = nn.Parameter(torch.FloatTensor(scale))
bias = torch.randn(self.out_planes) * 0.001
bias = torch.reshape(bias, (self.out_planes,))
self.bias = nn.Parameter(torch.FloatTensor(bias))
self.mask = torch.zeros((self.out_planes, 1, 3, 3), dtype=torch
.float32)
for i in range(self.out_planes):
self.mask[i, 0, 0, 0] = 1.0
self.mask[i, 0, 0, 1] = 2.0
self.mask[i, 0, 0, 2] = 1.0
self.mask[i, 0, 2, 0] = -1.0
self.mask[i, 0, 2, 1] = -2.0
self.mask[i, 0, 2, 2] = -1.0
self.mask = nn.Parameter(data=self.mask, requires_grad=False)
elif self.type == 'conv1x1-laplacian':
conv0 = torch.nn.Conv2d(self.inp_planes, self.out_planes,
kernel_size=1, padding=0)
self.k0 = conv0.weight
self.b0 = conv0.bias
scale = torch.randn(size=(self.out_planes, 1, 1, 1)) * 0.001
self.scale = nn.Parameter(torch.FloatTensor(scale))
bias = torch.randn(self.out_planes) * 0.001
bias = torch.reshape(bias, (self.out_planes,))
self.bias = nn.Parameter(torch.FloatTensor(bias))
self.mask = torch.zeros((self.out_planes, 1, 3, 3), dtype=torch
.float32)
for i in range(self.out_planes):
self.mask[i, 0, 0, 1] = 1.0
self.mask[i, 0, 1, 0] = 1.0
self.mask[i, 0, 1, 2] = 1.0
self.mask[i, 0, 2, 1] = 1.0
self.mask[i, 0, 1, 1] = -4.0
self.mask = nn.Parameter(data=self.mask, requires_grad=False)
else:
raise ValueError('the type of seqconv is not supported!')
def forward(self, x):
if self.type == 'conv1x1-conv3x3':
y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1)
y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0)
b0_pad = self.b0.view(1, -1, 1, 1)
y0[:, :, 0:1, :] = b0_pad
y0[:, :, -1:, :] = b0_pad
y0[:, :, :, 0:1] = b0_pad
y0[:, :, :, -1:] = b0_pad
y1 = F.conv2d(input=y0, weight=self.k1, bias=self.b1, stride=1)
else:
y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1)
y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0)
b0_pad = self.b0.view(1, -1, 1, 1)
y0[:, :, 0:1, :] = b0_pad
y0[:, :, -1:, :] = b0_pad
y0[:, :, :, 0:1] = b0_pad
y0[:, :, :, -1:] = b0_pad
y1 = F.conv2d(input=y0, weight=self.scale * self.mask, bias=
self.bias, stride=1, groups=self.out_planes)
return y1
def rep_params(self):
device = self.k0.get_device()
if device < 0:
device = None
if self.type == 'conv1x1-conv3x3':
RK = F.conv2d(input=self.k1, weight=self.k0.permute(1, 0, 2, 3))
RB = torch.ones(1, self.mid_planes, 3, 3, device=device
) * self.b0.view(1, -1, 1, 1)
RB = F.conv2d(input=RB, weight=self.k1).view(-1) + self.b1
else:
tmp = self.scale * self.mask
k1 = torch.zeros((self.out_planes, self.out_planes, 3, 3),
device=device)
for i in range(self.out_planes):
k1[i, i, :, :] = tmp[i, 0, :, :]
b1 = self.bias
RK = F.conv2d(input=k1, weight=self.k0.permute(1, 0, 2, 3))
RB = torch.ones(1, self.out_planes, 3, 3, device=device
) * self.b0.view(1, -1, 1, 1)
RB = F.conv2d(input=RB, weight=k1).view(-1) + b1
return RK, RB
class ECBNew(nn.Module):
def __init__(self, inp_planes, out_planes, depth_multiplier, act_type=
'prelu', with_idt=False):
super(ECBNew, self).__init__()
self.depth_multiplier = depth_multiplier
self.inp_planes = inp_planes
self.out_planes = out_planes
self.act_type = act_type
if with_idt and self.inp_planes == self.out_planes:
self.with_idt = True
else:
self.with_idt = False
self.conv3x3 = torch.nn.Conv2d(self.inp_planes, self.out_planes,
kernel_size=3, padding=1)
self.conv1x1_3x3 = SeqConv3x3('conv1x1-conv3x3', self.inp_planes,
self.out_planes, self.depth_multiplier)
self.conv1x1_sbx = SeqConv3x3('conv1x1-sobelx', self.inp_planes,
self.out_planes, -1)
self.conv1x1_sby = SeqConv3x3('conv1x1-sobely', self.inp_planes,
self.out_planes, -1)
self.conv1x1_lpl = SeqConv3x3('conv1x1-laplacian', self.inp_planes,
self.out_planes, -1)
if self.act_type == 'prelu':
self.act = nn.PReLU(num_parameters=self.out_planes)
elif self.act_type == 'relu':
self.act = nn.ReLU(inplace=True)
elif self.act_type == 'rrelu':
self.act = nn.RReLU(lower=-0.05, upper=0.05)
elif self.act_type == 'softplus':
self.act = nn.Softplus()
elif self.act_type == 'linear':
pass
else:
            raise ValueError('The type of activation is not supported!')
def rep_params(self):
K0, B0 = self.conv3x3.weight, self.conv3x3.bias
K1, B1 = self.conv1x1_3x3.rep_params()
K2, B2 = self.conv1x1_sbx.rep_params()
K3, B3 = self.conv1x1_sby.rep_params()
K4, B4 = self.conv1x1_lpl.rep_params()
RK, RB = K0 + K1 + K2 + K3 + K4, B0 + B1 + B2 + B3 + B4
if self.with_idt:
device = RK.get_device()
if device < 0:
device = None
K_idt = torch.zeros(self.out_planes, self.out_planes, 3, 3,
device=device)
for i in range(self.out_planes):
K_idt[i, i, 1, 1] = 1.0
B_idt = 0.0
RK, RB = RK + K_idt, RB + B_idt
return RK, RB
def forward(self, input_0):
primals_1 = self.conv3x3.weight
primals_2 = self.conv3x3.bias
primals_3 = self.conv1x1_3x3.k0
primals_5 = self.conv1x1_3x3.b0
primals_4 = self.conv1x1_3x3.k1
primals_6 = self.conv1x1_3x3.b1
primals_7 = self.conv1x1_sbx.k0
primals_10 = self.conv1x1_sbx.b0
primals_8 = self.conv1x1_sbx.scale
primals_11 = self.conv1x1_sbx.bias
primals_9 = self.conv1x1_sbx.mask
primals_12 = self.conv1x1_sby.k0
primals_15 = self.conv1x1_sby.b0
primals_13 = self.conv1x1_sby.scale
primals_16 = self.conv1x1_sby.bias
primals_14 = self.conv1x1_sby.mask
primals_17 = self.conv1x1_lpl.k0
primals_20 = self.conv1x1_lpl.b0
primals_18 = self.conv1x1_lpl.scale
primals_21 = self.conv1x1_lpl.bias
primals_19 = self.conv1x1_lpl.mask
primals_23 = self.act.weight
primals_22 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23])
return output[0]
| thinkreed/ECBSR | ECB | false | 16624 | ["Apache-2.0"] | 162 | 152b9fef9b9fb61b6e5a93677229143652ef305d | https://github.com/thinkreed/ECBSR/tree/152b9fef9b9fb61b6e5a93677229143652ef305d |
XTanhLoss
|
import torch
class XTanhLoss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y_t, y_prime_t):
ey_t = y_t - y_prime_t
return torch.mean(ey_t * torch.tanh(ey_t))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
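A one-line numerical check of the loss against its closed form mean(e * tanh(e)) with
e = y_t - y_prime_t, using the record's get_inputs().

import torch

loss = XTanhLoss()
y_t, y_prime_t = get_inputs()
e = y_t - y_prime_t
assert torch.allclose(loss(y_t, y_prime_t), (e * torch.tanh(e)).mean())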
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_sub_tanh_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp2 * tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_sub_tanh_0[grid(1)](buf1, arg0_1, arg1_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class XTanhLossNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
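A parity sketch for the fused kernel above, assuming a CUDA device; the single-pass
sub/tanh/mul/mean reduction should match the eager expression up to reduction-order
error (the atol is an assumption).

import torch

crit = XTanhLossNew()
a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(crit(a, b), torch.mean((a - b) * torch.tanh(a - b)), atol=1e-6)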
| tuantle/regression-losses-pytorch | XTanhLoss | false | 16625 | ["MIT"] | 82 | 2893f4439ada5df239e3afd0ec7e781dd61403e9 | https://github.com/tuantle/regression-losses-pytorch/tree/2893f4439ada5df239e3afd0ec7e781dd61403e9 |
BondEnergyModule
|
import torch
import torch.nn
import torch.nn as nn
from itertools import repeat
def gen(src, index, dim=-1, out=None, dim_size=None, fill_value=0):
dim = range(src.dim())[dim]
if index.dim() == 1:
index_size = list(repeat(1, src.dim()))
index_size[dim] = src.size(dim)
index = index.view(index_size).expand_as(src)
if out is None:
dim_size = index.max().item() + 1 if dim_size is None else dim_size
out_size = list(src.size())
out_size[dim] = dim_size
out = src.new_full(out_size, fill_value)
return src, out, index, dim
def scatter_add(src, index, dim=-1, out=None, dim_size=None, fill_value=0):
src, out, index, dim = gen(src, index, dim, out, dim_size, fill_value)
return out.scatter_add_(dim, index, src)
class BondEnergyModule(nn.Module):
def __init__(self, batch=True):
super().__init__()
def forward(self, xyz, bond_adj, bond_len, bond_par):
e = (xyz[bond_adj[:, 0]] - xyz[bond_adj[:, 1]]).pow(2).sum(1).sqrt()[
:, None]
ebond = bond_par * (e - bond_len) ** 2
energy = 0.5 * scatter_add(src=ebond, index=bond_adj[:, 0], dim=0,
dim_size=xyz.shape[0])
energy += 0.5 * scatter_add(src=ebond, index=bond_adj[:, 1], dim=0,
dim_size=xyz.shape[0])
return energy
def get_inputs():
return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4, 4], dtype
=torch.int64), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
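A small worked example of the scatter_add helper above: rows of src are summed into
out at the row indices given by index, and untouched rows keep the fill value.

import torch

src = torch.tensor([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
index = torch.tensor([0, 0, 2])
out = scatter_add(src, index, dim=0, dim_size=3)
# rows 0 and 1 of src land in out[0]; nothing maps to out[1]
assert torch.equal(out, torch.tensor([[3.0, 3.0], [0.0, 0.0], [3.0, 3.0]]))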
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.nn as nn
from itertools import repeat
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + 4 * tmp4, xmask, eviction_policy='evict_last')
tmp8 = tmp7 + tmp1
tmp9 = tmp7 < 0
tmp10 = tl.where(tmp9, tmp8, tmp7)
tl.device_assert((0 <= tmp10) & (tmp10 < 4) | ~xmask,
'index out of bounds: 0 <= tmp10 < 4')
tmp12 = tl.load(in_ptr1 + 4 * tmp10, xmask, eviction_policy='evict_last')
tmp13 = tmp6 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.load(in_ptr1 + (1 + 4 * tmp4), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + (1 + 4 * tmp10), xmask, eviction_policy=
'evict_last')
tmp17 = tmp15 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp14 + tmp18
tmp20 = tl.load(in_ptr1 + (2 + 4 * tmp4), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr1 + (2 + 4 * tmp10), xmask, eviction_policy=
'evict_last')
tmp22 = tmp20 - tmp21
tmp23 = tmp22 * tmp22
tmp24 = tmp19 + tmp23
tmp25 = tl.load(in_ptr1 + (3 + 4 * tmp4), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr1 + (3 + 4 * tmp10), xmask, eviction_policy=
'evict_last')
tmp27 = tmp25 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp24 + tmp28
tl.store(out_ptr0 + x0, tmp29, xmask)
@triton.jit
def triton_poi_fused_mul_new_full_pow_scatter_add_sub_1(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_new_full_pow_scatter_add_sub_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x2, xmask)
tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tl.device_assert((0 <= tmp0) & (tmp0 < 4) | ~xmask,
'index out of bounds: 0 <= tmp0 < 4')
tmp4 = tmp3.to(tl.float32)
tmp5 = libdevice.sqrt(tmp4)
tmp7 = tmp5 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tmp2 * tmp8
tl.device_assert((0 <= tmp10) & (tmp10 < 4) | ~xmask,
'index out of bounds: 0 <= tmp10 < 4')
tl.atomic_add(out_ptr0 + (x0 + 4 * tmp0), tmp9, xmask, sem='relaxed')
tl.atomic_add(out_ptr1 + (x0 + 4 * tmp10), tmp9, xmask, sem='relaxed')
@triton.jit
def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused_index_pow_sub_sum_0[grid(4)](arg0_1, arg1_1, buf0,
4, XBLOCK=4, num_warps=1, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_new_full_pow_scatter_add_sub_1[grid(16)](buf1,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_new_full_pow_scatter_add_sub_1[grid(16)](buf3,
16, XBLOCK=16, num_warps=1, num_stages=1)
triton_poi_fused_mul_new_full_pow_scatter_add_sub_2[grid(16)](arg0_1,
arg3_1, buf0, arg2_1, buf1, buf3, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del arg0_1
del arg2_1
del arg3_1
del buf0
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_3[grid(16)](buf1, buf3, buf5, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf1
del buf3
return buf5,
from itertools import repeat
def gen(src, index, dim=-1, out=None, dim_size=None, fill_value=0):
dim = range(src.dim())[dim]
if index.dim() == 1:
index_size = list(repeat(1, src.dim()))
index_size[dim] = src.size(dim)
index = index.view(index_size).expand_as(src)
if out is None:
dim_size = index.max().item() + 1 if dim_size is None else dim_size
out_size = list(src.size())
out_size[dim] = dim_size
out = src.new_full(out_size, fill_value)
return src, out, index, dim
def scatter_add(src, index, dim=-1, out=None, dim_size=None, fill_value=0):
src, out, index, dim = gen(src, index, dim, out, dim_size, fill_value)
return out.scatter_add_(dim, index, src)
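# Illustrative use of the eager scatter_add helper above (values made up for
# the example, not from the source repo):
#   src = torch.tensor([1.0, 2.0, 3.0])   # e.g. per-bond energies
#   index = torch.tensor([0, 1, 0])       # target slot of each value
#   scatter_add(src, index)               # -> tensor([4., 2.])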
class BondEnergyModuleNew(nn.Module):
def __init__(self, batch=True):
super().__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
torchmd/mdgrad
|
BondEnergyModule
| false
| 16,626
|
[
"MIT"
] | 54
|
77bd7685b74b41acf54a9483546e1e8cb545eb01
|
https://github.com/torchmd/mdgrad/tree/77bd7685b74b41acf54a9483546e1e8cb545eb01
|
ConvMeanPool
|
import torch
import torch.nn.functional as F
import torch.nn as nn
def l2normalize(v, esp=1e-08):
return v / (v.norm() + esp)
def sn_weight(weight, u, height, n_power_iterations):
weight.requires_grad_(False)
for _ in range(n_power_iterations):
v = l2normalize(torch.mv(weight.view(height, -1).t(), u))
u = l2normalize(torch.mv(weight.view(height, -1), v))
weight.requires_grad_(True)
sigma = u.dot(weight.view(height, -1).mv(v))
return torch.div(weight, sigma), u
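# The loop above is power iteration: u and v converge to the leading left and
# right singular vectors of the flattened weight, so sigma = u^T W v estimates
# the spectral norm ||W||_2 and dividing by it keeps the layer approximately
# 1-Lipschitz (spectral normalization).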
def get_nonlinearity(nonlinearity=None):
if not nonlinearity:
pass
elif callable(nonlinearity):
if nonlinearity == nn.LeakyReLU:
nonlinearity = nonlinearity(0.02, inplace=True)
elif hasattr(nn, nonlinearity):
nonlinearity = getattr(nn, nonlinearity)
        if nonlinearity == nn.LeakyReLU:
nonlinearity = nonlinearity(0.02, inplace=True)
else:
nonlinearity = nonlinearity()
elif hasattr(nn.functional, nonlinearity):
nonlinearity = getattr(nn.functional, nonlinearity)
else:
raise ValueError(nonlinearity)
return nonlinearity
class SNConv2d(nn.Conv2d):
def __init__(self, *args, n_power_iterations=1, **kwargs):
super(SNConv2d, self).__init__(*args, **kwargs)
self.n_power_iterations = n_power_iterations
self.height = self.weight.shape[0]
self.register_buffer('u', l2normalize(self.weight.new_empty(self.
height).normal_(0, 1)))
def forward(self, input):
w_sn, self.u = sn_weight(self.weight, self.u, self.height, self.
n_power_iterations)
return F.conv2d(input, w_sn, self.bias, self.stride, self.padding,
self.dilation, self.groups)
class ConvMeanPool(nn.Module):
def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix=
'', spectral_norm=False):
super(ConvMeanPool, self).__init__()
Conv2d = SNConv2d if spectral_norm else nn.Conv2d
models = nn.Sequential()
nonlinearity = get_nonlinearity(nonlinearity)
name = 'cmp' + prefix
models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias=
False))
models.add_module(name + '_pool', nn.AvgPool2d(2, count_include_pad
=False))
if nonlinearity:
models.add_module('{}_{}'.format(name, nonlinearity.__class__.
__name__), nonlinearity)
self.models = models
def forward(self, x):
x = self.models(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4, 'f_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 9 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 9 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (3 + 9 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + 9 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
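# With a 4x4 input, the padded 4x4 conv produces a 3x3 map, and
# AvgPool2d(2, count_include_pad=False) reduces it to a single value; the
# kernel above therefore averages only the four top-left elements
# (offsets 0, 1, 3, 4 of each 3x3 plane).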
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf1, primals_1, primals_2, buf0
def l2normalize(v, esp=1e-08):
return v / (v.norm() + esp)
def sn_weight(weight, u, height, n_power_iterations):
weight.requires_grad_(False)
for _ in range(n_power_iterations):
v = l2normalize(torch.mv(weight.view(height, -1).t(), u))
u = l2normalize(torch.mv(weight.view(height, -1), v))
weight.requires_grad_(True)
sigma = u.dot(weight.view(height, -1).mv(v))
return torch.div(weight, sigma), u
def get_nonlinearity(nonlinearity=None):
if not nonlinearity:
pass
elif callable(nonlinearity):
if nonlinearity == nn.LeakyReLU:
nonlinearity = nonlinearity(0.02, inplace=True)
elif hasattr(nn, nonlinearity):
nonlinearity = getattr(nn, nonlinearity)
        if nonlinearity == nn.LeakyReLU:
nonlinearity = nonlinearity(0.02, inplace=True)
else:
nonlinearity = nonlinearity()
elif hasattr(nn.functional, nonlinearity):
nonlinearity = getattr(nn.functional, nonlinearity)
else:
raise ValueError(nonlinearity)
return nonlinearity
class SNConv2d(nn.Conv2d):
def __init__(self, *args, n_power_iterations=1, **kwargs):
super(SNConv2d, self).__init__(*args, **kwargs)
self.n_power_iterations = n_power_iterations
self.height = self.weight.shape[0]
self.register_buffer('u', l2normalize(self.weight.new_empty(self.
height).normal_(0, 1)))
def forward(self, input):
w_sn, self.u = sn_weight(self.weight, self.u, self.height, self.
n_power_iterations)
return F.conv2d(input, w_sn, self.bias, self.stride, self.padding,
self.dilation, self.groups)
class ConvMeanPoolNew(nn.Module):
def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix=
'', spectral_norm=False):
super(ConvMeanPoolNew, self).__init__()
Conv2d = SNConv2d if spectral_norm else nn.Conv2d
models = nn.Sequential()
nonlinearity = get_nonlinearity(nonlinearity)
name = 'cmp' + prefix
models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias=
False))
models.add_module(name + '_pool', nn.AvgPool2d(2, count_include_pad
=False))
if nonlinearity:
models.add_module('{}_{}'.format(name, nonlinearity.__class__.
__name__), nonlinearity)
self.models = models
def forward(self, input_0):
primals_1 = self.models.cmp.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
tsirif/cortex
|
ConvMeanPool
| false
| 16,627
|
[
"BSD-3-Clause"
] | 109
|
2837b220f9fb73279df3815bb18b274106412c08
|
https://github.com/tsirif/cortex/tree/2837b220f9fb73279df3815bb18b274106412c08
|
XSigmoidLoss
|
import torch
class XSigmoidLoss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y_t, y_prime_t):
ey_t = y_t - y_prime_t
return torch.mean(2 * ey_t * torch.sigmoid(ey_t) - ey_t)
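# With e = y_t - y_prime_t, 2*e*sigmoid(e) - e behaves like e^2/2 near zero
# (since sigmoid(e) ~ 1/2 + e/4) and like |e| for large |e| (sigmoid
# saturates at 0 or 1), so this is a smooth approximation of absolute error.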
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = tl.sigmoid(tmp2)
tmp6 = tmp4 * tmp5
tmp7 = tmp6 - tmp2
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_sigmoid_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class XSigmoidLossNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tuantle/regression-losses-pytorch
|
XSigmoidLoss
| false
| 16,628
|
[
"MIT"
] | 82
|
2893f4439ada5df239e3afd0ec7e781dd61403e9
|
https://github.com/tuantle/regression-losses-pytorch/tree/2893f4439ada5df239e3afd0ec7e781dd61403e9
|
BiAttention
|
import torch
from typing import Optional
import torch.nn as nn
from torch.nn.parameter import Parameter
class BiAttention(nn.Module):
def __init__(self, input_size_encoder: 'int', input_size_decoder: 'int',
num_labels: 'int', biaffine: 'bool'=True, **kwargs) ->None:
super(BiAttention, self).__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.num_labels = num_labels
self.biaffine = biaffine
self.W_e = Parameter(torch.Tensor(self.num_labels, self.
input_size_encoder))
self.W_d = Parameter(torch.Tensor(self.num_labels, self.
input_size_decoder))
self.b = Parameter(torch.Tensor(self.num_labels, 1, 1))
if self.biaffine:
self.U = Parameter(torch.Tensor(self.num_labels, self.
input_size_decoder, self.input_size_encoder))
else:
self.register_parameter('U', None)
self.reset_parameters()
def reset_parameters(self) ->None:
nn.init.xavier_uniform_(self.W_e)
nn.init.xavier_uniform_(self.W_d)
nn.init.constant_(self.b, 0.0)
if self.biaffine:
nn.init.xavier_uniform_(self.U)
def forward(self, input_d: 'torch.Tensor', input_e: 'torch.Tensor',
mask_d: 'Optional[torch.Tensor]'=None, mask_e:
'Optional[torch.Tensor]'=None) ->torch.Tensor:
assert input_d.size(0) == input_e.size(0)
_batch, _length_decoder, _ = input_d.size()
_, _length_encoder, _ = input_e.size()
out_d = torch.matmul(self.W_d, input_d.transpose(1, 2)).unsqueeze(3)
out_e = torch.matmul(self.W_e, input_e.transpose(1, 2)).unsqueeze(2)
if self.biaffine:
output = torch.matmul(input_d.unsqueeze(1), self.U)
output = torch.matmul(output, input_e.unsqueeze(1).transpose(2, 3))
output = output + out_d + out_e + self.b
else:
            output = out_d + out_e + self.b
if mask_d is not None:
output = output * mask_d.unsqueeze(1).unsqueeze(3
) * mask_e.unsqueeze(1).unsqueeze(2)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size_encoder': 4, 'input_size_decoder': 4,
'num_labels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * x0 + 16 * x3), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (x2 + 4 * x1 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2 + 4 * x0 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + x4, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](primals_5, buf3, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4)
buf5 = buf3
del buf3
triton_poi_fused_clone_2[grid(256)](primals_2, buf5, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (16, 4, 4), (16,
4, 1), 0), out=buf6)
del buf4
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_add_3[grid(256)](buf7, buf0, buf1, primals_6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del buf1
del primals_6
return buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(buf5, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0)
class BiAttentionNew(nn.Module):
def __init__(self, input_size_encoder: 'int', input_size_decoder: 'int',
num_labels: 'int', biaffine: 'bool'=True, **kwargs) ->None:
super(BiAttentionNew, self).__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.num_labels = num_labels
self.biaffine = biaffine
self.W_e = Parameter(torch.Tensor(self.num_labels, self.
input_size_encoder))
self.W_d = Parameter(torch.Tensor(self.num_labels, self.
input_size_decoder))
self.b = Parameter(torch.Tensor(self.num_labels, 1, 1))
if self.biaffine:
self.U = Parameter(torch.Tensor(self.num_labels, self.
input_size_decoder, self.input_size_encoder))
else:
self.register_parameter('U', None)
self.reset_parameters()
def reset_parameters(self) ->None:
nn.init.xavier_uniform_(self.W_e)
nn.init.xavier_uniform_(self.W_d)
nn.init.constant_(self.b, 0.0)
if self.biaffine:
nn.init.xavier_uniform_(self.U)
def forward(self, input_0, input_1):
primals_3 = self.W_e
primals_4 = self.W_d
primals_6 = self.b
primals_1 = self.U
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
tucan9389/KLUE-baseline
|
BiAttention
| false
| 16,629
|
[
"Apache-2.0"
] | 71
|
add61158e61f86adfca65087237443828b650090
|
https://github.com/tucan9389/KLUE-baseline/tree/add61158e61f86adfca65087237443828b650090
|
AlgebraicLoss
|
import torch
class AlgebraicLoss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y_t, y_prime_t):
ey_t = y_t - y_prime_t
return torch.mean(ey_t * ey_t / torch.sqrt(1 + ey_t * ey_t))
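# e^2 / sqrt(1 + e^2) grows like e^2 for small errors and like |e| for large
# ones, so this loss interpolates between MSE-like and MAE-like behaviour.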
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 1.0
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tmp3 / tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_sqrt_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class AlgebraicLossNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tuantle/regression-losses-pytorch
|
AlgebraicLoss
| false
| 16,630
|
[
"MIT"
] | 82
|
2893f4439ada5df239e3afd0ec7e781dd61403e9
|
https://github.com/tuantle/regression-losses-pytorch/tree/2893f4439ada5df239e3afd0ec7e781dd61403e9
|
GPT2Postprocessing
|
from _paritybench_helpers import _mock_config
import torch
from torch import nn
class GPT2Postprocessing(nn.Module):
def __init__(self, config):
super().__init__()
self.ln_f = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_epsilon)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size,
bias=False)
def forward(self, hidden_states):
hidden_states = self.ln_f(hidden_states)
lm_logits = self.lm_head(hidden_states)
return lm_logits
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, layer_norm_epsilon=1,
vocab_size=4)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1.0
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
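# LayerNorm is split in two: the kernel above produces, per row of 4 elements,
# the mean and rsqrt(var + eps) (eps is 1.0 here, from layer_norm_epsilon=1 in
# the mock config); the kernel below applies
# (x - mean) * rsqrt * weight + bias elementwise.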
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4
class GPT2PostprocessingNew(nn.Module):
def __init__(self, config):
super().__init__()
self.ln_f = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_epsilon)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size,
bias=False)
def forward(self, input_0):
primals_1 = self.ln_f.weight
primals_2 = self.ln_f.bias
primals_4 = self.lm_head.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
tunib-ai/large-scale-lm-tutorials
|
GPT2Postprocessing
| false
| 16,631
|
[
"Apache-2.0"
] | 128
|
ca29ff9f945a59abcc3e3f1000c4d83de97973d4
|
https://github.com/tunib-ai/large-scale-lm-tutorials/tree/ca29ff9f945a59abcc3e3f1000c4d83de97973d4
|
KLDivergenceLoss
|
from torch.nn import Module
import torch
import torch.utils.data
import torch.nn.functional
import torch.autograd
class KLDivergenceLoss(Module):
"""
<a id="KLDivergenceLoss"></a>
## KL Divergence Regularization Loss
This tries to shrink the total evidence to zero if the sample cannot be correctly classified.
    First we calculate $\\tilde{\\alpha}_k = y_k + (1 - y_k) \\textcolor{orange}{\\alpha_k}$ the
    Dirichlet parameters after removing the correct evidence.
    \\begin{align}
    &KL \\Big[ D(\\mathbf{p} \\vert \\mathbf{\\tilde{\\alpha}}) \\Big \\Vert
    D(\\mathbf{p} \\vert <1, \\dots, 1>) \\Big] \\\\
    &= \\log \\Bigg( \\frac{\\Gamma \\Big( \\sum_{k=1}^K \\tilde{\\alpha}_k \\Big)}
    {\\Gamma(K) \\prod_{k=1}^K \\Gamma(\\tilde{\\alpha}_k)} \\Bigg)
    + \\sum_{k=1}^K (\\tilde{\\alpha}_k - 1)
    \\Big[ \\psi(\\tilde{\\alpha}_k) - \\psi(\\tilde{S}) \\Big]
    \\end{align}
    where $\\Gamma(\\cdot)$ is the gamma function,
    $\\psi(\\cdot)$ is the digamma function and
    $\\tilde{S} = \\sum_{k=1}^K \\tilde{\\alpha}_k$
"""
def forward(self, evidence: 'torch.Tensor', target: 'torch.Tensor'):
"""
* `evidence` is $\\mathbf{e} \\ge 0$ with shape `[batch_size, n_classes]`
* `target` is $\\mathbf{y}$ with shape `[batch_size, n_classes]`
"""
alpha = evidence + 1.0
n_classes = evidence.shape[-1]
alpha_tilde = target + (1 - target) * alpha
strength_tilde = alpha_tilde.sum(dim=-1)
first = torch.lgamma(alpha_tilde.sum(dim=-1)) - torch.lgamma(
alpha_tilde.new_tensor(float(n_classes))) - torch.lgamma(
alpha_tilde).sum(dim=-1)
second = ((alpha_tilde - 1) * (torch.digamma(alpha_tilde) - torch.
digamma(strength_tilde)[:, None])).sum(dim=-1)
loss = first + second
return loss.mean()
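# `first` is the log-normalizer part log Gamma(S~) - log Gamma(K)
# - sum_k log Gamma(alpha~_k); `second` is the digamma part; together they
# form the closed-form KL from D(p | alpha~) to the uniform Dirichlet
# D(p | <1, ..., 1>).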
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 + tmp1
tmp5 = tmp2 * tmp4
tmp6 = tmp0 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_per_fused_add_lgamma_mean_mul_rsub_sub_sum_2(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 4
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr3 + (4 * r1 + 16 * r3), None, eviction_policy=
'evict_last')
tmp44 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (1 + 4 * r1 + 16 * r3), None, eviction_policy
='evict_last')
tmp50 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr3 + (2 + 4 * r1 + 16 * r3), None, eviction_policy
='evict_last')
tmp56 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr3 + (3 + 4 * r1 + 16 * r3), None, eviction_policy
='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 + tmp1
tmp5 = tmp2 * tmp4
tmp6 = tmp0 + tmp5
tmp8 = tmp1 - tmp7
tmp10 = tmp9 + tmp1
tmp11 = tmp8 * tmp10
tmp12 = tmp7 + tmp11
tmp13 = tmp6 + tmp12
tmp15 = tmp1 - tmp14
tmp17 = tmp16 + tmp1
tmp18 = tmp15 * tmp17
tmp19 = tmp14 + tmp18
tmp20 = tmp13 + tmp19
tmp22 = tmp1 - tmp21
tmp24 = tmp23 + tmp1
tmp25 = tmp22 * tmp24
tmp26 = tmp21 + tmp25
tmp27 = tmp20 + tmp26
tmp28 = libdevice.lgamma(tmp27)
tmp29 = 1.7917594909667969
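    # 1.7917594909667969 is lgamma(4.0) = log(3!) = log(Gamma(K)) for K = 4
    # classes, constant-folded by the compiler.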
tmp30 = tmp28 - tmp29
tmp31 = libdevice.lgamma(tmp6)
tmp32 = libdevice.lgamma(tmp12)
tmp33 = tmp31 + tmp32
tmp34 = libdevice.lgamma(tmp19)
tmp35 = tmp33 + tmp34
tmp36 = libdevice.lgamma(tmp26)
tmp37 = tmp35 + tmp36
tmp38 = tmp6 - tmp1
tmp41 = tmp39 - tmp40
tmp42 = tmp38 * tmp41
tmp43 = tmp12 - tmp1
tmp46 = tmp44 - tmp45
tmp47 = tmp43 * tmp46
tmp48 = tmp42 + tmp47
tmp49 = tmp19 - tmp1
tmp52 = tmp50 - tmp51
tmp53 = tmp49 * tmp52
tmp54 = tmp48 + tmp53
tmp55 = tmp26 - tmp1
tmp58 = tmp56 - tmp57
tmp59 = tmp55 * tmp58
tmp60 = tmp54 + tmp59
tmp61 = tmp30 - tmp37
tmp62 = tmp61 + tmp60
tmp63 = tl.broadcast_to(tmp62, [XBLOCK, RBLOCK])
tmp65 = tl.sum(tmp63, 1)[:, None]
tmp66 = 64.0
tmp67 = tmp65 / tmp66
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp67, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_0[grid(256)](arg1_1, arg0_1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf3 = torch.ops.aten.digamma.default(buf2)
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_sum_1[grid(64)](buf2, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
buf6 = torch.ops.aten.digamma.default(buf5)
del buf5
buf7 = buf6
del buf6
buf9 = empty_strided_cuda((), (), torch.float32)
buf10 = buf9
del buf9
triton_per_fused_add_lgamma_mean_mul_rsub_sub_sum_2[grid(1)](buf10,
arg1_1, arg0_1, buf4, buf7, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del buf4
del buf7
return buf10,
class KLDivergenceLossNew(Module):
"""
<a id="KLDivergenceLoss"></a>
## KL Divergence Regularization Loss
This tries to shrink the total evidence to zero if the sample cannot be correctly classified.
    First we calculate $\\tilde{\\alpha}_k = y_k + (1 - y_k) \\textcolor{orange}{\\alpha_k}$ the
    Dirichlet parameters after removing the correct evidence.
    \\begin{align}
    &KL \\Big[ D(\\mathbf{p} \\vert \\mathbf{\\tilde{\\alpha}}) \\Big \\Vert
    D(\\mathbf{p} \\vert <1, \\dots, 1>) \\Big] \\\\
    &= \\log \\Bigg( \\frac{\\Gamma \\Big( \\sum_{k=1}^K \\tilde{\\alpha}_k \\Big)}
    {\\Gamma(K) \\prod_{k=1}^K \\Gamma(\\tilde{\\alpha}_k)} \\Bigg)
    + \\sum_{k=1}^K (\\tilde{\\alpha}_k - 1)
    \\Big[ \\psi(\\tilde{\\alpha}_k) - \\psi(\\tilde{S}) \\Big]
    \\end{align}
    where $\\Gamma(\\cdot)$ is the gamma function,
    $\\psi(\\cdot)$ is the digamma function and
    $\\tilde{S} = \\sum_{k=1}^K \\tilde{\\alpha}_k$
"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
techthiyanes/annotated_deep_learning_paper_implementations
|
KLDivergenceLoss
| false
| 16,632
|
[
"MIT"
] | 3,714
|
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
|
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
|
Conv2dZeroInit
|
import torch
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn
class Conv2dZeroInit(nn.Conv2d):
def __init__(self, channels_in, channels_out, filter_size, stride=1,
padding=0, logscale=3.0):
super().__init__(channels_in, channels_out, filter_size, stride=
stride, padding=padding)
self.register_parameter('logs', nn.Parameter(torch.zeros(
channels_out, 1, 1)))
self.logscale_factor = logscale
def reset_parameters(self):
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
out = super().forward(input)
return out * torch.exp(self.logs * self.logscale_factor)
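# Because reset_parameters zeroes the weight and bias, and logs starts at 0
# (so exp(logs * 3) = 1), the layer initially outputs zeros; this is the
# zero-init trick used in Glow-style flows so a new layer starts as a no-op.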
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels_in': 4, 'channels_out': 4, 'filter_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 3.0
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_exp_mul_0[grid(16)](buf1, primals_2,
primals_4, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, primals_1, primals_3, primals_4, buf1
class Conv2dZeroInitNew(nn.Conv2d):
def __init__(self, channels_in, channels_out, filter_size, stride=1,
padding=0, logscale=3.0):
super().__init__(channels_in, channels_out, filter_size, stride=
stride, padding=padding)
self.register_parameter('logs', nn.Parameter(torch.zeros(
channels_out, 1, 1)))
self.logscale_factor = logscale
def reset_parameters(self):
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_4 = self.logs
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
tychovdo/RevGAN
|
Conv2dZeroInit
| false
| 16,633
|
[
"BSD-3-Clause"
] | 79
|
2af25e6a8176eaab3d424db45fb6ee2cfc5dc9a3
|
https://github.com/tychovdo/RevGAN/tree/2af25e6a8176eaab3d424db45fb6ee2cfc5dc9a3
|
netmodel
|
import torch
import numpy as np
from torch.nn import Parameter
class netmodel(torch.nn.Module):
def __init__(self):
super(netmodel, self).__init__()
self.w0 = Parameter(torch.Tensor(1))
self.w1 = Parameter(torch.Tensor(1))
self.w0.data.uniform_(-1, 1)
self.w1.data.uniform_(-1, 1)
def forward(self, inputs):
y = torch.stack([100 * self.w0 * inputs[:, 0], 0.1 * self.w1 *
inputs[:, 1]])
y = torch.t(y)
return y.contiguous()
def extract_grad(self):
tot_size = self.count_parameters()
pvec = np.zeros(tot_size, np.float32)
count = 0
for param in self.parameters():
sz = param.grad.data.numpy().flatten().shape[0]
pvec[count:count + sz] = param.grad.data.numpy().flatten()
count += sz
return pvec.copy()
def extract_parameters(self):
tot_size = self.count_parameters()
pvec = np.zeros(tot_size, np.float32)
count = 0
for param in self.parameters():
sz = param.data.numpy().flatten().shape[0]
pvec[count:count + sz] = param.data.numpy().flatten()
count += sz
return pvec.copy()
def inject_parameters(self, pvec):
self.count_parameters()
count = 0
for param in self.parameters():
sz = param.data.numpy().flatten().shape[0]
raw = pvec[count:count + sz]
reshaped = raw.reshape(param.data.numpy().shape)
param.data = torch.from_numpy(reshaped)
count += sz
return pvec
def count_parameters(self):
count = 0
for param in self.parameters():
count += param.data.numpy().flatten().shape[0]
return count
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp5 = tl.load(in_ptr0 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp16 = tl.load(in_ptr2 + 0)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp0 = x1 + 4 * x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = 100.0
tmp8 = tmp6 * tmp7
tmp9 = tl.load(in_ptr1 + 4 * (x1 + 4 * x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 * tmp9
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp4, tmp10, tmp11)
tmp13 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp18 = 0.1
tmp19 = tmp17 * tmp18
tmp20 = tl.load(in_ptr1 + (1 + 4 * (-4 + x1 + 4 * x0)), tmp13 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp13, tmp21, tmp22)
tmp24 = tl.where(tmp4, tmp12, tmp23)
tl.store(out_ptr0 + x2, tmp24, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(8)](primals_1, primals_2, primals_3,
buf0, 8, XBLOCK=8, num_warps=1, num_stages=1)
del primals_1
del primals_3
return buf0, reinterpret_tensor(primals_2, (4,), (4,), 0
), reinterpret_tensor(primals_2, (4,), (4,), 1)
class netmodelNew(torch.nn.Module):
def __init__(self):
super(netmodelNew, self).__init__()
self.w0 = Parameter(torch.Tensor(1))
self.w1 = Parameter(torch.Tensor(1))
self.w0.data.uniform_(-1, 1)
self.w1.data.uniform_(-1, 1)
def extract_grad(self):
tot_size = self.count_parameters()
pvec = np.zeros(tot_size, np.float32)
count = 0
for param in self.parameters():
sz = param.grad.data.numpy().flatten().shape[0]
pvec[count:count + sz] = param.grad.data.numpy().flatten()
count += sz
return pvec.copy()
def extract_parameters(self):
tot_size = self.count_parameters()
pvec = np.zeros(tot_size, np.float32)
count = 0
for param in self.parameters():
sz = param.data.numpy().flatten().shape[0]
pvec[count:count + sz] = param.data.numpy().flatten()
count += sz
return pvec.copy()
def inject_parameters(self, pvec):
self.count_parameters()
count = 0
for param in self.parameters():
sz = param.data.numpy().flatten().shape[0]
raw = pvec[count:count + sz]
reshaped = raw.reshape(param.data.numpy().shape)
param.data = torch.from_numpy(reshaped)
count += sz
return pvec
def count_parameters(self):
count = 0
for param in self.parameters():
count += param.data.numpy().flatten().shape[0]
return count
def forward(self, input_0):
primals_1 = self.w0
primals_3 = self.w1
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
uber-common/safemutations
|
netmodel
| false
| 16,634
|
[
"MIT"
] | 91
|
40e5fd03a244f89bf157d4bedf79201e706aedc1
|
https://github.com/uber-common/safemutations/tree/40e5fd03a244f89bf157d4bedf79201e706aedc1
|
DSC_loss
|
import torch
import torch.nn as nn
class DSC_loss(nn.Module):
def __init__(self):
super(DSC_loss, self).__init__()
self.epsilon = 1e-06
return
def forward(self, pred, target):
batch_num = pred.shape[0]
pred = pred.contiguous().view(batch_num, -1)
target = target.contiguous().view(batch_num, -1)
DSC = (2 * (pred * target).sum(1) + self.epsilon) / ((pred + target
).sum(1) + self.epsilon)
return 1 - DSC.sum() / float(batch_num)
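# DSC = (2 * sum(pred * target) + eps) / (sum(pred) + sum(target) + eps) per
# sample; the loss is 1 minus the mean Dice coefficient over the batch.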
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 + tmp1
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
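# The kernel below finishes the loss: (2*intersection + 1e-06) /
# (sum + 1e-06) per sample, then 1 - mean over the batch; 0.25 is the
# hard-coded 1/batch_num for this fixed input shape.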
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp6 = tmp5 + tmp3
tmp7 = tmp4 / tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.sum(tmp8, 1)[:, None]
tmp11 = 0.25
tmp12 = tmp10 * tmp11
tmp13 = 1.0
tmp14 = tmp13 - tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf3, buf0, buf1,
1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class DSC_lossNew(nn.Module):
def __init__(self):
super(DSC_lossNew, self).__init__()
self.epsilon = 1e-06
return
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
twni2016/OrganSegRSTN_PyTorch
|
DSC_loss
| false
| 16,635
|
[
"MIT"
] | 100
|
bf571320e718c8f138e04d48645e3b4dfe75801d
|
https://github.com/twni2016/OrganSegRSTN_PyTorch/tree/bf571320e718c8f138e04d48645e3b4dfe75801d
|
ConformerConvBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.cuda
class ConformerConvBlock(nn.Module):
def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):
super(ConformerConvBlock, self).__init__()
assert (kernel_size - 1) % 2 == 0
self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels,
kernel_size=1, stride=1, padding=0, bias=bias)
self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size,
stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias
=bias)
self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1,
stride=1, padding=0, bias=bias)
self.activation = activation
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_normal_(self.pointwise_conv1.weight, nonlinearity=
'relu')
nn.init.kaiming_normal_(self.depthwise_conv.weight, nonlinearity='relu'
)
nn.init.kaiming_normal_(self.pointwise_conv2.weight, nonlinearity=
'relu')
nn.init.constant_(self.pointwise_conv1.bias, 0)
nn.init.constant_(self.pointwise_conv2.bias, 0)
nn.init.constant_(self.depthwise_conv.bias, 0)
def forward(self, x):
"""
:param x: [seq_len x bsz x hidden_size]
:return:
"""
x = x.transpose(0, 1).transpose(1, 2)
x = self.pointwise_conv1(x)
x = F.glu(x, dim=1)
x = self.depthwise_conv(x)
x = self.activation(x)
x = self.pointwise_conv2(x)
x = x.transpose(1, 2).transpose(0, 1)
return x
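# Layer order follows the Conformer convolution module: pointwise conv to
# 2*C channels, GLU gating back to C, depthwise conv, activation, pointwise
# conv, with the (seq_len, bsz, hidden) layout restored at the end.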
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'kernel_size': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.cuda
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_glu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 32 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 32 * x1), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
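# GLU along dim=1: the 8-channel pointwise-conv output is split into two
# 4-channel halves a and b and combined as a * sigmoid(b).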
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 8, 4), (32, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(128)](buf2, primals_3, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = buf0
del buf0
triton_poi_fused_glu_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_3[grid(64)](buf5, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4), (16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_4[grid(64)](buf7, primals_7, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
return reinterpret_tensor(buf7, (4, 4, 4), (1, 16, 4), 0
), primals_2, primals_4, primals_6, reinterpret_tensor(primals_1, (
4, 4, 4), (4, 1, 16), 0), buf2, buf3, buf5
class ConformerConvBlockNew(nn.Module):
def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):
super(ConformerConvBlockNew, self).__init__()
assert (kernel_size - 1) % 2 == 0
self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels,
kernel_size=1, stride=1, padding=0, bias=bias)
self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size,
stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias
=bias)
self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1,
stride=1, padding=0, bias=bias)
self.activation = activation
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_normal_(self.pointwise_conv1.weight, nonlinearity=
'relu')
nn.init.kaiming_normal_(self.depthwise_conv.weight, nonlinearity='relu'
)
nn.init.kaiming_normal_(self.pointwise_conv2.weight, nonlinearity=
'relu')
nn.init.constant_(self.pointwise_conv1.bias, 0)
nn.init.constant_(self.pointwise_conv2.bias, 0)
nn.init.constant_(self.depthwise_conv.bias, 0)
def forward(self, input_0):
primals_2 = self.pointwise_conv1.weight
primals_3 = self.pointwise_conv1.bias
primals_4 = self.depthwise_conv.weight
primals_5 = self.depthwise_conv.bias
primals_6 = self.pointwise_conv2.weight
primals_7 = self.pointwise_conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
tuannamnguyen93/NMTGMinor
|
ConformerConvBlock
| false
| 16,636
|
[
"MIT"
] | 75
|
acde3454343bda7060fae541c110d0ad1a8ac4f4
|
https://github.com/tuannamnguyen93/NMTGMinor/tree/acde3454343bda7060fae541c110d0ad1a8ac4f4
|
N2
|
import torch
from typing import Tuple
from abc import ABC
from abc import abstractmethod
from torch import nn
class Regularizer(nn.Module, ABC):
@abstractmethod
def forward(self, factors: 'Tuple[torch.Tensor]'):
pass
class N2(Regularizer):
def __init__(self, weight: 'float'):
super(N2, self).__init__()
self.weight = weight
def forward(self, factors):
norm = 0
for f in factors:
norm += self.weight * torch.sum(torch.norm(f, 2, 1) ** 3)
return norm / factors[0].shape[0]
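# Note: despite the class name, the penalty is weight * the sum of *cubed*
# row-wise L2 norms, averaged over the leading dimension of the first factor.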
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'weight': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from typing import Tuple
from abc import ABC
from abc import abstractmethod
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mul_pow_sum_0(in_out_ptr0,
in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + (r0 + 16 * r1), None)
tmp2 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), None)
tmp5 = tl.load(in_ptr0 + (8 + r0 + 16 * r1), None)
tmp8 = tl.load(in_ptr0 + (12 + r0 + 16 * r1), None)
tmp17 = tl.load(in_ptr0 + (64 + r0 + 16 * r1), None)
tmp19 = tl.load(in_ptr0 + (68 + r0 + 16 * r1), None)
tmp22 = tl.load(in_ptr0 + (72 + r0 + 16 * r1), None)
tmp25 = tl.load(in_ptr0 + (76 + r0 + 16 * r1), None)
tmp34 = tl.load(in_ptr0 + (128 + r0 + 16 * r1), None)
tmp36 = tl.load(in_ptr0 + (132 + r0 + 16 * r1), None)
tmp39 = tl.load(in_ptr0 + (136 + r0 + 16 * r1), None)
tmp42 = tl.load(in_ptr0 + (140 + r0 + 16 * r1), None)
tmp51 = tl.load(in_ptr0 + (192 + r0 + 16 * r1), None)
tmp53 = tl.load(in_ptr0 + (196 + r0 + 16 * r1), None)
tmp56 = tl.load(in_ptr0 + (200 + r0 + 16 * r1), None)
tmp59 = tl.load(in_ptr0 + (204 + r0 + 16 * r1), None)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = tmp11 * tmp11
tmp13 = tmp12 * tmp11
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp18 = tmp17 * tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = tmp28 * tmp28
tmp30 = tmp29 * tmp28
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.sum(tmp31, 1)[:, None]
tmp35 = tmp34 * tmp34
tmp37 = tmp36 * tmp36
tmp38 = tmp35 + tmp37
tmp40 = tmp39 * tmp39
tmp41 = tmp38 + tmp40
tmp43 = tmp42 * tmp42
tmp44 = tmp41 + tmp43
tmp45 = libdevice.sqrt(tmp44)
tmp46 = tmp45 * tmp45
tmp47 = tmp46 * tmp45
tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK])
tmp50 = tl.sum(tmp48, 1)[:, None]
tmp52 = tmp51 * tmp51
tmp54 = tmp53 * tmp53
tmp55 = tmp52 + tmp54
tmp57 = tmp56 * tmp56
tmp58 = tmp55 + tmp57
tmp60 = tmp59 * tmp59
tmp61 = tmp58 + tmp60
tmp62 = libdevice.sqrt(tmp61)
tmp63 = tmp62 * tmp62
tmp64 = tmp63 * tmp62
tmp65 = tl.broadcast_to(tmp64, [XBLOCK, RBLOCK])
tmp67 = tl.sum(tmp65, 1)[:, None]
tmp68 = 4.0
tmp69 = tmp16 * tmp68
tmp70 = 0.0
tmp71 = tmp69 + tmp70
tmp72 = tmp33 * tmp68
tmp73 = tmp71 + tmp72
tmp74 = tmp50 * tmp68
tmp75 = tmp73 + tmp74
tmp76 = tmp67 * tmp68
tmp77 = tmp75 + tmp76
tmp78 = 0.25
tmp79 = tmp77 * tmp78
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp79, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_linalg_vector_norm_mul_pow_sum_0[grid(1)](buf4
, arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf4,
class Regularizer(nn.Module, ABC):
@abstractmethod
def forward(self, factors: 'Tuple[torch.Tensor]'):
pass
class N2New(Regularizer):
def __init__(self, weight: 'float'):
super(N2New, self).__init__()
self.weight = weight
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
uclnlp/cqd
|
N2
| false
| 16,637
|
[
"MIT"
] | 59
|
36148c110f336415250c98873fc27ca847741a78
|
https://github.com/uclnlp/cqd/tree/36148c110f336415250c98873fc27ca847741a78
|
L2LossWithLogit
|
import torch
import torch.utils.data
import torch
from torch import nn
class L2LossWithLogit(nn.Module):
def __init__(self):
super(L2LossWithLogit, self).__init__()
self.mse = nn.MSELoss(reduction='sum')
def forward(self, logits, targets):
p = torch.sigmoid(logits)
return self.mse(p, targets)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
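# Illustrative equivalence check (an assumption, not part of the pair): the
# loss is the summed squared error between sigmoid(logits) and the targets.
def _check_l2_with_logit():
    logits, targets = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    loss = L2LossWithLogit()(logits, targets)
    manual = ((torch.sigmoid(logits) - targets) ** 2).sum()
    assert torch.allclose(loss, manual)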
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_mse_loss_sigmoid_0[grid(1)](arg0_1, arg1_1, buf0,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class L2LossWithLogitNew(nn.Module):
def __init__(self):
super(L2LossWithLogitNew, self).__init__()
self.mse = nn.MSELoss(reduction='sum')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ucas-vg/TinyBenchmark
|
L2LossWithLogit
| false
| 16,638
|
[
"MIT"
] | 495
|
36436df3716d842b6148fb6f6bc7715a2fbdfd92
|
https://github.com/ucas-vg/TinyBenchmark/tree/36436df3716d842b6148fb6f6bc7715a2fbdfd92
|
LayerNorm
|
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(1))
self.beta = nn.Parameter(torch.zeros(1))
self.eps = eps
def forward(self, x):
mean = x.mean(-1).expand_as(x)
std = x.std(-1).expand_as(x)
return self.gamma.expand_as(x) * (x - mean) / (std + self.eps
) + self.beta.expand_as(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'features': 4}]
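# Hedged note: this LayerNorm uses scalar gamma/beta, a Bessel-corrected std
# over the last dimension, and adds eps to the std rather than the variance.
# Because expand_as aligns the reduced tensor from the right, the statistics
# are not re-aligned with the axis they were reduced over; a minimal sketch
# of the intended per-position formula (an assumption) is:
def _layer_norm_formula(x, gamma, beta, eps=1e-06):
    mean = x.mean(-1, keepdim=True)
    std = x.std(-1, keepdim=True)
    return gamma * (x - mean) / (std + eps) + beta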
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + 0)
tmp32 = tl.broadcast_to(tmp31, [XBLOCK])
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = 4.0
tmp11 = tmp9 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp1 * tmp12
tmp14 = tmp3 - tmp11
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp11
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp11
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp8 - tmp11
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = 3.0
tmp26 = tmp24 / tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = 1e-06
tmp29 = tmp27 + tmp28
tmp30 = tmp13 / tmp29
tmp33 = tmp30 + tmp32
tl.store(out_ptr0 + x2, tmp33, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_sub_0[grid(256)](primals_2, primals_1,
primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class LayerNormNew(nn.Module):
def __init__(self, features, eps=1e-06):
super(LayerNormNew, self).__init__()
self.gamma = nn.Parameter(torch.ones(1))
self.beta = nn.Parameter(torch.zeros(1))
self.eps = eps
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
uber-common/safemutations
|
LayerNorm
| false
| 16,639
|
[
"MIT"
] | 91
|
40e5fd03a244f89bf157d4bedf79201e706aedc1
|
https://github.com/uber-common/safemutations/tree/40e5fd03a244f89bf157d4bedf79201e706aedc1
|
HammingLoss
|
import torch
class HammingLoss(torch.nn.Module):
def forward(self, suggested, target):
errors = suggested * (1.0 - target) + (1.0 - suggested) * target
return errors.mean(dim=0).sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
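# Sketch under the assumption of {0, 1}-valued inputs: the per-element error
# term is an XOR, so the loss counts mismatches averaged over the batch
# dimension and summed over the remaining dimensions.
def _check_hamming():
    s = torch.tensor([[0.0, 1.0], [1.0, 1.0]])
    t = torch.tensor([[0.0, 1.0], [0.0, 1.0]])
    loss = HammingLoss()(s, t)  # one mismatch over a batch of 2 -> 0.5
    assert torch.allclose(loss, torch.tensor(0.5))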
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_rsub_sum_0(in_ptr0, in_ptr1, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp8 = tl.load(in_ptr0 + (64 + r0), None)
tmp9 = tl.load(in_ptr1 + (64 + r0), None)
tmp16 = tl.load(in_ptr0 + (128 + r0), None)
tmp17 = tl.load(in_ptr1 + (128 + r0), None)
tmp24 = tl.load(in_ptr0 + (192 + r0), None)
tmp25 = tl.load(in_ptr1 + (192 + r0), None)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp0 * tmp3
tmp5 = tmp2 - tmp0
tmp6 = tmp5 * tmp1
tmp7 = tmp4 + tmp6
tmp10 = tmp2 - tmp9
tmp11 = tmp8 * tmp10
tmp12 = tmp2 - tmp8
tmp13 = tmp12 * tmp9
tmp14 = tmp11 + tmp13
tmp15 = tmp7 + tmp14
tmp18 = tmp2 - tmp17
tmp19 = tmp16 * tmp18
tmp20 = tmp2 - tmp16
tmp21 = tmp20 * tmp17
tmp22 = tmp19 + tmp21
tmp23 = tmp15 + tmp22
tmp26 = tmp2 - tmp25
tmp27 = tmp24 * tmp26
tmp28 = tmp2 - tmp24
tmp29 = tmp28 * tmp25
tmp30 = tmp27 + tmp29
tmp31 = tmp23 + tmp30
tmp32 = 4.0
tmp33 = tmp31 / tmp32
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp36 = tl.sum(tmp34, 1)[:, None]
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp36, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mean_mul_rsub_sum_0[grid(1)](arg1_1, arg0_1,
buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class HammingLossNew(torch.nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
uclnlp/torch-imle
|
HammingLoss
| false
| 16,640
|
[
"MIT"
] | 205
|
f595cd8d527466f6b5db79276f6ceee01d100a1c
|
https://github.com/uclnlp/torch-imle/tree/f595cd8d527466f6b5db79276f6ceee01d100a1c
|
FCGenerator
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class FCGenerator(nn.Module):
def __init__(self, options):
"""
The fully connected generator is initialized by creating a chain of
fully connected layers that perform transformations
d -> 2 * d -> ... -> 2^(k - 1) * d -> n * n
where
d = options.state_size
k = options.generator_layers
n = options.image_size
"""
super(FCGenerator, self).__init__()
self.dropout = options.generator_dropout
self.layers = options.generator_layers
sizes = []
size = options.state_size
for i in range(options.generator_layers):
sizes.append(size)
size *= 2
sizes.append(options.image_size * options.image_size)
for i in range(options.generator_layers):
layer = nn.Linear(sizes[i], sizes[i + 1])
self.add_module(f'linear_{i}', layer)
def forward(self, x):
layers = {}
for name, module in self.named_children():
layers[name] = module
for i in range(self.layers):
layer = layers[f'linear_{i}']
x = layer(x)
if i < self.layers - 1:
x = F.leaky_relu(x, 0.2)
if self.dropout is not None:
x = F.dropout(x, self.dropout)
return torch.tanh(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'options': _mock_config(generator_dropout=0.5,
generator_layers=1, state_size=4, image_size=4)}]
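# Illustrative size chain (assumed values, mirroring the docstring): with
# state_size=4, generator_layers=3, image_size=4 the linear layers map
# 4 -> 8 -> 16 -> 16, i.e. 2^(k - 1) * d feeds the final n * n output.
def _generator_sizes(state_size=4, layers=3, image_size=4):
    sizes, size = [], state_size
    for _ in range(layers):
        sizes.append(size)
        size *= 2
    sizes.append(image_size * image_size)
    return sizes  # [4, 8, 16, 16]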
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(1024)](buf1, primals_2, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class FCGeneratorNew(nn.Module):
def __init__(self, options):
"""
The fully connected generator is initialized by creating a chain of
fully connected layers that perform transformations
d -> 2 * d -> ... -> 2^(k - 1) * d -> n * n
where
d = options.state_size
k = options.generator_layers
n = options.image_size
"""
super(FCGeneratorNew, self).__init__()
self.dropout = options.generator_dropout
self.layers = options.generator_layers
sizes = []
size = options.state_size
for i in range(options.generator_layers):
sizes.append(size)
size *= 2
sizes.append(options.image_size * options.image_size)
for i in range(options.generator_layers):
layer = nn.Linear(sizes[i], sizes[i + 1])
self.add_module(f'linear_{i}', layer)
def forward(self, input_0):
primals_1 = self.linear_0.weight
primals_2 = self.linear_0.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
unicredit/ganzo
|
FCGenerator
| false
| 16,641
|
[
"Apache-2.0"
] | 73
|
fb1d270f5091073e8f27da76ab508ab24e5d40e9
|
https://github.com/unicredit/ganzo/tree/fb1d270f5091073e8f27da76ab508ab24e5d40e9
|
FusedDownsample
|
import torch
import torch.nn.functional as F
from torch import nn
from math import sqrt
class FusedDownsample(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
weight = torch.randn(out_channel, in_channel, kernel_size, kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size * kernel_size
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input):
weight = F.pad(self.weight * self.multiplier, [1, 1, 1, 1])
weight = (weight[:, :, 1:, 1:] + weight[:, :, :-1, 1:] + weight[:,
:, 1:, :-1] + weight[:, :, :-1, :-1]) / 4
out = F.conv2d(input, weight, self.bias, stride=2, padding=self.pad)
return out
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
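# Shape sketch (values are assumptions): padding the 4x4 weight to 5x5 and
# averaging the four shifted views yields an effective 5x5 blur kernel, so a
# stride-2 conv on a 64x64 input gives floor((64 - 5) / 2) + 1 = 30 per side.
def _check_fused_downsample_shape():
    m = FusedDownsample(4, 4, 4)
    out = m(torch.rand(4, 4, 64, 64))
    assert out.shape == (4, 4, 30, 30)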
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 5 % 5
x0 = xindex % 5
x2 = xindex // 25
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0
)
tmp12 = 0.1767766952966369
tmp13 = tmp11 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp10, tmp13, tmp14)
tmp16 = -1 + x1
tmp17 = tmp16 >= tmp1
tmp18 = tmp16 < tmp3
tmp19 = tmp17 & tmp18
tmp20 = tmp19 & tmp6
tmp21 = tmp20 & tmp7
tmp22 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp21 & xmask,
other=0.0)
tmp23 = tmp22 * tmp12
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp21, tmp23, tmp24)
tmp26 = tmp15 + tmp25
tmp27 = -1 + x0
tmp28 = tmp27 >= tmp1
tmp29 = tmp27 < tmp3
tmp30 = tmp8 & tmp28
tmp31 = tmp30 & tmp29
tmp32 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp31 & xmask,
other=0.0)
tmp33 = tmp32 * tmp12
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp31, tmp33, tmp34)
tmp36 = tmp26 + tmp35
tmp37 = tmp19 & tmp28
tmp38 = tmp37 & tmp29
tmp39 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp38 & xmask,
other=0.0)
tmp40 = tmp39 * tmp12
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp36 + tmp42
tmp44 = 0.25
tmp45 = tmp43 * tmp44
tl.store(in_out_ptr0 + x4, tmp45, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 14400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 900 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(400)](buf1, primals_1, 400, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(primals_3, buf1, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 30, 30), (3600, 900, 30, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(14400)](buf3, primals_2, 14400,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_3, buf1
class FusedDownsampleNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
weight = torch.randn(out_channel, in_channel, kernel_size, kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size * kernel_size
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
uzielroy/StyleGan_FewShot
|
FusedDownsample
| false
| 16,642
|
[
"MIT"
] | 76
|
94e4c49dbf39d1c6299f33787afb3e471ece11e3
|
https://github.com/uzielroy/StyleGan_FewShot/tree/94e4c49dbf39d1c6299f33787afb3e471ece11e3
|
L1Loss
|
import torch
import torch.nn.functional as F
import torch.onnx
class L1Loss(torch.nn.Module):
"""
L1 loss
"""
def __init__(self, **kwargs):
super(L1Loss, self).__init__()
self.loss_w = kwargs.get('loss_weight', 1)
def forward(self, preds, gts):
return F.l1_loss(preds.view(-1), gts.view(-1)) * self.loss_w
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg0_1, arg1_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L1LossNew(torch.nn.Module):
"""
L1 loss
"""
def __init__(self, **kwargs):
super(L1LossNew, self).__init__()
self.loss_w = kwargs.get('loss_weight', 1)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
usutdzxych/CenseoQoE
|
L1Loss
| false
| 16,643
|
[
"BSD-3-Clause"
] | 75
|
3f653296b223da6190e1e1781e7b9b54ff877102
|
https://github.com/usutdzxych/CenseoQoE/tree/3f653296b223da6190e1e1781e7b9b54ff877102
|
Linear
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class Linear(nn.Linear):
def forward(self, x):
weight = self.weight
weight_mean = weight.mean(dim=1, keepdim=True)
weight = weight - weight_mean
std = weight.std(dim=1, keepdim=True) + 1e-05
weight = weight / std.expand_as(weight)
return F.linear(x, weight, self.bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
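# Hedged check: after centering each weight row and dividing by its
# (eps-shifted) std, every row has approximately zero mean, which is the
# weight-standardization property this layer implements.
def _check_ws_linear():
    lin = Linear(4, 4)
    w = lin.weight - lin.weight.mean(dim=1, keepdim=True)
    w = w / (w.std(dim=1, keepdim=True) + 1e-05)
    assert torch.allclose(w.mean(dim=1), torch.zeros(4), atol=1e-06)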
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp1 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tmp2 - tmp9
tmp13 = tmp12 * tmp12
tmp14 = tmp11 + tmp13
tmp15 = tmp4 - tmp9
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp6 - tmp9
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = 3.0
tmp22 = tmp20 / tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = tmp0 / tmp25
tl.store(out_ptr0 + x2, tmp26, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(16)](primals_1, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del buf1
del primals_2
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class LinearNew(nn.Linear):
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
untitled-ai/self_supervised
|
Linear
| false
| 16,644
|
[
"MIT"
] | 370
|
6d14ca0402ecc13feda9b3a9fdc056fd1ac24473
|
https://github.com/untitled-ai/self_supervised/tree/6d14ca0402ecc13feda9b3a9fdc056fd1ac24473
|
FusedUpsample
|
import torch
import torch.nn.functional as F
from torch import nn
from math import sqrt
class FusedUpsample(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
weight = torch.randn(in_channel, out_channel, kernel_size, kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size * kernel_size
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input):
weight = F.pad(self.weight * self.multiplier, [1, 1, 1, 1])
weight = (weight[:, :, 1:, 1:] + weight[:, :, :-1, 1:] + weight[:,
:, 1:, :-1] + weight[:, :, :-1, :-1]) / 4
out = F.conv_transpose2d(input, weight, self.bias, stride=2,
padding=self.pad)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
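# Shape sketch (assumed values): with the blurred 5x5 kernel and a stride-2
# transposed conv, a 4x4 input maps to (4 - 1) * 2 + 5 = 11 per side.
def _check_fused_upsample_shape():
    m = FusedUpsample(4, 4, 4)
    out = m(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 11, 11)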
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 5 % 5
x0 = xindex % 5
x2 = xindex // 25
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0
)
tmp12 = 0.1767766952966369
tmp13 = tmp11 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp10, tmp13, tmp14)
tmp16 = -1 + x1
tmp17 = tmp16 >= tmp1
tmp18 = tmp16 < tmp3
tmp19 = tmp17 & tmp18
tmp20 = tmp19 & tmp6
tmp21 = tmp20 & tmp7
tmp22 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp21 & xmask,
other=0.0)
tmp23 = tmp22 * tmp12
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp21, tmp23, tmp24)
tmp26 = tmp15 + tmp25
tmp27 = -1 + x0
tmp28 = tmp27 >= tmp1
tmp29 = tmp27 < tmp3
tmp30 = tmp8 & tmp28
tmp31 = tmp30 & tmp29
tmp32 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp31 & xmask,
other=0.0)
tmp33 = tmp32 * tmp12
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp31, tmp33, tmp34)
tmp36 = tmp26 + tmp35
tmp37 = tmp19 & tmp28
tmp38 = tmp37 & tmp29
tmp39 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp38 & xmask,
other=0.0)
tmp40 = tmp39 * tmp12
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp36 + tmp42
tmp44 = 0.25
tmp45 = tmp43 * tmp44
tl.store(in_out_ptr0 + x4, tmp45, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 121 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(400)](buf1, primals_1, 400, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(primals_3, buf1, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 11, 11), (484, 121, 11, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(1936)](buf3, primals_2, 1936,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_3, buf1
class FusedUpsampleNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
weight = torch.randn(in_channel, out_channel, kernel_size, kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size * kernel_size
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
uzielroy/StyleGan_FewShot
|
FusedUpsample
| false
| 16,645
|
[
"MIT"
] | 76
|
94e4c49dbf39d1c6299f33787afb3e471ece11e3
|
https://github.com/uzielroy/StyleGan_FewShot/tree/94e4c49dbf39d1c6299f33787afb3e471ece11e3
|
AlbertAttentionWithoutSkipConnection
|
from _paritybench_helpers import _mock_config
import math
import torch
import torch.utils.checkpoint
from torch import nn
class AlbertAttentionWithoutSkipConnection(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = (config.hidden_size // config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob
)
self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.pruned_heads = set()
self.position_embedding_type = getattr(config,
'position_embedding_type', 'absolute')
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.
max_position_embeddings - 1, self.attention_head_size)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask=None, head_mask=None,
output_attentions=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long,
device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long,
device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.
max_position_embeddings - 1)
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr',
query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr',
query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr',
key_layer, positional_embedding)
attention_scores = (attention_scores +
relative_position_scores_query +
relative_position_scores_key)
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.attention_dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
w = self.dense.weight.t().view(self.num_attention_heads, self.
attention_head_size, self.hidden_size)
b = self.dense.bias
projected_context_layer = torch.einsum('bfnd,ndh->bfh',
context_layer, w) + b
projected_context_layer_dropout = self.output_dropout(
projected_context_layer)
layernormed_context_layer = self.LayerNorm(
projected_context_layer_dropout)
return (layernormed_context_layer, attention_probs
) if output_attentions else (layernormed_context_layer,)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5,
layer_norm_eps=1, position_embedding_type=4)}]
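# Hedged shape walk-through (illustrative only): with hidden_size=4 and 4
# heads each head has size 1; attention scores are [B, H, L, L] before the
# softmax, and the 'bfnd,ndh->bfh' einsum folds heads back to hidden_size.
def _attention_score_shapes(B=4, H=4, L=4, d=1):
    q = torch.rand(B, H, L, d)
    k = torch.rand(B, H, L, d)
    scores = torch.matmul(q, k.transpose(-1, -2))
    assert scores.shape == (B, H, L, L)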
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.checkpoint
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = 4.0
tmp20 = tmp18 / tmp19
tmp21 = tmp3 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tmp7 - tmp20
tmp24 = tmp23 * tmp23
tmp25 = tmp22 + tmp24
tmp26 = tmp12 - tmp20
tmp27 = tmp26 * tmp26
tmp28 = tmp25 + tmp27
tmp29 = tmp17 - tmp20
tmp30 = tmp29 * tmp29
tmp31 = tmp28 + tmp30
tmp32 = tmp31 / tmp19
tl.store(out_ptr0 + x0, tmp20, xmask)
tl.store(out_ptr1 + x0, tmp32, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (1, 16, 4), (64, 4, 1), 0)
del buf9
extern_kernels.bmm(reinterpret_tensor(buf10, (1, 16, 4), (0, 4, 1),
0), reinterpret_tensor(primals_8, (1, 4, 4), (0, 1, 4), 0), out
=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_9,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_9,
buf12, buf13, primals_10, primals_11, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf12
del buf13
del primals_11
return buf14, primals_9, primals_10, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), buf11, reinterpret_tensor(buf10, (1, 4, 16), (64, 1, 4), 0
), reinterpret_tensor(primals_8, (1, 4, 4), (4, 4, 1), 0)
class AlbertAttentionWithoutSkipConnectionNew(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = (config.hidden_size // config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob
)
self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.pruned_heads = set()
self.position_embedding_type = getattr(config,
'position_embedding_type', 'absolute')
if (self.position_embedding_type == 'relative_key' or self.
position_embedding_type == 'relative_key_query'):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.
max_position_embeddings - 1, self.attention_head_size)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, input_0):
primals_1 = self.query.weight
primals_2 = self.query.bias
primals_4 = self.key.weight
primals_5 = self.key.bias
primals_6 = self.value.weight
primals_7 = self.value.bias
primals_8 = self.dense.weight
primals_9 = self.dense.bias
primals_10 = self.LayerNorm.weight
primals_11 = self.LayerNorm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
twistedcubic/attention-rank-collapse
|
AlbertAttentionWithoutSkipConnection
| false
| 16,646
|
[
"Apache-2.0"
] | 118
|
38b5df6dc2add25f6d945e48a6baf96862368c20
|
https://github.com/twistedcubic/attention-rank-collapse/tree/38b5df6dc2add25f6d945e48a6baf96862368c20
|
FCDiscriminator
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class FCDiscriminator(nn.Module):
def __init__(self, options):
"""
        The fully connected discriminator is initialized by creating a chain of
fully connected layers that perform transformations
n * n -> 2^(k - 1) * d -> ... -> 2 * d -> 1
where
d = options.state_size
k = options.discriminator_layers
n = options.image_size
"""
super(FCDiscriminator, self).__init__()
self.dropout = options.discriminator_dropout
self.layers = options.discriminator_layers
sizes = [options.image_size * options.image_size]
size = options.state_size * 2 ** (options.discriminator_layers - 1)
for i in range(options.discriminator_layers - 1):
sizes.append(size)
size //= 2
sizes.append(1)
for i in range(options.discriminator_layers):
layer = nn.Linear(sizes[i], sizes[i + 1])
self.add_module(f'linear_{i}', layer)
def forward(self, x):
batch_size = x.size()[0]
x = x.view(batch_size, -1)
layers = {}
for name, module in self.named_children():
layers[name] = module
for i in range(self.layers):
layer = layers[f'linear_{i}']
x = layer(x)
if i < self.layers - 1:
x = F.leaky_relu(x, 0.2)
if self.dropout is not None:
x = F.dropout(x, self.dropout)
return torch.sigmoid(x)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'options': _mock_config(discriminator_dropout=0.5,
discriminator_layers=1, image_size=4, state_size=4)}]
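# Illustrative size chain for the discriminator (assumed values): with
# image_size=4, discriminator_layers=3, state_size=4 the layers map
# 16 -> 16 -> 8 -> 1, the reverse direction of the generator above.
def _discriminator_sizes(image_size=4, layers=3, state_size=4):
    sizes = [image_size * image_size]
    size = state_size * 2 ** (layers - 1)
    for _ in range(layers - 1):
        sizes.append(size)
        size //= 2
    sizes.append(1)
    return sizes  # [16, 16, 8, 1]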
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 16), (16, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
), reinterpret_tensor(primals_2, (16, 1), (1, 16), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(4)](buf1, primals_3, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
return buf1, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0), buf1
class FCDiscriminatorNew(nn.Module):
def __init__(self, options):
"""
        The fully connected discriminator is initialized by creating a chain of
fully connected layers that perform transformations
n * n -> 2^(k - 1) * d -> ... -> 2 * d -> 1
where
d = options.state_size
k = options.discriminator_layers
n = options.image_size
"""
super(FCDiscriminatorNew, self).__init__()
self.dropout = options.discriminator_dropout
self.layers = options.discriminator_layers
sizes = [options.image_size * options.image_size]
size = options.state_size * 2 ** (options.discriminator_layers - 1)
for i in range(options.discriminator_layers - 1):
sizes.append(size)
size //= 2
sizes.append(1)
for i in range(options.discriminator_layers):
layer = nn.Linear(sizes[i], sizes[i + 1])
self.add_module(f'linear_{i}', layer)
def forward(self, input_0):
primals_2 = self.linear_0.weight
primals_3 = self.linear_0.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
unicredit/ganzo
|
FCDiscriminator
| false
| 16,647
|
[
"Apache-2.0"
] | 73
|
fb1d270f5091073e8f27da76ab508ab24e5d40e9
|
https://github.com/unicredit/ganzo/tree/fb1d270f5091073e8f27da76ab508ab24e5d40e9
|
Highway
|
import torch
from torch import nn
import torch.utils.data
class Highway(nn.Module):
def __init__(self, input_dim, dropout):
super(Highway, self).__init__()
self.input_linear = nn.Linear(input_dim, input_dim)
self.relu = nn.ReLU()
self.gate_linear = nn.Linear(input_dim, input_dim)
self.sigmoid = nn.Sigmoid()
self.dropout = nn.Dropout(dropout)
def forward(self, input_):
input_ = self.dropout(input_)
output = self.relu(self.input_linear(input_))
gate = self.sigmoid(self.gate_linear(input_))
output = input_ * gate + output * (1.0 - gate)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'dropout': 0.5}]
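# Minimal formula sketch (an assumption, not the compiled path): the gate g
# interpolates between the (dropped-out) input and its transformed version,
# output = x * g + relu(W x + b) * (1 - g).
def _highway_formula(x, input_linear, gate_linear):
    g = torch.sigmoid(gate_linear(x))
    return x * g + torch.relu(input_linear(x)) * (1.0 - g)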
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 1.0
tmp8 = tmp7 - tmp2
tmp9 = tmp6 * tmp8
tmp10 = tmp3 + tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_0[grid(256)](primals_1,
buf1, buf0, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_1, buf0, buf1
class HighwayNew(nn.Module):
def __init__(self, input_dim, dropout):
super(HighwayNew, self).__init__()
self.input_linear = nn.Linear(input_dim, input_dim)
self.relu = nn.ReLU()
self.gate_linear = nn.Linear(input_dim, input_dim)
self.sigmoid = nn.Sigmoid()
self.dropout = nn.Dropout(dropout)
def forward(self, input_0):
primals_2 = self.input_linear.weight
primals_3 = self.input_linear.bias
primals_4 = self.gate_linear.weight
primals_5 = self.gate_linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
uwnlp/piqa
|
Highway
| false
| 16,648
|
[
"Apache-2.0"
] | 89
|
e18f2189c93965c94655d5cc943dcecdc2c1ea57
|
https://github.com/uwnlp/piqa/tree/e18f2189c93965c94655d5cc943dcecdc2c1ea57
|
Router
|
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
class Squash(Module):
    """
    ## Squash
    This is the **squashing** function from the paper, given by equation $(1)$.
    $$\\mathbf{v}_j = \\frac{{\\lVert \\mathbf{s}_j \\rVert}^2}{1 + {\\lVert \\mathbf{s}_j \\rVert}^2}
    \\frac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \\rVert}$$
    $\\frac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \\rVert}$
    normalizes the length of all the capsules, whilst
    $\\frac{{\\lVert \\mathbf{s}_j \\rVert}^2}{1 + {\\lVert \\mathbf{s}_j \\rVert}^2}$
    shrinks the capsules that have a length smaller than one.
    """
def __init__(self, epsilon=1e-08):
super().__init__()
self.epsilon = epsilon
def forward(self, s: 'torch.Tensor'):
"""
The shape of `s` is `[batch_size, n_capsules, n_features]`
"""
s2 = (s ** 2).sum(dim=-1, keepdims=True)
return s2 / (1 + s2) * (s / torch.sqrt(s2 + self.epsilon))
class Router(Module):
"""
## Routing Algorithm
This is the routing mechanism described in the paper.
You can use multiple routing layers in your models.
This combines calculating $\\mathbf{s}_j$ for this layer and
the routing algorithm described in *Procedure 1*.
"""
def __init__(self, in_caps: 'int', out_caps: 'int', in_d: 'int', out_d:
'int', iterations: 'int'):
"""
`in_caps` is the number of capsules, and `in_d` is the number of features per capsule from the layer below.
`out_caps` and `out_d` are the same for this layer.
`iterations` is the number of routing iterations, symbolized by $r$ in the paper.
"""
super().__init__()
self.in_caps = in_caps
self.out_caps = out_caps
self.iterations = iterations
self.softmax = nn.Softmax(dim=1)
self.squash = Squash()
self.weight = nn.Parameter(torch.randn(in_caps, out_caps, in_d,
out_d), requires_grad=True)
def forward(self, u: 'torch.Tensor'):
"""
The shape of `u` is `[batch_size, n_capsules, n_features]`.
These are the capsules from the lower layer.
"""
u_hat = torch.einsum('ijnm,bin->bijm', self.weight, u)
b = u.new_zeros(u.shape[0], self.in_caps, self.out_caps)
v = None
for i in range(self.iterations):
c = self.softmax(b)
s = torch.einsum('bij,bijm->bjm', c, u_hat)
v = self.squash(s)
a = torch.einsum('bjm,bijm->bij', v, u_hat)
b = b + a
return v
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_caps': 4, 'out_caps': 4, 'in_d': 4, 'out_d': 4,
'iterations': 4}]
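# Illustrative usage sketch (added): with in_caps=4, out_caps=4, in_d=4,
# out_d=4, the einsum 'ijnm,bin->bijm' yields predictions u_hat of shape
# [batch, in_caps, out_caps, out_d]; each iteration softmaxes the logits b
# over in_caps (dim=1) and reduces u_hat to s of shape [batch, out_caps, out_d].
if __name__ == '__main__':
    torch.manual_seed(0)
    router = Router(in_caps=4, out_caps=4, in_d=4, out_d=4, iterations=4)
    v = router(torch.rand(4, 4, 4))
    assert v.shape == (4, 4, 4)  # [batch_size, out_caps, out_d]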
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tmp1 = tl_math.exp(tmp0)
tmp2 = tmp1 + tmp1
tmp3 = tmp2 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = tmp1 / tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
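# Annotation (added): b starts as zeros, so the first softmax over the 4
# input capsules is constant; the compiler folded it to
# exp(0) / (4 * exp(0)) = 0.25, and this kernel simply fills the buffer with 1/4.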
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex % 4
x2 = xindex // 4 % 4
x3 = xindex // 16
y0 = yindex
x4 = xindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1 + 16 * x3 + 64 * x2), xmask &
ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4 + 64 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_3(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = 1e-08
tmp16 = tmp10 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp14 / tmp17
tmp19 = tmp13 * tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex % 4
x2 = xindex // 4
y0 = yindex
x3 = xindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * x1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x3 + 64 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (x0 // 4) + x0 % 4), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_add_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + x0, tmp14, xmask)
tl.store(out_ptr1 + x0, tmp25, xmask)
@triton.jit
def triton_poi_fused__softmax_add_clone_9(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__softmax_add_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 - tmp22
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp9 - tmp22
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp15 - tmp22
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp31 = tmp21 - tmp22
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tl.store(out_ptr0 + x0, tmp22, xmask)
tl.store(out_ptr1 + x0, tmp33, xmask)
@triton.jit
def triton_poi_fused__softmax_add_clone_11(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_transpose_12(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (y0 // 4) + y0 % 4), xmask &
ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1),
0), reinterpret_tensor(primals_2, (4, 4, 4), (4, 1, 16), 0),
out=buf1)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_1[grid(64)](buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_2[grid(4, 64)](buf1, buf3, 4, 64, XBLOCK=32,
YBLOCK=4, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf4, buf5,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(4, 64)](buf1, buf6, 4, 64, XBLOCK=32,
YBLOCK=4, num_warps=4, num_stages=1)
del buf1
buf7 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_6[grid(16, 4)](buf8, buf9, 16, 4, XBLOCK=
4, YBLOCK=16, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (16, 1, 4), (1, 64, 16), 0)
del buf8
triton_poi_fused_bmm_7[grid(64)](buf9, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(buf10, reinterpret_tensor(buf3, (16, 4, 4), (16,
4, 1), 0), out=buf11)
buf12 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0)
del buf10
triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf11, buf12,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf12, (16, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf13)
buf14 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf15 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused__softmax_add_8[grid(16)](buf7, buf13, buf14, buf15,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused__softmax_add_clone_9[grid(64)](buf7, buf13, buf14,
buf15, buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf17, (16, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf18)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf18, buf19,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf19, (16, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf20)
buf21 = buf15
del buf15
buf22 = buf14
del buf14
triton_poi_fused__softmax_add_10[grid(16)](buf7, buf13, buf20,
buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf23 = reinterpret_tensor(buf13, (4, 4, 4), (16, 1, 4), 0)
del buf13
buf24 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused__softmax_add_clone_11[grid(64)](buf23, buf7, buf20,
buf21, buf22, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf21
del buf22
buf25 = buf20
del buf20
extern_kernels.bmm(reinterpret_tensor(buf24, (16, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf25)
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf25, buf26,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf27 = empty_strided_cuda((16, 4, 1), (4, 1, 4), torch.float32)
triton_poi_fused_transpose_12[grid(16, 4)](buf9, buf27, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf9
return (buf26, buf4, buf7, buf11, buf16, buf18, buf23, buf25,
reinterpret_tensor(buf24, (16, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf17, (16, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 4), 0), buf27,
reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(primals_2, (4, 4, 4), (4, 16, 1), 0))
class Squash(Module):
    """
    ## Squash
    This is the **squashing** function from the paper, given by equation $(1)$.
    $$\\mathbf{v}_j = \\frac{{\\lVert \\mathbf{s}_j \\rVert}^2}{1 + {\\lVert \\mathbf{s}_j \\rVert}^2}
    \\frac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \\rVert}$$
    $\\frac{\\mathbf{s}_j}{\\lVert \\mathbf{s}_j \\rVert}$
    normalizes the length of all the capsules, whilst
    $\\frac{{\\lVert \\mathbf{s}_j \\rVert}^2}{1 + {\\lVert \\mathbf{s}_j \\rVert}^2}$
    shrinks the capsules that have a length smaller than one.
    """
def __init__(self, epsilon=1e-08):
super().__init__()
self.epsilon = epsilon
def forward(self, s: 'torch.Tensor'):
"""
The shape of `s` is `[batch_size, n_capsules, n_features]`
"""
s2 = (s ** 2).sum(dim=-1, keepdims=True)
return s2 / (1 + s2) * (s / torch.sqrt(s2 + self.epsilon))
class RouterNew(Module):
"""
## Routing Algorithm
This is the routing mechanism described in the paper.
You can use multiple routing layers in your models.
This combines calculating $\\mathbf{s}_j$ for this layer and
the routing algorithm described in *Procedure 1*.
"""
def __init__(self, in_caps: 'int', out_caps: 'int', in_d: 'int', out_d:
'int', iterations: 'int'):
"""
`in_caps` is the number of capsules, and `in_d` is the number of features per capsule from the layer below.
`out_caps` and `out_d` are the same for this layer.
`iterations` is the number of routing iterations, symbolized by $r$ in the paper.
"""
super().__init__()
self.in_caps = in_caps
self.out_caps = out_caps
self.iterations = iterations
self.softmax = nn.Softmax(dim=1)
self.squash = Squash()
self.weight = nn.Parameter(torch.randn(in_caps, out_caps, in_d,
out_d), requires_grad=True)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
techthiyanes/annotated_deep_learning_paper_implementations
|
Router
| false
| 16,649
|
[
"MIT"
] | 3,714
|
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
|
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
|
NoiseInjection
|
import torch
from torch import nn
class NoiseInjection(nn.Module):
def __init__(self, channel):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1, channel, 1, 1))
def forward(self, image, noise):
return image + self.weight * noise
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x3, xmask)
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_3, primals_1,
primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_3
return buf0, primals_2
class NoiseInjectionNew(nn.Module):
def __init__(self, channel):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1, channel, 1, 1))
def forward(self, input_0, input_1):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
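# Minimal usage sketch (added; assumes a CUDA device since call() targets one):
# the module returns image + weight * noise with a per-channel weight that is
# initialized to zeros, so an untrained NoiseInjectionNew is the identity on image.
if __name__ == '__main__':
    if torch.cuda.is_available():
        m = NoiseInjectionNew(channel=4).cuda()
        image = torch.rand(4, 4, 4, 4, device='cuda')
        noise = torch.rand(4, 4, 4, 4, device='cuda')
        assert torch.equal(m(image, noise), image)  # 0 * noise contributes nothing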
|
uzielroy/StyleGan_FewShot
|
NoiseInjection
| false
| 16,650
|
[
"MIT"
] | 76
|
94e4c49dbf39d1c6299f33787afb3e471ece11e3
|
https://github.com/uzielroy/StyleGan_FewShot/tree/94e4c49dbf39d1c6299f33787afb3e471ece11e3
|
L2Normalize
|
import torch
import torch.nn
import torch.nn.parallel
import torch.backends.cudnn
import torch.distributed
import torch.multiprocessing
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
class L2Normalize(nn.Module):
def __init__(self, dim):
super(L2Normalize, self).__init__()
self.dim = dim
def forward(self, x):
return F.normalize(x, p=2, dim=self.dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.nn.parallel
import torch.backends.cudnn
import torch.distributed
import torch.multiprocessing
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class L2NormalizeNew(nn.Module):
def __init__(self, dim):
super(L2NormalizeNew, self).__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
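# Hedged equivalence check (added): the fused kernel divides by
# max(||x||_2, 1e-12) along the last axis, which should match
# F.normalize(x, p=2, dim=4) for the [4, 4, 4, 4, 4] input this graph
# was specialized for.
if __name__ == '__main__':
    if torch.cuda.is_available():
        import torch.nn.functional as F
        x = torch.rand(4, 4, 4, 4, 4, device='cuda')
        y = L2NormalizeNew(dim=4)(x)
        assert torch.allclose(y, F.normalize(x, p=2, dim=4), atol=1e-6)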
|
valeoai/obow
|
L2Normalize
| false
| 16,651
|
[
"Apache-2.0"
] | 84
|
3758504f5e058275725c35ca7faca3731572b911
|
https://github.com/valeoai/obow/tree/3758504f5e058275725c35ca7faca3731572b911
|
LDS
|
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
class LDS(nn.Module):
def __init__(self):
super(LDS, self).__init__()
self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
def forward(self, x):
x_pool1 = self.pool1(x)
x_pool2 = self.pool2(x_pool1)
x_pool3 = self.pool3(x_pool2)
return x_pool3
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(16384)](arg0_1,
buf0, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_poi_fused_max_pool2d_with_indices_1[grid(4096)](buf0, buf1,
4096, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_2[grid(1024)](buf1, buf2,
1024, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
return buf2,
class LDSNew(nn.Module):
def __init__(self):
super(LDSNew, self).__init__()
self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
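# Shape sketch (added): three stride-2 max pools halve each spatial side,
# 64 -> 32 -> 16 -> 8, matching the buffer shapes allocated in call().
if __name__ == '__main__':
    if torch.cuda.is_available():
        out = LDSNew()(torch.rand(4, 4, 64, 64, device='cuda'))
        assert out.shape == (4, 4, 8, 8)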
|
vaesl/LRF-Net
|
LDS
| false
| 16,653
|
[
"MIT"
] | 180
|
e44b120dd55288c02852f8e58cda31313525d748
|
https://github.com/vaesl/LRF-Net/tree/e44b120dd55288c02852f8e58cda31313525d748
|
conv2d
|
import torch
import torch.nn as nn
from torch.autograd import Variable
def spectral_norm(module, name='weight'):
SpectralNorm.apply(module, name)
return module
class SpectralNorm:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
u = getattr(module, self.name + '_u')
size = weight.size()
weight_mat = weight.contiguous().view(size[0], -1)
if weight_mat.is_cuda:
            u = u.cuda()
v = weight_mat.t() @ u
v = v / v.norm()
u = weight_mat @ v
u = u / u.norm()
weight_sn = weight_mat / (u.t() @ weight_mat @ v)
weight_sn = weight_sn.view(*size)
return weight_sn, Variable(u.data)
@staticmethod
def apply(module, name):
fn = SpectralNorm(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
input_size = weight.size(0)
u = Variable(torch.randn(input_size, 1) * 0.1, requires_grad=False)
setattr(module, name + '_u', u)
setattr(module, name, fn.compute_weight(module)[0])
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight_sn, u = self.compute_weight(module)
setattr(module, self.name, weight_sn)
setattr(module, self.name + '_u', u)
class conv2d(nn.Module):
def __init__(self, in_channels, out_channels, padding, kernel_size=4,
stride=2, spectral_normed=False):
super(conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=padding)
if spectral_normed:
self.conv = spectral_norm(self.conv)
def forward(self, input):
out = self.conv(input)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'padding': 4}]
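# Hedged note (added): SpectralNorm.compute_weight runs one power-iteration
# step, v = W^T u / ||W^T u|| then u = W v / ||W v||, and rescales the weight
# by the Rayleigh quotient u^T W v, an estimate of its largest singular value.
if __name__ == '__main__':
    torch.manual_seed(0)
    m = conv2d(in_channels=4, out_channels=4, padding=4, spectral_normed=True)
    out = m(torch.rand(4, 4, 4, 4))  # the pre-hook re-estimates sigma per call
    assert out.shape == (4, 4, 5, 5)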
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 25 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(400)](buf1, primals_2, 400,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
def spectral_norm(module, name='weight'):
SpectralNorm.apply(module, name)
return module
class SpectralNorm:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
u = getattr(module, self.name + '_u')
size = weight.size()
weight_mat = weight.contiguous().view(size[0], -1)
if weight_mat.is_cuda:
            u = u.cuda()
v = weight_mat.t() @ u
v = v / v.norm()
u = weight_mat @ v
u = u / u.norm()
weight_sn = weight_mat / (u.t() @ weight_mat @ v)
weight_sn = weight_sn.view(*size)
return weight_sn, Variable(u.data)
@staticmethod
def apply(module, name):
fn = SpectralNorm(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
input_size = weight.size(0)
u = Variable(torch.randn(input_size, 1) * 0.1, requires_grad=False)
setattr(module, name + '_u', u)
setattr(module, name, fn.compute_weight(module)[0])
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight_sn, u = self.compute_weight(module)
setattr(module, self.name, weight_sn)
setattr(module, self.name + '_u', u)
class conv2dNew(nn.Module):
def __init__(self, in_channels, out_channels, padding, kernel_size=4,
stride=2, spectral_normed=False):
super(conv2dNew, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=padding)
if spectral_normed:
self.conv = spectral_norm(self.conv)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
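# Worked size check (added): the output side is
# floor((4 + 2*4 - 4) / 2) + 1 = 5, which is why call() asserts that buf0
# has shape (4, 4, 5, 5).
if __name__ == '__main__':
    if torch.cuda.is_available():
        m = conv2dNew(in_channels=4, out_channels=4, padding=4).cuda()
        assert m(torch.rand(4, 4, 4, 4, device='cuda')).shape == (4, 4, 5, 5)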
|
vandit15/Self-Supervised-Gans-Pytorch
|
conv2d
| false
| 16,654
|
[
"MIT"
] | 66
|
01408fcce3e6cf4795d90c0f9d27e6906d5b59f3
|
https://github.com/vandit15/Self-Supervised-Gans-Pytorch/tree/01408fcce3e6cf4795d90c0f9d27e6906d5b59f3
|
EntropyLossEncap
|
import torch
from torch import nn
def feature_map_permute(input):
s = input.data.shape
l = len(s)
if l == 2:
x = input
elif l == 3:
x = input.permute(0, 2, 1)
elif l == 4:
x = input.permute(0, 2, 3, 1)
elif l == 5:
x = input.permute(0, 2, 3, 4, 1)
else:
        raise ValueError('unsupported feature map dimensionality: %d' % l)
x = x.contiguous()
x = x.view(-1, s[1])
return x
class EntropyLoss(nn.Module):
def __init__(self, eps=1e-12):
super(EntropyLoss, self).__init__()
self.eps = eps
def forward(self, x):
b = x * torch.log(x + self.eps)
b = -1.0 * b.sum(dim=1)
b = b.mean()
return b
class EntropyLossEncap(nn.Module):
def __init__(self, eps=1e-12):
super(EntropyLossEncap, self).__init__()
self.eps = eps
self.entropy_loss = EntropyLoss(eps)
def forward(self, input):
score = feature_map_permute(input)
ent_loss_val = self.entropy_loss(score)
return ent_loss_val
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mean_mul_sum_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (64 * (r0 // 16) + r0 % 16), None)
tmp5 = tl.load(in_ptr0 + (16 + 64 * (r0 // 16) + r0 % 16), None)
tmp10 = tl.load(in_ptr0 + (32 + 64 * (r0 // 16) + r0 % 16), None)
tmp15 = tl.load(in_ptr0 + (48 + 64 * (r0 // 16) + r0 % 16), None)
tmp1 = 1e-12
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp4 = tmp0 * tmp3
tmp6 = tmp5 + tmp1
tmp7 = tl_math.log(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tmp4 + tmp8
tmp11 = tmp10 + tmp1
tmp12 = tl_math.log(tmp11)
tmp13 = tmp10 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp15 + tmp1
tmp17 = tl_math.log(tmp16)
tmp18 = tmp15 * tmp17
tmp19 = tmp14 + tmp18
tmp20 = -1.0
tmp21 = tmp19 * tmp20
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = 64.0
tmp26 = tmp24 / tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log_mean_mul_sum_0[grid(1)](buf1, arg0_1, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
def feature_map_permute(input):
s = input.data.shape
l = len(s)
if l == 2:
x = input
elif l == 3:
x = input.permute(0, 2, 1)
elif l == 4:
x = input.permute(0, 2, 3, 1)
elif l == 5:
x = input.permute(0, 2, 3, 4, 1)
else:
        raise ValueError('unsupported feature map dimensionality: %d' % l)
x = x.contiguous()
x = x.view(-1, s[1])
return x
class EntropyLoss(nn.Module):
def __init__(self, eps=1e-12):
super(EntropyLoss, self).__init__()
self.eps = eps
def forward(self, x):
b = x * torch.log(x + self.eps)
b = -1.0 * b.sum(dim=1)
b = b.mean()
return b
class EntropyLossEncapNew(nn.Module):
def __init__(self, eps=1e-12):
super(EntropyLossEncapNew, self).__init__()
self.eps = eps
self.entropy_loss = EntropyLoss(eps)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
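# Hedged numeric check (added): with channels moved last, the loss is
# mean(-sum(p * log(p + eps), dim=1)); for a uniform map p = 0.25 over 4
# channels this is -4 * 0.25 * log(0.25) = log(4) ~ 1.3863.
if __name__ == '__main__':
    if torch.cuda.is_available():
        x = torch.full((4, 4, 4, 4), 0.25, device='cuda')
        val = EntropyLossEncapNew()(x)
        expected = torch.log(torch.tensor(4.0, device='cuda'))
        assert torch.allclose(val, expected, atol=1e-4)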
|
vartikagpt10/memae-anomaly-detection
|
EntropyLossEncap
| false
| 16,655
|
[
"MIT"
] | 297
|
ceece7714fb241e82ef3f3785d3d1ed86c28113e
|
https://github.com/vartikagpt10/memae-anomaly-detection/tree/ceece7714fb241e82ef3f3785d3d1ed86c28113e
|
deconv2d
|
import torch
import torch.nn as nn
from torch.autograd import Variable
def spectral_norm(module, name='weight'):
SpectralNorm.apply(module, name)
return module
class SpectralNorm:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
u = getattr(module, self.name + '_u')
size = weight.size()
weight_mat = weight.contiguous().view(size[0], -1)
if weight_mat.is_cuda:
            u = u.cuda()
v = weight_mat.t() @ u
v = v / v.norm()
u = weight_mat @ v
u = u / u.norm()
weight_sn = weight_mat / (u.t() @ weight_mat @ v)
weight_sn = weight_sn.view(*size)
return weight_sn, Variable(u.data)
@staticmethod
def apply(module, name):
fn = SpectralNorm(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
input_size = weight.size(0)
u = Variable(torch.randn(input_size, 1) * 0.1, requires_grad=False)
setattr(module, name + '_u', u)
setattr(module, name, fn.compute_weight(module)[0])
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight_sn, u = self.compute_weight(module)
setattr(module, self.name, weight_sn)
setattr(module, self.name + '_u', u)
class deconv2d(nn.Module):
def __init__(self, in_channels, out_channels, padding, kernel_size=(4,
4), stride=(2, 2), spectral_normed=False, iter=1):
super(deconv2d, self).__init__()
self.devconv = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride, padding=padding)
if spectral_normed:
            self.devconv = spectral_norm(self.devconv)
def forward(self, input):
out = self.devconv(input)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'padding': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(4, 4), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
def spectral_norm(module, name='weight'):
SpectralNorm.apply(module, name)
return module
class SpectralNorm:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
u = getattr(module, self.name + '_u')
size = weight.size()
weight_mat = weight.contiguous().view(size[0], -1)
if weight_mat.is_cuda:
            u = u.cuda()
v = weight_mat.t() @ u
v = v / v.norm()
u = weight_mat @ v
u = u / u.norm()
weight_sn = weight_mat / (u.t() @ weight_mat @ v)
weight_sn = weight_sn.view(*size)
return weight_sn, Variable(u.data)
@staticmethod
def apply(module, name):
fn = SpectralNorm(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
input_size = weight.size(0)
u = Variable(torch.randn(input_size, 1) * 0.1, requires_grad=False)
setattr(module, name + '_u', u)
setattr(module, name, fn.compute_weight(module)[0])
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight_sn, u = self.compute_weight(module)
setattr(module, self.name, weight_sn)
setattr(module, self.name + '_u', u)
class deconv2dNew(nn.Module):
def __init__(self, in_channels, out_channels, padding, kernel_size=(4,
4), stride=(2, 2), spectral_normed=False, iter=1):
super(deconv2dNew, self).__init__()
self.devconv = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride, padding=padding)
if spectral_normed:
            self.devconv = spectral_norm(self.devconv)
def forward(self, input_0):
primals_1 = self.devconv.weight
primals_2 = self.devconv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
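# Worked size check (added): a transposed convolution produces side
# (H - 1) * stride - 2 * padding + kernel = (4 - 1) * 2 - 8 + 4 = 2,
# matching the (4, 4, 2, 2) assertion on buf0 in call().
if __name__ == '__main__':
    if torch.cuda.is_available():
        m = deconv2dNew(in_channels=4, out_channels=4, padding=4).cuda()
        assert m(torch.rand(4, 4, 4, 4, device='cuda')).shape == (4, 4, 2, 2)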
|
vandit15/Self-Supervised-Gans-Pytorch
|
deconv2d
| false
| 16,656
|
[
"MIT"
] | 66
|
01408fcce3e6cf4795d90c0f9d27e6906d5b59f3
|
https://github.com/vandit15/Self-Supervised-Gans-Pytorch/tree/01408fcce3e6cf4795d90c0f9d27e6906d5b59f3
|
L1RankLoss
|
import torch
import torch.nn.functional as F
import torch.onnx
class L1RankLoss(torch.nn.Module):
"""
L1 loss + Rank loss
"""
def __init__(self, **kwargs):
super(L1RankLoss, self).__init__()
self.l1_w = kwargs.get('l1_w', 1)
self.rank_w = kwargs.get('rank_w', 1)
self.hard_thred = kwargs.get('hard_thred', 1)
self.use_margin = kwargs.get('use_margin', False)
def forward(self, preds, gts):
preds = preds.view(-1)
gts = gts.view(-1)
l1_loss = F.l1_loss(preds, gts) * self.l1_w
n = len(preds)
preds = preds.unsqueeze(0).repeat(n, 1)
preds_t = preds.t()
img_label = gts.unsqueeze(0).repeat(n, 1)
img_label_t = img_label.t()
masks = torch.sign(img_label - img_label_t)
masks_hard = (torch.abs(img_label - img_label_t) < self.hard_thred) & (
torch.abs(img_label - img_label_t) > 0)
if self.use_margin:
rank_loss = masks_hard * torch.relu(torch.abs(img_label -
img_label_t) - masks * (preds - preds_t))
else:
rank_loss = masks_hard * torch.relu(-masks * (preds - preds_t))
rank_loss = rank_loss.sum() / (masks_hard.sum() + 1e-08)
loss_total = l1_loss + rank_loss * self.rank_w
return loss_total
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel
):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_red_fused_abs_bitwise_and_gt_lt_mul_neg_relu_repeat_sign_sub_sum_1(
in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xnumel = 8
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp25 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.int64)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + r1 % 256, rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.load(in_ptr0 + (32 * x0 + r1 // 256), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + r1 % 256, rmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tl.load(in_ptr1 + (32 * x0 + r1 // 256), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = 0.0
tmp7 = tmp3 > tmp6
tmp8 = tmp5 & tmp7
tmp9 = tmp8.to(tl.float32)
tmp10 = tl.full([1, 1], 0, tl.int32)
tmp11 = tmp10 < tmp2
tmp12 = tmp11.to(tl.int8)
tmp13 = tmp2 < tmp10
tmp14 = tmp13.to(tl.int8)
tmp15 = tmp12 - tmp14
tmp16 = tmp15.to(tmp2.dtype)
tmp17 = -tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp17 * tmp20
tmp22 = triton_helpers.maximum(tmp10, tmp21)
tmp23 = tmp9 * tmp22
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = _tmp25 + tmp24
_tmp25 = tl.where(rmask & xmask, tmp26, _tmp25)
tmp27 = tmp8.to(tl.int64)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = _tmp29 + tmp28
_tmp29 = tl.where(rmask & xmask, tmp30, _tmp29)
tmp25 = tl.sum(_tmp25, 1)[:, None]
tl.store(out_ptr0 + x0, tmp25, xmask)
tmp29 = tl.sum(_tmp29, 1)[:, None]
tl.store(out_ptr1 + x0, tmp29, xmask)
@triton.jit
def triton_per_fused_abs_add_bitwise_and_div_gt_lt_mean_mul_neg_relu_repeat_sign_sub_sum_2(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp8 = tl.load(in_out_ptr0 + 0)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, 1])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tmp12 = 1.0
tmp13 = tmp11 * tmp12
tmp14 = tmp7.to(tl.float32)
tmp15 = 1e-08
tmp16 = tmp14 + tmp15
tmp17 = tmp3 / tmp16
tmp18 = tmp17 * tmp12
tmp19 = tmp13 + tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_mean_sub_0[grid(1)](arg0_1, arg1_1, buf0, 1,
256, num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((8,), (1,), torch.float32)
buf3 = empty_strided_cuda((8,), (1,), torch.int64)
triton_red_fused_abs_bitwise_and_gt_lt_mul_neg_relu_repeat_sign_sub_sum_1[
grid(8)](arg1_1, arg0_1, buf1, buf3, 8, 8192, XBLOCK=1, RBLOCK=
2048, num_warps=16, num_stages=1)
del arg0_1
del arg1_1
buf5 = buf0
del buf0
triton_per_fused_abs_add_bitwise_and_div_gt_lt_mean_mul_neg_relu_repeat_sign_sub_sum_2[
grid(1)](buf5, buf1, buf3, 1, 8, XBLOCK=1, num_warps=2,
num_stages=1)
del buf1
del buf3
return buf5,
class L1RankLossNew(torch.nn.Module):
"""
L1 loss + Rank loss
"""
def __init__(self, **kwargs):
super(L1RankLossNew, self).__init__()
self.l1_w = kwargs.get('l1_w', 1)
self.rank_w = kwargs.get('rank_w', 1)
self.hard_thred = kwargs.get('hard_thred', 1)
self.use_margin = kwargs.get('use_margin', False)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
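# Hedged sanity sketch (added): with preds == gts the L1 term is zero and
# every pairwise label difference is zero, so masks_hard is all False and
# the rank term is 0 / (0 + 1e-08); the total loss should be exactly 0.
if __name__ == '__main__':
    if torch.cuda.is_available():
        x = torch.rand(4, 4, 4, 4, device='cuda')
        assert L1RankLossNew()(x, x).item() == 0.0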
|
usutdzxych/CenseoQoE
|
L1RankLoss
| false
| 16,658
|
[
"BSD-3-Clause"
] | 75
|
3f653296b223da6190e1e1781e7b9b54ff877102
|
https://github.com/usutdzxych/CenseoQoE/tree/3f653296b223da6190e1e1781e7b9b54ff877102
|
Iter_Downsample
|
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
class Iter_Downsample(nn.Module):
def __init__(self):
super(Iter_Downsample, self).__init__()
self.init_ds = nn.Sequential(nn.MaxPool2d(kernel_size=2, stride=2,
padding=0), nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
self.ds1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
self.ds2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.ds3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
self.ds4 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
def forward(self, x):
x = self.init_ds(x)
x1 = self.ds1(x)
x2 = self.ds2(x1)
x3 = self.ds3(x2)
x4 = self.ds4(x3)
return x1, x2, x3, x4
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 9 % 9
x0 = xindex % 9
x2 = xindex // 81
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-17 + 2 * x0 + 32 * x1 + 256 * x2), tmp10 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-16 + 2 * x0 + 32 * x1 + 256 * x2), tmp16 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (-1 + 2 * x0 + 32 * x1 + 256 * x2), tmp23 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (2 * x0 + 32 * x1 + 256 * x2), tmp26 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tl.store(out_ptr0 + x4, tmp28, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 18 * x1 + 81 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 18 * x1 + 81 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (9 + 2 * x0 + 18 * x1 + 81 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (10 + 2 * x0 + 18 * x1 + 81 * x2), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3 % 3
x0 = xindex % 3
x2 = xindex // 9
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp10 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp23 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tl.store(out_ptr0 + x4, tmp28, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 9 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 9 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (3 + 9 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + 9 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(16384)](arg0_1,
buf0, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_poi_fused_max_pool2d_with_indices_1[grid(4096)](buf0, buf1,
4096, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_2[grid(1296)](buf1, buf2,
1296, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(256)](buf2, buf3,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_4[grid(144)](buf3, buf4,
144, XBLOCK=256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_5[grid(16)](buf4, buf5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf2, buf3, buf4, buf5
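# The six kernels above form a chain of 2x2/stride-2 max pools taking the spatial size
# 64 -> 32 -> 16 -> 9 -> 4 -> 3 -> 1 (the odd sizes come from the padding=1 stages);
# only the last four feature maps are returned.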
class Iter_DownsampleNew(nn.Module):
def __init__(self):
super(Iter_DownsampleNew, self).__init__()
self.init_ds = nn.Sequential(nn.MaxPool2d(kernel_size=2, stride=2,
padding=0), nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
self.ds1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
self.ds2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.ds3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
self.ds4 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1], output[2], output[3]
| repo_name: vaesl/LFIP | module_name: Iter_Downsample | synthetic: false | uuid: 16659 | licenses: ["MIT"] | stars: 59 |
| sha: eb9d934616c508c9a9032f170baa1d97fa792822 | repo_link: https://github.com/vaesl/LFIP/tree/eb9d934616c508c9a9032f170baa1d97fa792822 |
| entry_point: _Residual_Block |
| original_triton_python_code: |
import torch
import torch.nn as nn
class _Residual_Block(nn.Module):
def __init__(self):
super(_Residual_Block, self).__init__()
self.conv1 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1, padding=1, bias=False)
def forward(self, x):
identity_data = x
output = self.relu(self.conv1(x))
output = self.conv2(output)
output *= 0.1
output = torch.add(output, identity_data)
return output
def get_inputs():
return [torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return [[], {}]
| optimised_triton_code: |
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 256 * x2 + 1048576 * y1), tmp0, None)
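# This kernel and triton_poi_fused_1 below are pure layout shuffles: they repack the
# activations and the 3x3 weights from contiguous NCHW into channels-last strides so
# that the extern convolutions in call() run on a memory-friendly layout.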
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4096
y1 = yindex // 4096
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr1 + (x2 + 256 * y3), xmask, eviction_policy=
'evict_last')
tmp1 = 0.1
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (y0 + 4096 * x2 + 1048576 * y1), tmp4, xmask)
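# Epilogue kernel: fuses the residual scaling (output * 0.1) with the skip addition and
# simultaneously converts the channels-last result back to contiguous NCHW.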
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_3, (256, 256, 3, 3), (2304, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(1024, 4096)](primals_1, buf0, 1024, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_1[grid(65536, 9)](primals_2, buf1, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_1[grid(65536, 9)](primals_3, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf4 = buf3
del buf3
triton_poi_fused_relu_2[grid(4194304)](buf4, 4194304, XBLOCK=1024,
num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf4, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf6 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1),
torch.float32)
triton_poi_fused_add_mul_3[grid(16384, 256)](buf5, buf0, buf6,
16384, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf5
return buf6, buf0, buf1, buf2, buf4
class _Residual_BlockNew(nn.Module):
def __init__(self):
super(_Residual_BlockNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(in_channels=256, out_channels=256,
kernel_size=3, stride=1, padding=1, bias=False)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| repo_name: twtygqyy/pytorch-EDSR | module_name: _Residual_Block | synthetic: false | uuid: 16661 | licenses: ["MIT"] | stars: 59 |
| sha: 001031b6563fcc45d4e7edb7e14c41fb9982ce64 | repo_link: https://github.com/twtygqyy/pytorch-EDSR/tree/001031b6563fcc45d4e7edb7e14c41fb9982ce64 |
| entry_point: Residual_D |
| original_triton_python_code: |
import torch
import torch.nn as nn
from torch.autograd import Variable
def spectral_norm(module, name='weight'):
SpectralNorm.apply(module, name)
return module
class SpectralNorm:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
u = getattr(module, self.name + '_u')
size = weight.size()
weight_mat = weight.contiguous().view(size[0], -1)
        if weight_mat.is_cuda:
            u = u.cuda()  # keep the power-iteration vector on the same device as the weight
v = weight_mat.t() @ u
v = v / v.norm()
u = weight_mat @ v
u = u / u.norm()
weight_sn = weight_mat / (u.t() @ weight_mat @ v)
weight_sn = weight_sn.view(*size)
return weight_sn, Variable(u.data)
@staticmethod
def apply(module, name):
fn = SpectralNorm(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
input_size = weight.size(0)
u = Variable(torch.randn(input_size, 1) * 0.1, requires_grad=False)
setattr(module, name + '_u', u)
setattr(module, name, fn.compute_weight(module)[0])
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight_sn, u = self.compute_weight(module)
setattr(module, self.name, weight_sn)
setattr(module, self.name + '_u', u)
class conv2d(nn.Module):
def __init__(self, in_channels, out_channels, padding, kernel_size=4,
stride=2, spectral_normed=False):
super(conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=padding)
if spectral_normed:
self.conv = spectral_norm(self.conv)
def forward(self, input):
out = self.conv(input)
return out
class Residual_D(nn.Module):
def __init__(self, in_channels, out_channels, kernel=3, stride=1,
spectral_normed=False, down_sampling=False, is_start=False):
super(Residual_D, self).__init__()
self.down_sampling = down_sampling
self.is_start = is_start
self.avgpool_short = nn.AvgPool2d(2, 2, padding=1)
self.conv_short = conv2d(in_channels, out_channels, kernel_size=1,
stride=1, padding=0, spectral_normed=False)
self.conv1 = conv2d(in_channels, out_channels, spectral_normed=
spectral_normed, kernel_size=kernel, stride=stride, padding=1)
self.conv2 = conv2d(out_channels, out_channels, spectral_normed=
spectral_normed, kernel_size=kernel, stride=stride, padding=1)
self.avgpool2 = nn.AvgPool2d(2, 2, padding=1)
self.relu = nn.ReLU()
def forward(self, x):
input = x
if self.is_start:
conv1 = self.relu(self.conv1(x))
conv2 = self.relu(self.conv2(conv1))
if self.down_sampling:
conv2 = self.avgpool2(conv2)
else:
conv1 = self.conv1(self.relu(x))
conv2 = self.conv2(self.relu(conv1))
if self.down_sampling:
conv2 = self.avgpool2(conv2)
if self.down_sampling:
input = self.avgpool_short(input)
resi = self.conv_short(input)
return resi + conv2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| optimised_triton_code: |
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr0 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
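# Final kernel of this graph: the two remaining conv bias adds and the shortcut + main
# branch sum are fused into a single elementwise pass, so no separate bias or add
# kernels need to be launched.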
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = extern_kernels.convolution(primals_1, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf3
del buf3
triton_poi_fused_add_convolution_2[grid(256)](buf5, buf4, primals_7,
primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf4
del primals_5
del primals_7
return buf5, primals_1, primals_2, primals_4, primals_6, buf0, buf2
def spectral_norm(module, name='weight'):
SpectralNorm.apply(module, name)
return module
class SpectralNorm:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
u = getattr(module, self.name + '_u')
size = weight.size()
weight_mat = weight.contiguous().view(size[0], -1)
        if weight_mat.is_cuda:
            u = u.cuda()  # keep the power-iteration vector on the same device as the weight
v = weight_mat.t() @ u
v = v / v.norm()
u = weight_mat @ v
u = u / u.norm()
weight_sn = weight_mat / (u.t() @ weight_mat @ v)
weight_sn = weight_sn.view(*size)
return weight_sn, Variable(u.data)
@staticmethod
def apply(module, name):
fn = SpectralNorm(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
input_size = weight.size(0)
u = Variable(torch.randn(input_size, 1) * 0.1, requires_grad=False)
setattr(module, name + '_u', u)
setattr(module, name, fn.compute_weight(module)[0])
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight_sn, u = self.compute_weight(module)
setattr(module, self.name, weight_sn)
setattr(module, self.name + '_u', u)
class conv2d(nn.Module):
def __init__(self, in_channels, out_channels, padding, kernel_size=4,
stride=2, spectral_normed=False):
super(conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=padding)
if spectral_normed:
self.conv = spectral_norm(self.conv)
def forward(self, input):
out = self.conv(input)
return out
class Residual_DNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel=3, stride=1,
spectral_normed=False, down_sampling=False, is_start=False):
super(Residual_DNew, self).__init__()
self.down_sampling = down_sampling
self.is_start = is_start
self.avgpool_short = nn.AvgPool2d(2, 2, padding=1)
self.conv_short = conv2d(in_channels, out_channels, kernel_size=1,
stride=1, padding=0, spectral_normed=False)
self.conv1 = conv2d(in_channels, out_channels, spectral_normed=
spectral_normed, kernel_size=kernel, stride=stride, padding=1)
self.conv2 = conv2d(out_channels, out_channels, spectral_normed=
spectral_normed, kernel_size=kernel, stride=stride, padding=1)
self.avgpool2 = nn.AvgPool2d(2, 2, padding=1)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_6 = self.conv_short.conv.weight
primals_3 = self.conv_short.conv.bias
primals_2 = self.conv1.conv.weight
primals_5 = self.conv1.conv.bias
primals_4 = self.conv2.conv.weight
primals_7 = self.conv2.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| repo_name: vandit15/Self-Supervised-Gans-Pytorch | module_name: Residual_D | synthetic: false | uuid: 16663 | licenses: ["MIT"] | stars: 66 |
| sha: 01408fcce3e6cf4795d90c0f9d27e6906d5b59f3 | repo_link: https://github.com/vandit15/Self-Supervised-Gans-Pytorch/tree/01408fcce3e6cf4795d90c0f9d27e6906d5b59f3 |
| entry_point: GumbelSoftmaxLayer |
| original_triton_python_code: |
import torch
import torch.nn as nn
from torch.distributions import RelaxedOneHotCategorical
import torch.nn.parallel
import torch.utils.data
import torch.distributions
def gumbel_softmax_sample(logits: 'torch.Tensor', temperature: 'float'=1.0,
training: 'bool'=True, straight_through: 'bool'=False):
size = logits.size()
if not training:
indexes = logits.argmax(dim=-1)
one_hot = torch.zeros_like(logits).view(-1, size[-1])
one_hot.scatter_(1, indexes.view(-1, 1), 1)
one_hot = one_hot.view(*size)
return one_hot
sample = RelaxedOneHotCategorical(logits=logits, temperature=temperature
).rsample()
if straight_through:
size = sample.size()
indexes = sample.argmax(dim=-1)
hard_sample = torch.zeros_like(sample).view(-1, size[-1])
hard_sample.scatter_(1, indexes.view(-1, 1), 1)
hard_sample = hard_sample.view(*size)
sample = sample + (hard_sample - sample).detach()
return sample
class GumbelSoftmaxLayer(nn.Module):
def __init__(self, temperature: 'float'=1.0, trainable_temperature:
'bool'=False, straight_through: 'bool'=False):
super(GumbelSoftmaxLayer, self).__init__()
self.straight_through = straight_through
if not trainable_temperature:
self.temperature = temperature
else:
self.temperature = torch.nn.Parameter(torch.tensor([temperature
]), requires_grad=True)
def forward(self, logits: 'torch.Tensor'):
return gumbel_softmax_sample(logits, self.temperature, self.
training, self.straight_through)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| optimised_triton_code: |
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.distributions import RelaxedOneHotCategorical
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x0, tmp46, xmask)
@triton.jit
def triton_poi_fused_scatter_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(in_out_ptr0 + x4, tmp5, xmask)
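# Together these two kernels implement the eval-mode branch of gumbel_softmax_sample:
# an argmax over the last (size-4) dimension with NaN-aware comparisons that break ties
# toward the lower index, followed by materializing the one-hot directly by comparing
# each column index against the winning index.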
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_argmax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_scatter_1[grid(256)](buf2, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
return buf2,
def gumbel_softmax_sample(logits: 'torch.Tensor', temperature: 'float'=1.0,
training: 'bool'=True, straight_through: 'bool'=False):
size = logits.size()
if not training:
indexes = logits.argmax(dim=-1)
one_hot = torch.zeros_like(logits).view(-1, size[-1])
one_hot.scatter_(1, indexes.view(-1, 1), 1)
one_hot = one_hot.view(*size)
return one_hot
sample = RelaxedOneHotCategorical(logits=logits, temperature=temperature
).rsample()
if straight_through:
size = sample.size()
indexes = sample.argmax(dim=-1)
hard_sample = torch.zeros_like(sample).view(-1, size[-1])
hard_sample.scatter_(1, indexes.view(-1, 1), 1)
hard_sample = hard_sample.view(*size)
sample = sample + (hard_sample - sample).detach()
return sample
class GumbelSoftmaxLayerNew(nn.Module):
def __init__(self, temperature: 'float'=1.0, trainable_temperature:
'bool'=False, straight_through: 'bool'=False):
super(GumbelSoftmaxLayerNew, self).__init__()
self.straight_through = straight_through
if not trainable_temperature:
self.temperature = temperature
else:
self.temperature = torch.nn.Parameter(torch.tensor([temperature
]), requires_grad=True)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| repo_name: vengalraoguttha/EGG | module_name: GumbelSoftmaxLayer | synthetic: false | uuid: 16664 | licenses: ["MIT"] | stars: 254 |
| sha: e4f8412f197543ec7f1f00cf89b5a364b038dc57 | repo_link: https://github.com/vengalraoguttha/EGG/tree/e4f8412f197543ec7f1f00cf89b5a364b038dc57 |
| entry_point: ReinforcedReceiver |
| original_triton_python_code: |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
from torch.distributions import Bernoulli
import torch.distributions
class ReinforcedReceiver(nn.Module):
def __init__(self, n_bits, n_hidden):
super(ReinforcedReceiver, self).__init__()
self.emb_column = nn.Linear(n_bits, n_hidden)
self.fc1 = nn.Linear(2 * n_hidden, 2 * n_hidden)
self.fc2 = nn.Linear(2 * n_hidden, n_bits)
def forward(self, embedded_message, bits, _aux_input=None):
embedded_bits = self.emb_column(bits.float())
x = torch.cat([embedded_bits, embedded_message], dim=1)
x = self.fc1(x)
x = F.leaky_relu(x)
x = self.fc2(x)
probs = x.sigmoid()
distr = Bernoulli(probs=probs)
entropy = distr.entropy()
if self.training:
sample = distr.sample()
else:
sample = (probs > 0.5).float()
log_prob = distr.log_prob(sample).sum(dim=1)
return sample, log_prob, entropy
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_bits': 4, 'n_hidden': 4}]
| optimised_triton_code: |
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (8, 8), (8, 1))
assert_size_stride(primals_6, (8,), (1,))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf0 = reinterpret_tensor(buf2, (4, 4), (8, 1), 0)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = reinterpret_tensor(buf2, (4, 4), (8, 1), 4)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(16)](primals_4, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (8, 8), (1, 8
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 8), (8, 1), torch.bool)
buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(32)](buf3, primals_6, buf4, buf5,
32, XBLOCK=32, num_warps=1, num_stages=1)
del buf3
del primals_6
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (8, 4), (1, 8
), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_sigmoid_2[grid(16)](buf7, primals_8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_8
return buf7, buf7, primals_1, buf2, buf4, buf5, buf7, primals_7, primals_5
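# Layout trick: buf2 is allocated up front as the concatenated (4, 8) activation and
# buf0/buf1 are strided views into its two halves, so torch.cat([embedded_bits,
# embedded_message], dim=1) costs one addmm plus a 16-element copy instead of a
# separate concat pass. Note the traced graph stops at the sigmoid (probs); the
# Bernoulli sampling, log_prob and entropy of the original forward are not captured.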
class ReinforcedReceiverNew(nn.Module):
def __init__(self, n_bits, n_hidden):
super(ReinforcedReceiverNew, self).__init__()
self.emb_column = nn.Linear(n_bits, n_hidden)
self.fc1 = nn.Linear(2 * n_hidden, 2 * n_hidden)
self.fc2 = nn.Linear(2 * n_hidden, n_bits)
def forward(self, input_0, input_1):
primals_1 = self.emb_column.weight
primals_3 = self.emb_column.bias
primals_5 = self.fc1.weight
primals_6 = self.fc1.bias
primals_7 = self.fc2.weight
primals_8 = self.fc2.bias
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1], output[2]
| repo_name: vengalraoguttha/EGG | module_name: ReinforcedReceiver | synthetic: false | uuid: 16665 | licenses: ["MIT"] | stars: 254 |
| sha: e4f8412f197543ec7f1f00cf89b5a364b038dc57 | repo_link: https://github.com/vengalraoguttha/EGG/tree/e4f8412f197543ec7f1f00cf89b5a364b038dc57 |
| entry_point: EntropyLoss |
| original_triton_python_code: |
import torch
from torch import nn
class EntropyLoss(nn.Module):
def __init__(self, eps=1e-12):
super(EntropyLoss, self).__init__()
self.eps = eps
def forward(self, x):
b = x * torch.log(x + self.eps)
b = -1.0 * b.sum(dim=1)
b = b.mean()
return b
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| optimised_triton_code: |
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mean_mul_sum_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp1 = 1e-12
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp4 = tmp0 * tmp3
tmp6 = tmp5 + tmp1
tmp7 = tl_math.log(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tmp4 + tmp8
tmp11 = tmp10 + tmp1
tmp12 = tl_math.log(tmp11)
tmp13 = tmp10 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp15 + tmp1
tmp17 = tl_math.log(tmp16)
tmp18 = tmp15 * tmp17
tmp19 = tmp14 + tmp18
tmp20 = -1.0
tmp21 = tmp19 * tmp20
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = 64.0
tmp26 = tmp24 / tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)
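# Single-pass reduction: x * log(x + eps) is computed with the channel (dim=1) sum
# unrolled over the four 16-element slices, negated, summed over the remaining 64
# positions, and divided by 64.0 to produce the scalar mean.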
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log_mean_mul_sum_0[grid(1)](buf1, arg0_1, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class EntropyLossNew(nn.Module):
def __init__(self, eps=1e-12):
super(EntropyLossNew, self).__init__()
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| repo_name: vartikagpt10/memae-anomaly-detection | module_name: EntropyLoss | synthetic: false | uuid: 16666 | licenses: ["MIT"] | stars: 297 |
| sha: ceece7714fb241e82ef3f3785d3d1ed86c28113e | repo_link: https://github.com/vartikagpt10/memae-anomaly-detection/tree/ceece7714fb241e82ef3f3785d3d1ed86c28113e |
| entry_point: BahdanauAttention |
| original_triton_python_code: |
import torch
import torch.nn as nn
class BahdanauAttention(nn.Module):
def __init__(self, annot_dim, query_dim, attn_dim):
super(BahdanauAttention, self).__init__()
self.query_layer = nn.Linear(query_dim, attn_dim, bias=True)
self.annot_layer = nn.Linear(annot_dim, attn_dim, bias=True)
self.v = nn.Linear(attn_dim, 1, bias=False)
def forward(self, annots, query):
"""
Shapes:
- annots: (batch, max_time, dim)
- query: (batch, 1, dim) or (batch, dim)
"""
if query.dim() == 2:
query = query.unsqueeze(1)
processed_query = self.query_layer(query)
processed_annots = self.annot_layer(annots)
alignment = self.v(torch.tanh(processed_query + processed_annots))
return alignment.squeeze(-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'annot_dim': 4, 'query_dim': 4, 'attn_dim': 4}]
| optimised_triton_code: |
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
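# Both Linear biases are folded into this kernel together with tanh(processed_query +
# processed_annots), which lets the two preceding matmuls run bias-free via plain
# extern_kernels.mm calls.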
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_tanh_0[grid(256)](buf2, primals_3, buf1,
primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_3
del primals_5
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), out=buf3)
return reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf2, primals_7
class BahdanauAttentionNew(nn.Module):
def __init__(self, annot_dim, query_dim, attn_dim):
super(BahdanauAttentionNew, self).__init__()
self.query_layer = nn.Linear(query_dim, attn_dim, bias=True)
self.annot_layer = nn.Linear(annot_dim, attn_dim, bias=True)
self.v = nn.Linear(attn_dim, 1, bias=False)
def forward(self, input_0, input_1):
primals_2 = self.query_layer.weight
primals_3 = self.query_layer.bias
primals_4 = self.annot_layer.weight
primals_5 = self.annot_layer.bias
primals_7 = self.v.weight
primals_1 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| repo_name: vigilancetrent/chatbot-advanced | module_name: BahdanauAttention | synthetic: false | uuid: 16667 | licenses: ["Apache-2.0"] | stars: 52 |
| sha: 2e0c72c4df2e1434da995b7105f8f0414aba6248 | repo_link: https://github.com/vigilancetrent/chatbot-advanced/tree/2e0c72c4df2e1434da995b7105f8f0414aba6248 |
| entry_point: Interpolate |
| original_triton_python_code: |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Interpolate(nn.Module):
def __init__(self, scale_factor, mode='bilinear', align_corners=True):
super(Interpolate, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
self.align_corners = align_corners
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode,
align_corners=self.align_corners)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale_factor': 1.0}]
| optimised_triton_code: |
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = triton_helpers.minimum(tmp23, tmp2)
tmp25 = tmp20 * tmp24
tmp26 = tmp16 + tmp25
tmp27 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp29 = tmp28 - tmp27
tmp30 = tmp29 * tmp24
tmp31 = tmp27 + tmp30
tmp32 = tmp26 - tmp31
tmp33 = tmp6.to(tl.float32)
tmp34 = tmp5 - tmp33
tmp35 = triton_helpers.maximum(tmp34, tmp4)
tmp36 = triton_helpers.minimum(tmp35, tmp2)
tmp37 = tmp32 * tmp36
tmp38 = tmp31 + tmp37
tl.store(in_out_ptr0 + x4, tmp38, xmask)
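# align_corners=True bilinear resampling; with scale_factor=1.0 the source grid
# coincides with the target grid, so the gather indices and lerp weights collapse to an
# identity copy, but the full interpolation arithmetic is still emitted.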
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(256)](buf1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf1,
class InterpolateNew(nn.Module):
def __init__(self, scale_factor, mode='bilinear', align_corners=True):
super(InterpolateNew, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
self.align_corners = align_corners
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| repo_name: vietnhatthai/3d-vehicle-tracking | module_name: Interpolate | synthetic: false | uuid: 16668 | licenses: ["BSD-3-Clause"] | stars: 603 |
| sha: 8ee189f6792897651bb56bb2950ce07c9629a89d | repo_link: https://github.com/vietnhatthai/3d-vehicle-tracking/tree/8ee189f6792897651bb56bb2950ce07c9629a89d |
| entry_point: UpSample |
| original_triton_python_code: |
import torch
import torch.nn as nn
import torch.nn.functional as F
class UpSample(nn.Sequential):
def __init__(self, skip_input, output_features):
super(UpSample, self).__init__()
self.convA = nn.Conv2d(skip_input, output_features, kernel_size=3,
stride=1, padding=1)
self.leakyreluA = nn.LeakyReLU(0.2)
self.convB = nn.Conv2d(output_features, output_features,
kernel_size=3, stride=1, padding=1)
self.leakyreluB = nn.LeakyReLU(0.2)
def forward(self, x, concat_with):
up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size
(3)], mode='bilinear', align_corners=True)
return self.leakyreluB(self.convB(self.leakyreluA(self.convA(torch.
cat([up_x, concat_with], dim=1)))))
def get_inputs():
return [torch.rand([4, 3, 4, 4]), torch.rand([4, 1, 4, 4])]
def get_init_inputs():
return [[], {'skip_input': 4, 'output_features': 4}]
| optimised_triton_code: |
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex // 48
x7 = xindex % 48
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = triton_helpers.minimum(tmp23, tmp2)
tmp25 = tmp20 * tmp24
tmp26 = tmp16 + tmp25
tmp27 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp29 = tmp28 - tmp27
tmp30 = tmp29 * tmp24
tmp31 = tmp27 + tmp30
tmp32 = tmp26 - tmp31
tmp33 = tmp6.to(tl.float32)
tmp34 = tmp5 - tmp33
tmp35 = triton_helpers.maximum(tmp34, tmp4)
tmp36 = triton_helpers.minimum(tmp35, tmp2)
tmp37 = tmp32 * tmp36
tmp38 = tmp31 + tmp37
tl.store(out_ptr1 + (x7 + 64 * x4), tmp38, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 64 * x1), tmp0, xmask)
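# The torch.cat is free here: the bilinear kernel above writes its result into channels
# 0-2 (the first 48 floats of each 64-stride image) and this kernel copies concat_with
# into channel 3 (offset 48) of the same buffer, so buf3 materializes already
# concatenated.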
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = reinterpret_tensor(buf3, (4, 3, 4, 4), (64, 16, 4, 1), 0)
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(192)](primals_2, buf1, 192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (64, 16, 4, 1), 48)
triton_poi_fused_cat_1[grid(64)](primals_1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_2[grid(256)](buf4,
primals_4, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf9 = buf4
del buf4
triton_poi_fused_convolution_leaky_relu_2[grid(256)](buf7,
primals_6, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
del primals_6
return buf9, primals_3, primals_5, buf3, buf5, buf6, buf8
class UpSampleNew(nn.Sequential):
def __init__(self, skip_input, output_features):
super(UpSampleNew, self).__init__()
self.convA = nn.Conv2d(skip_input, output_features, kernel_size=3,
stride=1, padding=1)
self.leakyreluA = nn.LeakyReLU(0.2)
self.convB = nn.Conv2d(output_features, output_features,
kernel_size=3, stride=1, padding=1)
self.leakyreluB = nn.LeakyReLU(0.2)
def forward(self, input_0, input_1):
primals_3 = self.convA.weight
primals_4 = self.convA.bias
primals_5 = self.convB.weight
primals_6 = self.convB.bias
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| repo_name: varun-affinsys/Monocular-Depth-Estimation-with-Transfer-Learning-pretrained-MobileNetV2 | module_name: UpSample | synthetic: false | uuid: 16669 | licenses: ["MIT"] | stars: 70 |
| sha: 9b20c5b3d7a9f90e1dc6f40e17ee31d9b3dee684 | repo_link: https://github.com/varun-affinsys/Monocular-Depth-Estimation-with-Transfer-Learning-pretrained-MobileNetV2/tree/9b20c5b3d7a9f90e1dc6f40e17ee31d9b3dee684 |
| entry_point: GTConv_2 |
| original_triton_python_code: |
import torch
import torch.nn.functional as F
from torch import nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
class GTConv_2(nn.Module):
def __init__(self, in_channels, out_channels):
super(GTConv_2, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.weight = nn.Parameter(torch.Tensor(in_channels, 1, 1))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
def forward(self, A):
A = A.permute(2, 0, 1)
A = torch.sum(F.softmax(self.weight, dim=0) * A, dim=0)
return A
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| optimised_triton_code: |
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None)
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)
@triton.jit
def triton_poi_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + 1)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp16 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr0 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp24 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr0 + 3)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp4 = tmp1 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp8 = tmp5 / tmp7
tmp10 = tmp8 * tmp9
tmp13 = tmp12 - tmp3
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp7
tmp17 = tmp15 * tmp16
tmp18 = tmp10 + tmp17
tmp21 = tmp20 - tmp3
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp22 / tmp7
tmp25 = tmp23 * tmp24
tmp26 = tmp18 + tmp25
tmp29 = tmp28 - tmp3
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp30 / tmp7
tmp33 = tmp31 * tmp32
tmp34 = tmp26 + tmp33
tl.store(out_ptr0 + x0, tmp34, xmask)
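# The first kernel computes the softmax max and denominator once for the four scalar
# weights; this one re-normalizes them inline and accumulates the weighted sum over the
# channel dimension, fully unrolled, yielding the (4, 4) aggregated adjacency.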
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 1), (1, 1, 1), torch.float32)
buf1 = empty_strided_cuda((1, 1, 1), (1, 1, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(1)](primals_2, buf0, buf1, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_mul_sum_1[grid(16)](primals_2, buf0, buf1,
primals_1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf0
del buf1
return buf2, primals_1, primals_2
class GTConv_2New(nn.Module):
def __init__(self, in_channels, out_channels):
super(GTConv_2New, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.weight = nn.Parameter(torch.Tensor(in_channels, 1, 1))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| repo_name: verashira/TSPNet | module_name: GTConv_2 | synthetic: false | uuid: 16670 | licenses: ["MIT"] | stars: 83 |
| sha: ee454165dcc61cdbbff19565364e2221727ed2b8 | repo_link: https://github.com/verashira/TSPNet/tree/ee454165dcc61cdbbff19565364e2221727ed2b8 |
| entry_point: TemporalBlock |
| original_triton_python_code: |
import torch
from torch import nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
def TemporalConvLayer(input_channels, output_channels, kernel_size):
m = nn.Conv1d(in_channels=input_channels, out_channels=output_channels,
kernel_size=kernel_size)
nn.init.xavier_normal_(m.weight)
return m
class TemporalBlock(nn.Module):
def __init__(self, input_size, output_size, num_channels, kernel_size,
dropout):
super().__init__()
self.pad = nn.ZeroPad2d((0, 0, 0, kernel_size - 1))
self.tconv = TemporalConvLayer(input_size, num_channels, kernel_size)
self.relu = nn.ReLU()
def forward(self, x):
x = self.pad(x)
x = self.tconv(x.transpose(1, 2)).transpose(1, 2)
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4, 'num_channels': 4,
'kernel_size': 4, 'dropout': 0.5}]
| optimised_triton_code: |
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 112
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 7
x2 = xindex // 28
x3 = xindex % 28
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + x4, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 28 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 7 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
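# Besides adding the conv bias and applying ReLU in place, this kernel also records the
# (activation <= 0) mask that autograd needs for the ReLU backward; that is why call()
# returns an extra bool buffer.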
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 7, 4), (28, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(112)](primals_1, buf0, 112,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32)
triton_poi_fused_convolution_1[grid(16, 7)](buf0, buf1, 16, 7,
XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
del buf1
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0)
del buf2
buf4 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(64)](buf3,
primals_3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return buf3, primals_2, reinterpret_tensor(buf0, (4, 4, 7), (28, 1, 4), 0
), buf4
def TemporalConvLayer(input_channels, output_channels, kernel_size):
m = nn.Conv1d(in_channels=input_channels, out_channels=output_channels,
kernel_size=kernel_size)
nn.init.xavier_normal_(m.weight)
return m
class TemporalBlockNew(nn.Module):
def __init__(self, input_size, output_size, num_channels, kernel_size,
dropout):
super().__init__()
self.pad = nn.ZeroPad2d((0, 0, 0, kernel_size - 1))
self.tconv = TemporalConvLayer(input_size, num_channels, kernel_size)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_1 = self.tconv.weight
primals_3 = self.tconv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
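# A hedged sanity check (not part of the generated artifact; assumes the eager
# TemporalBlock from the preceding entry is in scope): with shared weights,
# the Inductor-generated TemporalBlockNew should match the eager module up to
# floating-point tolerance on a CUDA device.
def _compare_temporal_blocks():
    torch.manual_seed(0)
    ref = TemporalBlock(4, 4, 4, 4, 0.5).cuda()
    opt = TemporalBlockNew(4, 4, 4, 4, 0.5).cuda()
    opt.load_state_dict(ref.state_dict())
    x = torch.rand([4, 4, 4], device='cuda')
    assert torch.allclose(ref(x), opt(x), atol=1e-6)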
|
verashira/TSPNet
|
TemporalBlock
| false
| 16,671
|
[
"MIT"
] | 83
|
ee454165dcc61cdbbff19565364e2221727ed2b8
|
https://github.com/verashira/TSPNet/tree/ee454165dcc61cdbbff19565364e2221727ed2b8
|
SoftmaxAllocator
|
import torch
import cvxpy as cp  # required by the variational formulation below
from cvxpylayers.torch import CvxpyLayer  # differentiable wrapper around the cvxpy problem
class SoftmaxAllocator(torch.nn.Module):
"""Portfolio creation by computing a softmax over the asset dimension with temperature.
Parameters
----------
temperature : None or float
If None, then needs to be provided per sample during forward pass. If ``float`` then assumed
to be always the same.
formulation : str, {'analytical', 'variational'}
        Controls how the problem is solved. If 'analytical' then an explicit formula is used;
        however, one cannot choose a `max_weight` different from 1. If 'variational' then the
        problem is solved via convex optimization and one can set any `max_weight`.
n_assets : None or int
        Only required and used if `formulation='variational'`.
max_weight : float
A float between (0, 1] representing the maximum weight per asset.
"""
def __init__(self, temperature=1, formulation='analytical', n_assets=
None, max_weight=1):
super().__init__()
self.temperature = temperature
if formulation not in {'analytical', 'variational'}:
raise ValueError('Unrecognized formulation {}'.format(formulation))
if formulation == 'variational' and n_assets is None:
raise ValueError(
'One needs to provide n_assets for the variational formulation.'
)
if formulation == 'analytical' and max_weight != 1:
raise ValueError(
                'Cannot constrain weights via max_weight for analytical formulation'
)
if formulation == 'variational' and n_assets * max_weight < 1:
raise ValueError(
                'One cannot create a fully invested portfolio with the given max_weight'
)
self.formulation = formulation
if formulation == 'analytical':
self.layer = torch.nn.Softmax(dim=1)
else:
x = cp.Parameter(n_assets)
w = cp.Variable(n_assets)
obj = -x @ w - cp.sum(cp.entr(w))
cons = [cp.sum(w) == 1.0, w <= max_weight]
prob = cp.Problem(cp.Minimize(obj), cons)
self.layer = CvxpyLayer(prob, [x], [w])
def forward(self, x, temperature=None):
"""Perform forward pass.
Parameters
----------
x : torch.Tensor
            Tensor of shape `(n_samples, n_assets)`.
temperature : None or torch.Tensor
If None, then using the `temperature` provided at construction time. Otherwise a `torch.Tensor` of shape
`(n_samples,)` representing a per sample temperature.
Returns
-------
weights : torch.Tensor
            Tensor of shape `(n_samples, n_assets)`.
"""
n_samples, _ = x.shape
device, dtype = x.device, x.dtype
if not (temperature is None) ^ (self.temperature is None):
raise ValueError('Not clear which temperature to use')
if temperature is not None:
temperature_ = temperature
else:
temperature_ = float(self.temperature) * torch.ones(n_samples,
dtype=dtype, device=device)
inp = x / temperature_[..., None]
return self.layer(inp
) if self.formulation == 'analytical' else self.layer(inp)[0]
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
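# A small worked example of the temperature knob (analytical formulation with
# a per-sample temperature, so the allocator is built with temperature=None):
# low temperature concentrates weight on the best asset, high temperature
# pushes the allocation toward the uniform 1/n portfolio.
def _temperature_demo():
    alloc = SoftmaxAllocator(temperature=None)
    x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
    w_sharp = alloc(x, temperature=torch.tensor([0.1]))
    w_flat = alloc(x, temperature=torch.tensor([100.0]))
    assert torch.allclose(w_sharp.sum(dim=1), torch.ones(1))
    assert torch.allclose(w_flat.sum(dim=1), torch.ones(1))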
|
import torch
import cvxpy as cp  # only needed when the variational formulation is constructed
from cvxpylayers.torch import CvxpyLayer
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
return buf1,
class SoftmaxAllocatorNew(torch.nn.Module):
"""Portfolio creation by computing a softmax over the asset dimension with temperature.
Parameters
----------
temperature : None or float
If None, then needs to be provided per sample during forward pass. If ``float`` then assumed
to be always the same.
formulation : str, {'analytical', 'variational'}
        Controls how the problem is solved. If 'analytical' then an explicit formula is used;
        however, one cannot choose a `max_weight` different from 1. If 'variational' then the
        problem is solved via convex optimization and one can set any `max_weight`.
n_assets : None or int
        Only required and used if `formulation='variational'`.
max_weight : float
A float between (0, 1] representing the maximum weight per asset.
"""
def __init__(self, temperature=1, formulation='analytical', n_assets=
None, max_weight=1):
super().__init__()
self.temperature = temperature
if formulation not in {'analytical', 'variational'}:
raise ValueError('Unrecognized formulation {}'.format(formulation))
if formulation == 'variational' and n_assets is None:
raise ValueError(
'One needs to provide n_assets for the variational formulation.'
)
if formulation == 'analytical' and max_weight != 1:
raise ValueError(
                'Cannot constrain weights via max_weight for analytical formulation'
)
if formulation == 'variational' and n_assets * max_weight < 1:
raise ValueError(
                'One cannot create a fully invested portfolio with the given max_weight'
)
self.formulation = formulation
if formulation == 'analytical':
self.layer = torch.nn.Softmax(dim=1)
else:
x = cp.Parameter(n_assets)
w = cp.Variable(n_assets)
obj = -x @ w - cp.sum(cp.entr(w))
cons = [cp.sum(w) == 1.0, w <= max_weight]
prob = cp.Problem(cp.Minimize(obj), cons)
self.layer = CvxpyLayer(prob, [x], [w])
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
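# The two kernels above implement the standard numerically stable softmax:
# triton_poi_fused__softmax_0 computes exp(x - rowmax(x)) and
# triton_poi_fused__softmax_1 divides by the row sum. A hedged eager
# reference for comparison:
def _softmax_reference(x):
    shifted = x - x.max(dim=1, keepdim=True).values
    e = shifted.exp()
    return e / e.sum(dim=1, keepdim=True)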
|
vishalbelsare/deepdow
|
SoftmaxAllocator
| false
| 16,672
|
[
"Apache-2.0"
] | 511
|
cbb99347fba9a447d4fcae64fe5137c203643e44
|
https://github.com/vishalbelsare/deepdow/tree/cbb99347fba9a447d4fcae64fe5137c203643e44
|
TransformerEncoderLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class TransformerEncoderLayer(nn.Module):
def __init__(self, embed_dim, num_heads, hidden_size, dropout=0.0,
attention_dropout=0.0, activation_dropout=0.0):
super().__init__()
self.embed_dim = embed_dim
self.self_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.self_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.dropout = dropout
self.activation_dropout = activation_dropout
self.normalize_before = True
self.fc1 = torch.nn.Linear(self.embed_dim, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, self.embed_dim)
self.layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.init_parameters()
def forward(self, x, key_padding_mask=None, attn_mask=None):
residual = x
x = self.self_attn_layer_norm(x)
x, _att = self.self_attn(query=x, key=x, value=x, key_padding_mask=
key_padding_mask, attn_mask=attn_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
residual = x
x = self.layer_norm(x)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
return x
def init_parameters(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.constant_(self.fc2.bias, 0.0)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4, 'hidden_size': 4}]
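# Minimal usage sketch: with embed_dim=4 the layer is shape-preserving. The
# get_inputs above feed an unbatched (tokens, embed) tensor, which recent
# PyTorch MultiheadAttention versions accept (assumption: torch >= 1.11).
def _encoder_layer_demo():
    layer = TransformerEncoderLayer(embed_dim=4, num_heads=4, hidden_size=4)
    x = torch.rand(4, 4)
    assert layer(x).shape == x.shape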
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=
1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0)
del buf3
triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1,
4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf8
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4,
1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_7
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_1, buf12,
buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12,
buf13, buf14, primals_8, primals_9, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf13
del buf14
del primals_9
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (4, 4), (1,
4), 0), out=buf16)
buf17 = buf16
del buf16
triton_poi_fused_relu_8[grid(16)](buf17, primals_11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf17, reinterpret_tensor(primals_12, (4, 4), (1,
4), 0), out=buf18)
buf19 = buf18
del buf18
triton_poi_fused_add_9[grid(16)](buf19, primals_1, buf12,
primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_13
return (buf19, primals_1, primals_8, buf2, buf9, reinterpret_tensor(
buf11, (4, 4), (4, 1), 0), buf12, buf15, buf17, primals_12,
primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4
), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))
class TransformerEncoderLayerNew(nn.Module):
def __init__(self, embed_dim, num_heads, hidden_size, dropout=0.0,
attention_dropout=0.0, activation_dropout=0.0):
super().__init__()
self.embed_dim = embed_dim
self.self_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.self_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.dropout = dropout
self.activation_dropout = activation_dropout
self.normalize_before = True
self.fc1 = torch.nn.Linear(self.embed_dim, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, self.embed_dim)
self.layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.init_parameters()
def init_parameters(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.constant_(self.fc2.bias, 0.0)
def forward(self, input_0):
primals_4 = self.self_attn.in_proj_weight
primals_5 = self.self_attn.in_proj_bias
primals_1 = self.self_attn.out_proj.weight
primals_2 = self.self_attn.out_proj.bias
primals_3 = self.self_attn_layer_norm.weight
primals_7 = self.self_attn_layer_norm.bias
primals_6 = self.fc1.weight
primals_8 = self.fc1.bias
primals_10 = self.fc2.weight
primals_9 = self.fc2.bias
primals_11 = self.layer_norm.weight
primals_13 = self.layer_norm.bias
primals_12 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
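# Kernel 0 above stores the per-row mean and rsqrt(var + 1e-5) directly, while
# kernel 6 stores the raw (biased) variance for kernel 7 to normalize. A
# hedged eager reference for a single row:
def _layernorm_reference(row, weight, bias, eps=1e-05):
    mu = row.mean()
    var = row.var(unbiased=False)
    return (row - mu) * torch.rsqrt(var + eps) * weight + bias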
|
vengalraoguttha/EGG
|
TransformerEncoderLayer
| false
| 16,673
|
[
"MIT"
] | 254
|
e4f8412f197543ec7f1f00cf89b5a364b038dc57
|
https://github.com/vengalraoguttha/EGG/tree/e4f8412f197543ec7f1f00cf89b5a364b038dc57
|
SpatialCrossMapLRN
|
import torch
import torch.nn as nn
import torch.nn.parallel
class SpatialCrossMapLRN(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1,
ACROSS_CHANNELS=True):
super(SpatialCrossMapLRN, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
self.k = k
def forward(self, x):
if self.ACROSS_CHANNELS:
div = x.pow(2).unsqueeze(1)
div = self.average(div).squeeze(1)
div = div.mul(self.alpha).add(self.k).pow(self.beta)
else:
div = x.pow(2)
div = self.average(div)
div = div.mul(self.alpha).add(self.k).pow(self.beta)
x = x.div(div)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
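# The module computes x / (k + alpha * avg(x ** 2)) ** beta, where the average
# runs over a local_size window across channels (ACROSS_CHANNELS=True) or over
# space. With the default local_size=1 the average pool is the identity, so
# the op reduces to the elementwise expression below.
def _lrn_local1_reference(x, alpha=1.0, beta=0.75, k=1):
    return x / (x.pow(2) * alpha + k).pow(beta)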
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 + tmp2
tmp6 = 0.75
tmp7 = libdevice.pow(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SpatialCrossMapLRNNew(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1,
ACROSS_CHANNELS=True):
super(SpatialCrossMapLRNNew, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
self.k = k
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
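# Note: triton_poi_fused_add_div_mul_pow_0 is specialized to the default
# hyper-parameters (local_size=1, alpha=1.0, beta=0.75, k=1); with
# local_size=1 the AvgPool3d is the identity, so the whole LRN collapses to
# the single elementwise kernel above and SpatialCrossMapLRNNew silently
# ignores any non-default constructor arguments.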
|
vfdev-5/models-comparison.pytorch
|
SpatialCrossMapLRN
| false
| 16,674
|
[
"BSD-3-Clause"
] | 174
|
6a09c41c1ed6160af0734924700a9150249c3df6
|
https://github.com/vfdev-5/models-comparison.pytorch/tree/6a09c41c1ed6160af0734924700a9150249c3df6
|
Symmetric
|
import torch
from torch import nn
class NonSquareError(ValueError):
def __init__(self, name, size):
super().__init__(
            'The {} parametrization can only be applied to square matrices. Got a tensor of size {}'
.format(name, size))
class VectorError(ValueError):
def __init__(self, name, size):
super().__init__(
'Cannot instantiate {} on a tensor of less than 2 dimensions. Got a tensor of size {}'
.format(name, size))
class Symmetric(nn.Module):
def __init__(self, lower=True):
"""
Vector space of symmetric matrices, parametrized in terms of the upper or lower
triangular part of a matrix.
Args:
size (torch.size): Size of the tensor to be parametrized
lower (bool): Optional. Uses the lower triangular part of the matrix to
parametrize the matrix. Default: ``True``
"""
super().__init__()
self.lower = lower
@staticmethod
def frame(X, lower):
if lower:
return X.tril(0) + X.tril(-1).transpose(-2, -1)
else:
return X.triu(0) + X.triu(1).transpose(-2, -1)
def forward(self, X):
if len(X.size()) < 2:
raise VectorError(type(self).__name__, X.size())
if X.size(-2) != X.size(-1):
raise NonSquareError(type(self).__name__, X.size())
return self.frame(X, self.lower)
@staticmethod
def in_manifold(X, eps=1e-06):
return X.dim() >= 2 and X.size(-2) == X.size(-1) and torch.allclose(X,
X.transpose(-2, -1), atol=eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
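# Property check: frame() mirrors the strict lower triangle onto the upper
# one, so the result equals its own transpose and keeps the lower triangle
# of the input verbatim.
def _symmetric_demo():
    X = torch.rand(4, 4)
    S = Symmetric.frame(X, lower=True)
    assert torch.equal(S, S.transpose(-2, -1))
    assert torch.equal(S.tril(0), X.tril(0))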
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_tril_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y3 = yindex
y1 = yindex // 4
tmp3 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp0 = x2 + -1 * y0
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 <= tmp1
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = y0 + -1 * x2
tmp7 = tl.full([1, 1], -1, tl.int64)
tmp8 = tmp6 <= tmp7
tmp10 = tl.where(tmp8, tmp9, tmp4)
tmp11 = tmp5 + tmp10
tl.store(out_ptr0 + (x2 + 4 * y3), tmp11, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_tril_0[grid(64, 4)](arg0_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NonSquareError(ValueError):
def __init__(self, name, size):
super().__init__(
            'The {} parametrization can only be applied to square matrices. Got a tensor of size {}'
.format(name, size))
class VectorError(ValueError):
def __init__(self, name, size):
super().__init__(
'Cannot instantiate {} on a tensor of less than 2 dimensions. Got a tensor of size {}'
.format(name, size))
class SymmetricNew(nn.Module):
def __init__(self, lower=True):
"""
Vector space of symmetric matrices, parametrized in terms of the upper or lower
triangular part of a matrix.
Args:
size (torch.size): Size of the tensor to be parametrized
lower (bool): Optional. Uses the lower triangular part of the matrix to
parametrize the matrix. Default: ``True``
"""
super().__init__()
self.lower = lower
@staticmethod
def frame(X, lower):
if lower:
return X.tril(0) + X.tril(-1).transpose(-2, -1)
else:
return X.triu(0) + X.triu(1).transpose(-2, -1)
@staticmethod
def in_manifold(X, eps=1e-06):
return X.dim() >= 2 and X.size(-2) == X.size(-1) and torch.allclose(X,
X.transpose(-2, -1), atol=eps)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vishalbelsare/geotorch
|
Symmetric
| false
| 16,675
|
[
"MIT"
] | 422
|
ba38d406c245d609fee4b4dac3f6427bf6d73a8e
|
https://github.com/vishalbelsare/geotorch/tree/ba38d406c245d609fee4b4dac3f6427bf6d73a8e
|
Cov2Corr
|
import torch
import torch.nn as nn
class Cov2Corr(nn.Module):
"""Conversion from covariance matrix to correlation matrix."""
def forward(self, covmat):
"""Convert.
Parameters
----------
covmat : torch.Tensor
Covariance matrix of shape (n_samples, n_assets, n_assets).
Returns
-------
corrmat : torch.Tensor
Correlation matrix of shape (n_samples, n_assets, n_assets).
"""
n_samples, n_assets, _ = covmat.shape
stds = torch.sqrt(torch.diagonal(covmat, dim1=1, dim2=2))
stds_ = stds.view(n_samples, n_assets, 1)
corr = covmat / torch.matmul(stds_, stds_.permute(0, 2, 1))
return corr
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
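# The conversion is corr[i, j] = cov[i, j] / (std[i] * std[j]) with
# std = sqrt(diag(cov)); in particular the diagonal of the result is all ones.
def _cov2corr_demo():
    b = torch.rand(4, 4, 6)
    cov = torch.matmul(b, b.transpose(1, 2))  # PSD with positive diagonal
    corr = Cov2Corr()(cov)
    assert torch.allclose(torch.diagonal(corr, dim1=1, dim2=2),
        torch.ones(4, 4), atol=1e-6)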
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (5 * x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = libdevice.sqrt(tmp0)
tl.store(out_ptr0 + x2, tmp1, xmask)
@triton.jit
def triton_poi_fused_div_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sqrt_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 0), 0
), reinterpret_tensor(buf0, (4, 1, 4), (4, 0, 1), 0), out=buf1)
del buf0
buf2 = buf1
del buf1
triton_poi_fused_div_1[grid(64)](buf2, arg0_1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf2,
class Cov2CorrNew(nn.Module):
"""Conversion from covariance matrix to correlation matrix."""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vishalbelsare/deepdow
|
Cov2Corr
| false
| 16,676
|
[
"Apache-2.0"
] | 511
|
cbb99347fba9a447d4fcae64fe5137c203643e44
|
https://github.com/vishalbelsare/deepdow/tree/cbb99347fba9a447d4fcae64fe5137c203643e44
|
InformedSender
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class InformedSender(nn.Module):
def __init__(self, game_size, feat_size, embedding_size, hidden_size,
vocab_size=100, temp=1.0):
super(InformedSender, self).__init__()
self.game_size = game_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.temp = temp
self.lin1 = nn.Linear(feat_size, embedding_size, bias=False)
self.conv2 = nn.Conv2d(1, hidden_size, kernel_size=(game_size, 1),
stride=(game_size, 1), bias=False)
self.conv3 = nn.Conv2d(1, 1, kernel_size=(hidden_size, 1), stride=(
hidden_size, 1), bias=False)
self.lin4 = nn.Linear(embedding_size, vocab_size, bias=False)
def forward(self, x, _aux_input=None):
emb = self.return_embeddings(x)
h = self.conv2(emb)
h = torch.sigmoid(h)
h = h.transpose(1, 2)
h = self.conv3(h)
h = torch.sigmoid(h)
h = h.squeeze(dim=1)
h = h.squeeze(dim=1)
h = self.lin4(h)
h = h.mul(1.0 / self.temp)
logits = F.log_softmax(h, dim=1)
return logits
def return_embeddings(self, x):
embs = []
for i in range(self.game_size):
h = x[i]
if len(h.size()) == 3:
h = h.squeeze(dim=-1)
h_i = self.lin1(h)
h_i = h_i.unsqueeze(dim=1)
h_i = h_i.unsqueeze(dim=1)
embs.append(h_i)
h = torch.cat(embs, dim=2)
return h
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'game_size': 4, 'feat_size': 4, 'embedding_size': 4,
'hidden_size': 4}]
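# Hedged shape walk for the defaults above: input (game_size, batch, feat)
# -> shared lin1 per position -> cat to (batch, 1, game_size, emb) -> conv2
# collapses the game_size axis -> sigmoid -> conv3 collapses the hidden axis
# -> sigmoid -> lin4 -> temperature-scaled log-softmax over the vocabulary.
def _informed_sender_demo():
    sender = InformedSender(game_size=4, feat_size=4, embedding_size=4,
        hidden_size=4)
    logits = sender(torch.rand(4, 4, 4))
    assert logits.shape == (4, 100)
    assert torch.allclose(logits.exp().sum(dim=1), torch.ones(4), atol=1e-5)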
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * x2), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), tmp14 & xmask, eviction_policy
='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + (x0 + 4 * x2), tmp16 & xmask, eviction_policy
='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x3, tmp22, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 100
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 100 * x0), rmask & xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(rmask & xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tl_math.log(tmp13)
tmp15 = tmp8 - tmp14
tl.store(out_ptr2 + (r1 + 100 * x0), tmp15, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1, 4, 1), (4, 4, 1, 1))
assert_size_stride(primals_4, (1, 1, 4, 1), (4, 4, 1, 1))
assert_size_stride(primals_5, (100, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(64)](buf0, buf1, buf2, buf3, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del buf1
del buf2
del buf3
buf5 = extern_kernels.convolution(buf4, primals_3, stride=(4, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 4), (16, 4, 4, 1))
buf6 = buf5
del buf5
triton_poi_fused_sigmoid_1[grid(64)](buf6, 64, XBLOCK=64, num_warps
=1, num_stages=1)
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (4, 1, 4,
4), (16, 4, 4, 1), 0), primals_4, stride=(4, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 1, 4), (4, 4, 4, 1))
buf8 = buf7
del buf7
triton_poi_fused_sigmoid_2[grid(16)](buf8, 16, XBLOCK=16, num_warps
=1, num_stages=1)
buf9 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 100), (1, 4), 0), out=buf9)
buf12 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
triton_per_fused__log_softmax_3[grid(4)](buf9, buf12, 4, 100,
XBLOCK=1, num_warps=2, num_stages=1)
del buf9
return buf12, primals_3, primals_4, reinterpret_tensor(primals_1, (4, 4
), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 16
), reinterpret_tensor(primals_1, (4, 4), (4, 1), 32
), reinterpret_tensor(primals_1, (4, 4), (4, 1), 48
), buf4, buf6, buf8, buf12, primals_5
class InformedSenderNew(nn.Module):
def __init__(self, game_size, feat_size, embedding_size, hidden_size,
vocab_size=100, temp=1.0):
super(InformedSenderNew, self).__init__()
self.game_size = game_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.temp = temp
self.lin1 = nn.Linear(feat_size, embedding_size, bias=False)
self.conv2 = nn.Conv2d(1, hidden_size, kernel_size=(game_size, 1),
stride=(game_size, 1), bias=False)
self.conv3 = nn.Conv2d(1, 1, kernel_size=(hidden_size, 1), stride=(
hidden_size, 1), bias=False)
self.lin4 = nn.Linear(embedding_size, vocab_size, bias=False)
def return_embeddings(self, x):
embs = []
for i in range(self.game_size):
h = x[i]
if len(h.size()) == 3:
h = h.squeeze(dim=-1)
h_i = self.lin1(h)
h_i = h_i.unsqueeze(dim=1)
h_i = h_i.unsqueeze(dim=1)
embs.append(h_i)
h = torch.cat(embs, dim=2)
return h
def forward(self, input_0):
primals_2 = self.lin1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.lin4.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
vengalraoguttha/EGG
|
InformedSender
| false
| 16,677
|
[
"MIT"
] | 254
|
e4f8412f197543ec7f1f00cf89b5a364b038dc57
|
https://github.com/vengalraoguttha/EGG/tree/e4f8412f197543ec7f1f00cf89b5a364b038dc57
|
Skew
|
import torch
from torch import nn
class NonSquareError(ValueError):
def __init__(self, name, size):
super().__init__(
            'The {} parametrization can only be applied to square matrices. Got a tensor of size {}'
.format(name, size))
class VectorError(ValueError):
def __init__(self, name, size):
super().__init__(
'Cannot instantiate {} on a tensor of less than 2 dimensions. Got a tensor of size {}'
.format(name, size))
class Skew(nn.Module):
def __init__(self, lower=True):
"""
Vector space of skew-symmetric matrices, parametrized in terms of
the upper or lower triangular part of a matrix.
Args:
size (torch.size): Size of the tensor to be parametrized
lower (bool): Optional. Uses the lower triangular part of the matrix
to parametrize the matrix. Default: ``True``
"""
super().__init__()
self.lower = lower
@staticmethod
def frame(X, lower):
if lower:
X = X.tril(-1)
else:
X = X.triu(1)
return X - X.transpose(-2, -1)
def forward(self, X):
if len(X.size()) < 2:
raise VectorError(type(self).__name__, X.size())
if X.size(-2) != X.size(-1):
raise NonSquareError(type(self).__name__, X.size())
return self.frame(X, self.lower)
@staticmethod
def in_manifold(X):
return X.dim() >= 2 and X.size(-2) == X.size(-1) and torch.allclose(X,
-X.transpose(-2, -1))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
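# Property check: frame() keeps the strict lower triangle and subtracts its
# transpose, so the result A has a zero diagonal and satisfies A == -A.T.
def _skew_demo():
    X = torch.rand(4, 4)
    A = Skew.frame(X, lower=True)
    assert torch.equal(A, -A.transpose(-2, -1))
    assert torch.all(torch.diagonal(A) == 0)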
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sub_tril_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y3 = yindex
y1 = yindex // 4
tmp3 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp0 = x2 + -1 * y0
tmp1 = tl.full([1, 1], -1, tl.int64)
tmp2 = tmp0 <= tmp1
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = y0 + -1 * x2
tmp7 = tmp6 <= tmp1
tmp9 = tl.where(tmp7, tmp8, tmp4)
tmp10 = tmp5 - tmp9
tl.store(out_ptr0 + (x2 + 4 * y3), tmp10, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_tril_0[grid(64, 4)](arg0_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NonSquareError(ValueError):
def __init__(self, name, size):
super().__init__(
            'The {} parametrization can only be applied to square matrices. Got a tensor of size {}'
.format(name, size))
class VectorError(ValueError):
def __init__(self, name, size):
super().__init__(
'Cannot instantiate {} on a tensor of less than 2 dimensions. Got a tensor of size {}'
.format(name, size))
class SkewNew(nn.Module):
def __init__(self, lower=True):
"""
Vector space of skew-symmetric matrices, parametrized in terms of
the upper or lower triangular part of a matrix.
Args:
size (torch.size): Size of the tensor to be parametrized
lower (bool): Optional. Uses the lower triangular part of the matrix
to parametrize the matrix. Default: ``True``
"""
super().__init__()
self.lower = lower
@staticmethod
def frame(X, lower):
if lower:
X = X.tril(-1)
else:
X = X.triu(1)
return X - X.transpose(-2, -1)
@staticmethod
def in_manifold(X):
return X.dim() >= 2 and X.size(-2) == X.size(-1) and torch.allclose(X,
-X.transpose(-2, -1))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vishalbelsare/geotorch
|
Skew
| false
| 16,678
|
[
"MIT"
] | 422
|
ba38d406c245d609fee4b4dac3f6427bf6d73a8e
|
https://github.com/vishalbelsare/geotorch/tree/ba38d406c245d609fee4b4dac3f6427bf6d73a8e
|
Naked
|
from torch.nn import Module
import torch
from torch import Tensor
class Naked(Module):
"""Returns a tensor filled with the scalar value zero.
Args:
out_features (int, default=1): Size of each output sample.
Shape:
- Input: :math:`(N, *, H_{\\text{in}})` where
:math:`*` means any number of additional dimensions and
:math:`H_{\\text{in}}` is the number of input features.
- Output: :math:`(N, *, H_{\\text{out}})` where
all but the last dimension are the same shape as the input and
:math:`H_{\\text{out}}` is the number of output features.
Examples:
>>> from pfhedge.nn import Naked
>>>
>>> m = Naked()
>>> input = torch.empty((2, 3))
>>> m(input)
tensor([[0.],
[0.]])
"""
def __init__(self, out_features: 'int'=1):
super().__init__()
self.out_features = out_features
def forward(self, input: 'Tensor') ->Tensor:
return input.new_zeros(input.size()[:-1] + (self.out_features,))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_new_zeros_0[grid(64)](buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf0,
class NakedNew(Module):
"""Returns a tensor filled with the scalar value zero.
Args:
out_features (int, default=1): Size of each output sample.
Shape:
- Input: :math:`(N, *, H_{\\text{in}})` where
:math:`*` means any number of additional dimensions and
:math:`H_{\\text{in}}` is the number of input features.
- Output: :math:`(N, *, H_{\\text{out}})` where
all but the last dimension are the same shape as the input and
:math:`H_{\\text{out}}` is the number of output features.
Examples:
>>> from pfhedge.nn import Naked
>>>
>>> m = Naked()
>>> input = torch.empty((2, 3))
>>> m(input)
tensor([[0.],
[0.]])
"""
def __init__(self, out_features: 'int'=1):
super().__init__()
self.out_features = out_features
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vishalbelsare/pfhedge
|
Naked
| false
| 16,679
|
[
"MIT"
] | 81
|
4d7ff173995e0795942bc6ec55f3fdc5bfb7a5f1
|
https://github.com/vishalbelsare/pfhedge/tree/4d7ff173995e0795942bc6ec55f3fdc5bfb7a5f1
|
GatingMechanism
|
import torch
class GatingMechanism(torch.nn.Module):
def __init__(self, d_input, bg=0.1):
super(GatingMechanism, self).__init__()
self.Wr = torch.nn.Linear(d_input, d_input)
self.Ur = torch.nn.Linear(d_input, d_input)
self.Wz = torch.nn.Linear(d_input, d_input)
self.Uz = torch.nn.Linear(d_input, d_input)
self.Wg = torch.nn.Linear(d_input, d_input)
self.Ug = torch.nn.Linear(d_input, d_input)
self.bg = bg
self.sigmoid = torch.nn.Sigmoid()
self.tanh = torch.nn.Tanh()
def forward(self, x, y):
r = self.sigmoid(self.Wr(y) + self.Ur(x))
z = self.sigmoid(self.Wz(y) + self.Uz(x) - self.bg)
h = self.tanh(self.Wg(y) + self.Ug(torch.mul(r, x)))
g = torch.mul(1 - z, x) + torch.mul(z, h)
return g
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_input': 4}]
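# The gate is the GRU-style update used in GTrXL-style stabilized
# transformers (assumption: x is the residual stream, y the sublayer output):
#   r = sigmoid(Wr y + Ur x)
#   z = sigmoid(Wz y + Uz x - bg)    # bg > 0 biases the gate toward identity
#   h = tanh(Wg y + Ug (r * x))
#   g = (1 - z) * x + z * h
def _gating_demo():
    gate = GatingMechanism(d_input=4)
    x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    assert gate(x, y).shape == x.shape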
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp9 = tmp7 * tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp7
tmp12 = tmp7 * tmp11
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_sub_tanh_1(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_out_ptr1 + x2, xmask)
tmp11 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr4 + x2, xmask)
tmp14 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr6 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = 0.1
tmp8 = tmp6 - tmp7
tmp9 = tl.sigmoid(tmp8)
tmp12 = tmp10 + tmp11
tmp15 = tmp13 + tmp14
tmp16 = tmp12 + tmp15
tmp17 = libdevice.tanh(tmp16)
tmp18 = 1.0
tmp19 = tmp18 - tmp9
tmp21 = tmp19 * tmp20
tmp22 = tmp9 * tmp17
tmp23 = tmp21 + tmp22
tl.store(in_out_ptr0 + x2, tmp9, xmask)
tl.store(in_out_ptr1 + x2, tmp17, xmask)
tl.store(out_ptr0 + x2, tmp23, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf3)
del primals_9
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf5)
del primals_11
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0[grid(256)](buf0,
primals_2, buf1, primals_5, primals_6, buf6, buf10, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_2
del primals_5
buf7 = buf1
del buf1
extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf7)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
buf9 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_mul_rsub_sigmoid_sub_tanh_1[grid(256)](buf4,
buf8, primals_8, buf3, primals_10, primals_12, buf7, primals_14,
primals_6, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del buf7
del primals_10
del primals_12
del primals_14
del primals_8
return buf9, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf6, (64, 4), (4, 1), 0
), buf8, primals_13, buf10
class GatingMechanismNew(torch.nn.Module):
def __init__(self, d_input, bg=0.1):
super(GatingMechanismNew, self).__init__()
self.Wr = torch.nn.Linear(d_input, d_input)
self.Ur = torch.nn.Linear(d_input, d_input)
self.Wz = torch.nn.Linear(d_input, d_input)
self.Uz = torch.nn.Linear(d_input, d_input)
self.Wg = torch.nn.Linear(d_input, d_input)
self.Ug = torch.nn.Linear(d_input, d_input)
self.bg = bg
self.sigmoid = torch.nn.Sigmoid()
self.tanh = torch.nn.Tanh()
def forward(self, input_0, input_1):
primals_1 = self.Wr.weight
primals_2 = self.Wr.bias
primals_4 = self.Ur.weight
primals_5 = self.Ur.bias
primals_7 = self.Wz.weight
primals_8 = self.Wz.bias
primals_9 = self.Uz.weight
primals_10 = self.Uz.bias
primals_11 = self.Wg.weight
primals_12 = self.Wg.bias
primals_13 = self.Ug.weight
primals_14 = self.Ug.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
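# Hedged usage sketch (illustrative only, not from the source repo; the
# _demo_* name is ours). `call` allocates CUDA buffers, so a GPU is assumed;
# shapes mirror the (4, 4, 4, 4) test inputs used by get_inputs elsewhere.
def _demo_gating_mechanism():
    gate = GatingMechanismNew(d_input=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    out = gate(x, y)  # gated blend of x- and y-derived features
    assert out.shape == (4, 4, 4, 4)
    return out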
|
victor-psiori/Transformers-RL
|
GatingMechanism
| false
| 16,680
|
[
"MIT"
] | 50
|
85b3f2376ba473a45ca18c969aebb1ae82cf8475
|
https://github.com/victor-psiori/Transformers-RL/tree/85b3f2376ba473a45ca18c969aebb1ae82cf8475
|
EntropicLoss
|
from torch.nn import Module
import torch
from torch import Tensor
from typing import Callable
from typing import Union
from abc import ABC
def _format_float(value: 'float') ->str:
"""
>>> _format_float(1)
'1'
>>> _format_float(1.0)
'1.'
>>> _format_float(1e-4)
'1.0000e-04'
"""
tensor = torch.tensor([value])
return torch._tensor_str._Formatter(tensor).format(value)
def bisect(fn: 'Callable[[Tensor], Tensor]', target: 'Tensor', lower:
'Union[float, Tensor]', upper: 'Union[float, Tensor]', precision:
'float'=1e-06, max_iter: 'int'=100000) ->Tensor:
"""Perform binary search over a tensor.
The output tensor approximately satisfies the following relation:
.. code-block::
fn(output) = target
Args:
fn (callable[[Tensor], Tensor]): A monotone function.
target (Tensor): Target of function values.
lower (Tensor or float): Lower bound of binary search.
upper (Tensor or float): Upper bound of binary search.
precision (float, default=1e-6): Precision of output.
        max_iter (int, default=100000): If the number of iterations exceeds this
value, abort computation and raise RuntimeError.
Returns:
torch.Tensor
Raises:
        RuntimeError: If the number of iterations exceeds ``max_iter``.
Examples:
>>> target = torch.tensor([-1.0, 0.0, 1.0])
>>> fn = torch.log
>>> output = bisect(fn, target, 0.01, 10.0)
>>> output
tensor([0.3679, 1.0000, 2.7183])
>>> torch.allclose(fn(output), target, atol=1e-6)
True
Monotone decreasing function:
>>> fn = lambda input: -torch.log(input)
>>> output = bisect(fn, target, 0.01, 10.0)
>>> output
tensor([2.7183, 1.0000, 0.3679])
>>> torch.allclose(fn(output), target, atol=1e-6)
True
"""
lower, upper = map(torch.as_tensor, (lower, upper))
if not (lower < upper).all():
raise ValueError('condition lower < upper should be satisfied.')
if (fn(lower) > fn(upper)).all():
def mf(input):
return -fn(input)
return bisect(mf, -target, lower, upper, precision=precision,
max_iter=max_iter)
n_iter = 0
while torch.max(upper - lower) > precision:
n_iter += 1
if n_iter > max_iter:
raise RuntimeError(
f'Aborting since iteration exceeds max_iter={max_iter}.')
m = (lower + upper) / 2
output = fn(m)
lower = lower.where(output >= target, m)
upper = upper.where(output < target, m)
return upper
def exp_utility(input: 'Tensor', a: 'float'=1.0) ->Tensor:
"""Applies an exponential utility function.
An exponential utility function is defined as:
.. math::
u(x) = -\\exp(-a x) \\,.
Args:
input (torch.Tensor): The input tensor.
a (float, default=1.0): The risk aversion coefficient of the exponential
utility.
Returns:
torch.Tensor
"""
return -(-a * input).exp()
class HedgeLoss(Module, ABC):
"""Base class for hedging criteria."""
def forward(self, input: 'Tensor') ->Tensor:
"""Returns the loss of the profit-loss distribution.
This method should be overridden.
Args:
input (torch.Tensor): The distribution of the profit and loss.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Returns:
torch.Tensor
"""
def cash(self, input: 'Tensor') ->Tensor:
"""Returns the cash amount which is as preferable as
the given profit-loss distribution in terms of the loss.
The output ``cash`` is expected to satisfy the following relation:
.. code::
loss(torch.full_like(pnl, cash)) = loss(pnl)
By default, the output is computed by binary search.
If analytic form is known, it is recommended to override this method
for faster computation.
Args:
input (torch.Tensor): The distribution of the profit and loss.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Returns:
torch.Tensor
"""
return bisect(self, self(input), input.min(), input.max())
class EntropicLoss(HedgeLoss):
"""Creates a criterion that measures the expected exponential utility.
The loss of the profit-loss :math:`\\text{pnl}` is given by:
.. math::
\\text{loss}(\\text{pnl}) = -\\mathbf{E}[u(\\text{pnl})] \\,,
\\quad
u(x) = -\\exp(-a x) \\,.
.. seealso::
- :func:`pfhedge.nn.functional.exp_utility`:
The corresponding utility function.
Args:
a (float > 0, default=1.0): Risk aversion coefficient of
the exponential utility.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Examples:
>>> from pfhedge.nn import EntropicLoss
>>>
>>> loss = EntropicLoss()
>>> input = -torch.arange(4.0)
>>> loss(input)
tensor(7.7982)
>>> loss.cash(input)
tensor(-2.0539)
"""
def __init__(self, a: 'float'=1.0):
if not a > 0:
raise ValueError('Risk aversion coefficient should be positive.')
super().__init__()
self.a = a
def extra_repr(self) ->str:
return 'a=' + _format_float(self.a) if self.a != 1 else ''
def forward(self, input: 'Tensor') ->Tensor:
return -exp_utility(input, a=self.a).mean(0)
def cash(self, input: 'Tensor') ->Tensor:
return -(-exp_utility(input, a=self.a).mean(0)).log() / self.a
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
from torch import Tensor
from typing import Callable
from typing import Union
from abc import ABC
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_mean_mul_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp10 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp15 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = -tmp3
tmp6 = tmp5 * tmp1
tmp7 = tl_math.exp(tmp6)
tmp8 = -tmp7
tmp9 = tmp4 + tmp8
tmp11 = tmp10 * tmp1
tmp12 = tl_math.exp(tmp11)
tmp13 = -tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = -tmp17
tmp19 = tmp14 + tmp18
tmp20 = 4.0
tmp21 = tmp19 / tmp20
tmp22 = -tmp21
tl.store(out_ptr0 + x0, tmp22, xmask)
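# The fused kernel above evaluates -mean(-exp(-input), dim=0) for the
# (4, 4, 4, 4) input in a single pass: each of the 64 programs loads the four
# batch entries (64 elements apart), averages their -exp(-x) values, and
# negates the mean before storing one output element.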
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_mean_mul_neg_0[grid(64)](arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
def _format_float(value: 'float') ->str:
"""
>>> _format_float(1)
'1'
>>> _format_float(1.0)
'1.'
>>> _format_float(1e-4)
'1.0000e-04'
"""
tensor = torch.tensor([value])
return torch._tensor_str._Formatter(tensor).format(value)
def bisect(fn: 'Callable[[Tensor], Tensor]', target: 'Tensor', lower:
'Union[float, Tensor]', upper: 'Union[float, Tensor]', precision:
'float'=1e-06, max_iter: 'int'=100000) ->Tensor:
"""Perform binary search over a tensor.
The output tensor approximately satisfies the following relation:
.. code-block::
fn(output) = target
Args:
fn (callable[[Tensor], Tensor]): A monotone function.
target (Tensor): Target of function values.
lower (Tensor or float): Lower bound of binary search.
upper (Tensor or float): Upper bound of binary search.
precision (float, default=1e-6): Precision of output.
        max_iter (int, default=100000): If the number of iterations exceeds this
value, abort computation and raise RuntimeError.
Returns:
torch.Tensor
Raises:
        RuntimeError: If the number of iterations exceeds ``max_iter``.
Examples:
>>> target = torch.tensor([-1.0, 0.0, 1.0])
>>> fn = torch.log
>>> output = bisect(fn, target, 0.01, 10.0)
>>> output
tensor([0.3679, 1.0000, 2.7183])
>>> torch.allclose(fn(output), target, atol=1e-6)
True
Monotone decreasing function:
>>> fn = lambda input: -torch.log(input)
>>> output = bisect(fn, target, 0.01, 10.0)
>>> output
tensor([2.7183, 1.0000, 0.3679])
>>> torch.allclose(fn(output), target, atol=1e-6)
True
"""
lower, upper = map(torch.as_tensor, (lower, upper))
if not (lower < upper).all():
raise ValueError('condition lower < upper should be satisfied.')
if (fn(lower) > fn(upper)).all():
def mf(input):
return -fn(input)
return bisect(mf, -target, lower, upper, precision=precision,
max_iter=max_iter)
n_iter = 0
while torch.max(upper - lower) > precision:
n_iter += 1
if n_iter > max_iter:
raise RuntimeError(
f'Aborting since iteration exceeds max_iter={max_iter}.')
m = (lower + upper) / 2
output = fn(m)
lower = lower.where(output >= target, m)
upper = upper.where(output < target, m)
return upper
def exp_utility(input: 'Tensor', a: 'float'=1.0) ->Tensor:
"""Applies an exponential utility function.
An exponential utility function is defined as:
.. math::
u(x) = -\\exp(-a x) \\,.
Args:
input (torch.Tensor): The input tensor.
a (float, default=1.0): The risk aversion coefficient of the exponential
utility.
Returns:
torch.Tensor
"""
return -(-a * input).exp()
class HedgeLoss(Module, ABC):
"""Base class for hedging criteria."""
def forward(self, input: 'Tensor') ->Tensor:
"""Returns the loss of the profit-loss distribution.
This method should be overridden.
Args:
input (torch.Tensor): The distribution of the profit and loss.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Returns:
torch.Tensor
"""
def cash(self, input: 'Tensor') ->Tensor:
"""Returns the cash amount which is as preferable as
the given profit-loss distribution in terms of the loss.
The output ``cash`` is expected to satisfy the following relation:
.. code::
loss(torch.full_like(pnl, cash)) = loss(pnl)
By default, the output is computed by binary search.
If analytic form is known, it is recommended to override this method
for faster computation.
Args:
input (torch.Tensor): The distribution of the profit and loss.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Returns:
torch.Tensor
"""
return bisect(self, self(input), input.min(), input.max())
class EntropicLossNew(HedgeLoss):
"""Creates a criterion that measures the expected exponential utility.
The loss of the profit-loss :math:`\\text{pnl}` is given by:
.. math::
\\text{loss}(\\text{pnl}) = -\\mathbf{E}[u(\\text{pnl})] \\,,
\\quad
u(x) = -\\exp(-a x) \\,.
.. seealso::
- :func:`pfhedge.nn.functional.exp_utility`:
The corresponding utility function.
Args:
a (float > 0, default=1.0): Risk aversion coefficient of
the exponential utility.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Examples:
>>> from pfhedge.nn import EntropicLoss
>>>
>>> loss = EntropicLoss()
>>> input = -torch.arange(4.0)
>>> loss(input)
tensor(7.7982)
>>> loss.cash(input)
tensor(-2.0539)
"""
def __init__(self, a: 'float'=1.0):
if not a > 0:
raise ValueError('Risk aversion coefficient should be positive.')
super().__init__()
self.a = a
def extra_repr(self) ->str:
return 'a=' + _format_float(self.a) if self.a != 1 else ''
def cash(self, input: 'Tensor') ->Tensor:
return -(-exp_utility(input, a=self.a).mean(0)).log() / self.a
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
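# Hedged sanity check (illustrative only; the _check_* name is ours, CUDA
# assumed): the Triton-backed forward should match the eager formula
# -mean(-exp(-pnl), dim=0) for the default risk aversion a=1.0.
def _check_entropic_loss():
    pnl = torch.rand(4, 4, 4, 4, device='cuda')
    expected = -exp_utility(pnl).mean(0)
    actual = EntropicLossNew()(pnl)
    assert torch.allclose(actual, expected, atol=1e-6)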
|
vishalbelsare/pfhedge
|
EntropicLoss
| false
| 16,681
|
[
"MIT"
] | 81
|
4d7ff173995e0795942bc6ec55f3fdc5bfb7a5f1
|
https://github.com/vishalbelsare/pfhedge/tree/4d7ff173995e0795942bc6ec55f3fdc5bfb7a5f1
|
ActorNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ActorNetwork(nn.Module):
def __init__(self, state_size, action_size, hidden_size, seed=1412):
super(ActorNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.head = nn.Linear(hidden_size, action_size)
def forward(self, state):
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
x = self.head(x)
return 2 * torch.tanh(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
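# The two kernels above fuse the eager graph: the first applies ReLU in place
# and stores the x <= 0 mask for the backward pass; the second computes the
# bounded head activation 2 * tanh(x) elementwise.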
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_tanh_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf4, primals_6, buf6, primals_4, buf7
class ActorNetworkNew(nn.Module):
def __init__(self, state_size, action_size, hidden_size, seed=1412):
super(ActorNetworkNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.head = nn.Linear(hidden_size, action_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.head.weight
primals_7 = self.head.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
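# Hedged usage sketch (illustrative only; the _demo_* name is ours, CUDA
# assumed): the compiled actor maps states to actions bounded in [-2, 2]
# by the 2 * tanh output head.
def _demo_actor():
    actor = ActorNetworkNew(state_size=4, action_size=4, hidden_size=4).cuda()
    state = torch.rand(4, 4, 4, 4, device='cuda')
    action = actor(state)
    assert action.shape == (4, 4, 4, 4)
    assert action.abs().max() <= 2.0
    return action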
|
vlgiitr/Workshop-Spring-2022
|
ActorNetwork
| false
| 16,682
|
[
"MIT"
] | 69
|
003ed62c75a876e946eaa481c27224dd38914015
|
https://github.com/vlgiitr/Workshop-Spring-2022/tree/003ed62c75a876e946eaa481c27224dd38914015
|
SphereEmbedded
|
import torch
from torch import nn
def _extra_repr(**kwargs):
if 'n' in kwargs:
ret = 'n={}'.format(kwargs['n'])
elif 'dim' in kwargs:
ret = 'dim={}'.format(kwargs['dim'])
else:
ret = ''
if 'k' in kwargs:
ret += ', k={}'.format(kwargs['k'])
if 'rank' in kwargs:
ret += ', rank={}'.format(kwargs['rank'])
if 'radius' in kwargs:
ret += ', radius={}'.format(kwargs['radius'])
if 'lam' in kwargs:
ret += ', lambda={}'.format(kwargs['lam'])
if 'f' in kwargs:
ret += ', f={}'.format(kwargs['f'].__name__)
if 'tensorial_size' in kwargs:
ts = kwargs['tensorial_size']
if len(ts) != 0:
ret += ', tensorial_size={}'.format(tuple(ts))
if 'triv' in kwargs:
ret += ', triv={}'.format(kwargs['triv'].__name__)
if 'no_inv' in kwargs:
if kwargs['no_inv']:
ret += ', no inverse'
if 'transposed' in kwargs:
if kwargs['transposed']:
ret += ', transposed'
return ret
def _in_sphere(x, r, eps):
norm = x.norm(dim=-1)
rs = torch.full_like(norm, r)
return (torch.norm(norm - rs, p=float('inf')) < eps).all()
def project(x):
return x / x.norm(dim=-1, keepdim=True)
def uniform_init_sphere_(x, r=1.0):
"""Samples a point uniformly on the sphere into the tensor ``x``.
If ``x`` has :math:`d > 1` dimensions, the first :math:`d-1` dimensions
are treated as batch dimensions.
"""
with torch.no_grad():
x.normal_()
x.data = r * project(x)
return x
class InManifoldError(ValueError):
def __init__(self, X, M):
super().__init__('Tensor not contained in {}. Got\n{}'.format(M, X))
class SphereEmbedded(nn.Module):
def __init__(self, size, radius=1.0):
"""
Sphere as the orthogonal projection from
:math:`\\mathbb{R}^n` to :math:`\\mathbb{S}^{n-1}`, that is,
:math:`x \\mapsto \\frac{x}{\\lVert x \\rVert}`.
Args:
            size (torch.Size): Size of the tensor to be parametrized
radius (float): Optional.
Radius of the sphere. A positive number. Default: ``1.``
"""
super().__init__()
self.n = size[-1]
self.tensorial_size = size[:-1]
self.radius = SphereEmbedded.parse_radius(radius)
@staticmethod
def parse_radius(radius):
if radius <= 0.0:
raise ValueError(
'The radius has to be a positive real number. Got {}'.
format(radius))
return radius
def forward(self, x):
return self.radius * project(x)
def right_inverse(self, x, check_in_manifold=True):
if check_in_manifold and not self.in_manifold(x):
raise InManifoldError(x, self)
return x / self.radius
def in_manifold(self, x, eps=1e-05):
"""
Checks that a vector is on the sphere.
For tensors with more than 2 dimensions the first dimensions are
treated as batch dimensions.
Args:
            x (torch.Tensor): The vector to be checked.
            eps (float): Optional. Threshold at which the norm is considered
                to be equal to the radius. Default: ``1e-5``
"""
return _in_sphere(x, self.radius, eps)
def sample(self):
"""
Returns a uniformly sampled vector on the sphere.
"""
x = torch.empty(*(self.tensorial_size + (self.n,)))
return uniform_init_sphere_(x, r=self.radius)
def extra_repr(self):
return _extra_repr(n=self.n, radius=self.radius, tensorial_size=
self.tensorial_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': [4, 4]}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tmp14 = 1.0
tmp15 = tmp13 * tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
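# The kernel above fuses normalization and scaling: for each element it
# accumulates the squared entries of its length-4 trailing vector, takes the
# square root to get the L2 norm, divides, and multiplies by the radius
# (1.0 here, baked in as a compile-time constant).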
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_mul_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def _extra_repr(**kwargs):
if 'n' in kwargs:
ret = 'n={}'.format(kwargs['n'])
elif 'dim' in kwargs:
ret = 'dim={}'.format(kwargs['dim'])
else:
ret = ''
if 'k' in kwargs:
ret += ', k={}'.format(kwargs['k'])
if 'rank' in kwargs:
ret += ', rank={}'.format(kwargs['rank'])
if 'radius' in kwargs:
ret += ', radius={}'.format(kwargs['radius'])
if 'lam' in kwargs:
ret += ', lambda={}'.format(kwargs['lam'])
if 'f' in kwargs:
ret += ', f={}'.format(kwargs['f'].__name__)
if 'tensorial_size' in kwargs:
ts = kwargs['tensorial_size']
if len(ts) != 0:
ret += ', tensorial_size={}'.format(tuple(ts))
if 'triv' in kwargs:
ret += ', triv={}'.format(kwargs['triv'].__name__)
if 'no_inv' in kwargs:
if kwargs['no_inv']:
ret += ', no inverse'
if 'transposed' in kwargs:
if kwargs['transposed']:
ret += ', transposed'
return ret
def _in_sphere(x, r, eps):
norm = x.norm(dim=-1)
rs = torch.full_like(norm, r)
return (torch.norm(norm - rs, p=float('inf')) < eps).all()
def project(x):
return x / x.norm(dim=-1, keepdim=True)
def uniform_init_sphere_(x, r=1.0):
"""Samples a point uniformly on the sphere into the tensor ``x``.
If ``x`` has :math:`d > 1` dimensions, the first :math:`d-1` dimensions
are treated as batch dimensions.
"""
with torch.no_grad():
x.normal_()
x.data = r * project(x)
return x
class InManifoldError(ValueError):
def __init__(self, X, M):
super().__init__('Tensor not contained in {}. Got\n{}'.format(M, X))
class SphereEmbeddedNew(nn.Module):
def __init__(self, size, radius=1.0):
"""
Sphere as the orthogonal projection from
:math:`\\mathbb{R}^n` to :math:`\\mathbb{S}^{n-1}`, that is,
:math:`x \\mapsto \\frac{x}{\\lVert x \\rVert}`.
Args:
            size (torch.Size): Size of the tensor to be parametrized
radius (float): Optional.
Radius of the sphere. A positive number. Default: ``1.``
"""
super().__init__()
self.n = size[-1]
self.tensorial_size = size[:-1]
self.radius = SphereEmbeddedNew.parse_radius(radius)
@staticmethod
def parse_radius(radius):
if radius <= 0.0:
raise ValueError(
'The radius has to be a positive real number. Got {}'.
format(radius))
return radius
def right_inverse(self, x, check_in_manifold=True):
if check_in_manifold and not self.in_manifold(x):
raise InManifoldError(x, self)
return x / self.radius
def in_manifold(self, x, eps=1e-05):
"""
Checks that a vector is on the sphere.
For tensors with more than 2 dimensions the first dimensions are
treated as batch dimensions.
Args:
            x (torch.Tensor): The vector to be checked.
            eps (float): Optional. Threshold at which the norm is considered
                to be equal to the radius. Default: ``1e-5``
"""
return _in_sphere(x, self.radius, eps)
def sample(self):
"""
Returns a uniformly sampled vector on the sphere.
"""
x = torch.empty(*(self.tensorial_size + (self.n,)))
return uniform_init_sphere_(x, r=self.radius)
def extra_repr(self):
return _extra_repr(n=self.n, radius=self.radius, tensorial_size=
self.tensorial_size)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
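# Hedged usage sketch (illustrative only; the _demo_* name is ours, CUDA
# assumed): forward projects each trailing vector onto the radius-1 sphere,
# so the output has unit norm along the last dimension.
def _demo_sphere():
    sphere = SphereEmbeddedNew(size=[4, 4])
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = sphere(x)
    norms = y.norm(dim=-1)
    assert torch.allclose(norms, torch.ones_like(norms), atol=1e-5)
    return y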
|
vishalbelsare/geotorch
|
SphereEmbedded
| false
| 16,683
|
[
"MIT"
] | 422
|
ba38d406c245d609fee4b4dac3f6427bf6d73a8e
|
https://github.com/vishalbelsare/geotorch/tree/ba38d406c245d609fee4b4dac3f6427bf6d73a8e
|
Decoder
|
import torch
import torch.nn as nn
from collections import OrderedDict
class Decoder(nn.Module):
def __init__(self, style_dim, class_dim):
super(Decoder, self).__init__()
self.linear_model = nn.Sequential(OrderedDict([('linear_1', nn.
Linear(in_features=style_dim + class_dim, out_features=500,
bias=True)), ('tan_h_1', nn.Tanh()), ('linear_2', nn.Linear(
in_features=500, out_features=784, bias=True))]))
def forward(self, style_latent_space, class_latent_space):
x = torch.cat((style_latent_space, class_latent_space), dim=1)
x = self.linear_model(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'style_dim': 4, 'class_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 2000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 500
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
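# The two kernels above fuse the front of the decoder: the first concatenates
# the two 4-dim codes into one 8-dim row per sample, the second adds the
# linear_1 bias and applies tanh in place after the first matmul.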
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (500, 8), (8, 1))
assert_size_stride(primals_4, (500,), (1,))
assert_size_stride(primals_5, (784, 500), (500, 1))
assert_size_stride(primals_6, (784,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 500), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_tanh_1[grid(2000)](buf2, primals_4, 2000, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5,
(500, 784), (1, 500), 0), alpha=1, beta=1, out=buf3)
del primals_6
return buf3, buf0, buf2, primals_5
class DecoderNew(nn.Module):
def __init__(self, style_dim, class_dim):
super(DecoderNew, self).__init__()
self.linear_model = nn.Sequential(OrderedDict([('linear_1', nn.
Linear(in_features=style_dim + class_dim, out_features=500,
bias=True)), ('tan_h_1', nn.Tanh()), ('linear_2', nn.Linear(
in_features=500, out_features=784, bias=True))]))
def forward(self, input_0, input_1):
primals_3 = self.linear_model.linear_1.weight
primals_4 = self.linear_model.linear_1.bias
primals_5 = self.linear_model.linear_2.weight
primals_6 = self.linear_model.linear_2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
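# Hedged usage sketch (illustrative only; the _demo_* name is ours, CUDA
# assumed): style and class codes are concatenated to 8 features, lifted to
# 500 tanh units, then projected to a flat 784-dim (28 x 28) reconstruction.
def _demo_decoder():
    decoder = DecoderNew(style_dim=4, class_dim=4).cuda()
    style = torch.rand(4, 4, device='cuda')
    label = torch.rand(4, 4, device='cuda')
    out = decoder(style, label)
    assert out.shape == (4, 784)
    return out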
|
vicissitude1999/multi-level-vae
|
Decoder
| false
| 16,684
|
[
"MIT"
] | 68
|
83bc98fbe5046c61941298d4fd49b08fd868ee89
|
https://github.com/vicissitude1999/multi-level-vae/tree/83bc98fbe5046c61941298d4fd49b08fd868ee89
|
MixtureDensityHead
|
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.distributions import Categorical
# Constants for the Gaussian density used by gaussian_probability below.
ONEOVERSQRT2PI = 1.0 / math.sqrt(2 * math.pi)
LOG2PI = math.log(2 * math.pi)
class MixtureDensityHead(nn.Module):
def __init__(self, config: 'DictConfig', **kwargs):
self.hparams = config
super().__init__()
self._build_network()
def _build_network(self):
self.pi = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
nn.init.normal_(self.pi.weight)
self.sigma = nn.Linear(self.hparams.input_dim, self.hparams.
num_gaussian, bias=self.hparams.sigma_bias_flag)
self.mu = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
nn.init.normal_(self.mu.weight)
if self.hparams.mu_bias_init is not None:
for i, bias in enumerate(self.hparams.mu_bias_init):
nn.init.constant_(self.mu.bias[i], bias)
def forward(self, x):
pi = self.pi(x)
sigma = self.sigma(x)
sigma = nn.ELU()(sigma) + 1 + 1e-15
mu = self.mu(x)
return pi, sigma, mu
def gaussian_probability(self, sigma, mu, target, log=False):
"""Returns the probability of `target` given MoG parameters `sigma` and `mu`.
Arguments:
sigma (BxGxO): The standard deviation of the Gaussians. B is the batch
size, G is the number of Gaussians, and O is the number of
dimensions per Gaussian.
mu (BxGxO): The means of the Gaussians. B is the batch size, G is the
number of Gaussians, and O is the number of dimensions per Gaussian.
target (BxI): A batch of target. B is the batch size and I is the number of
input dimensions.
Returns:
            probabilities (BxG): The probability of each point under the
                Gaussian component with the corresponding sigma/mu index.
"""
target = target.expand_as(sigma)
if log:
ret = -torch.log(sigma) - 0.5 * LOG2PI - 0.5 * torch.pow((
target - mu) / sigma, 2)
else:
ret = ONEOVERSQRT2PI / sigma * torch.exp(-0.5 * ((target - mu) /
sigma) ** 2)
return ret
def log_prob(self, pi, sigma, mu, y):
log_component_prob = self.gaussian_probability(sigma, mu, y, log=True)
log_mix_prob = torch.log(nn.functional.gumbel_softmax(pi, tau=self.
hparams.softmax_temperature, dim=-1) + 1e-15)
return torch.logsumexp(log_component_prob + log_mix_prob, dim=-1)
def sample(self, pi, sigma, mu):
"""Draw samples from a MoG."""
categorical = Categorical(pi)
pis = categorical.sample().unsqueeze(1)
sample = Variable(sigma.data.new(sigma.size(0), 1).normal_())
sample = sample * sigma.gather(1, pis) + mu.gather(1, pis)
return sample
def generate_samples(self, pi, sigma, mu, n_samples=None):
if n_samples is None:
n_samples = self.hparams.n_samples
samples = []
softmax_pi = nn.functional.gumbel_softmax(pi, tau=self.hparams.
softmax_temperature, dim=-1)
assert (softmax_pi < 0).sum().item(
            ) == 0, 'pi parameter should not have negative values'
for _ in range(n_samples):
samples.append(self.sample(softmax_pi, sigma, mu))
samples = torch.cat(samples, dim=1)
return samples
def generate_point_predictions(self, pi, sigma, mu, n_samples=None):
samples = self.generate_samples(pi, sigma, mu, n_samples)
if self.hparams.central_tendency == 'mean':
y_hat = torch.mean(samples, dim=-1)
elif self.hparams.central_tendency == 'median':
y_hat = torch.median(samples, dim=-1).values
return y_hat.unsqueeze(1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(input_dim=4, num_gaussian=4,
sigma_bias_flag=4, mu_bias_init=[4, 4])}]
|
import math
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.autograd import Variable
from torch.distributions import Categorical
# Constants for the Gaussian density used by gaussian_probability below.
ONEOVERSQRT2PI = 1.0 / math.sqrt(2 * math.pi)
LOG2PI = math.log(2 * math.pi)
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tmp8 = tmp7 + tmp3
tmp9 = 1e-15
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
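# The kernel above fuses the sigma branch: ELU(x) + 1 + 1e-15, the shifted
# activation that keeps every mixture standard deviation strictly positive.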
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_elu_0[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf3)
del primals_6
del primals_7
return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf2, reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class MixtureDensityHeadNew(nn.Module):
def __init__(self, config: 'DictConfig', **kwargs):
self.hparams = config
super().__init__()
self._build_network()
def _build_network(self):
self.pi = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
nn.init.normal_(self.pi.weight)
self.sigma = nn.Linear(self.hparams.input_dim, self.hparams.
num_gaussian, bias=self.hparams.sigma_bias_flag)
self.mu = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
nn.init.normal_(self.mu.weight)
if self.hparams.mu_bias_init is not None:
for i, bias in enumerate(self.hparams.mu_bias_init):
nn.init.constant_(self.mu.bias[i], bias)
def gaussian_probability(self, sigma, mu, target, log=False):
"""Returns the probability of `target` given MoG parameters `sigma` and `mu`.
Arguments:
sigma (BxGxO): The standard deviation of the Gaussians. B is the batch
size, G is the number of Gaussians, and O is the number of
dimensions per Gaussian.
mu (BxGxO): The means of the Gaussians. B is the batch size, G is the
number of Gaussians, and O is the number of dimensions per Gaussian.
target (BxI): A batch of target. B is the batch size and I is the number of
input dimensions.
Returns:
            probabilities (BxG): The probability of each point under the
                Gaussian component with the corresponding sigma/mu index.
"""
target = target.expand_as(sigma)
if log:
ret = -torch.log(sigma) - 0.5 * LOG2PI - 0.5 * torch.pow((
target - mu) / sigma, 2)
else:
ret = ONEOVERSQRT2PI / sigma * torch.exp(-0.5 * ((target - mu) /
sigma) ** 2)
return ret
def log_prob(self, pi, sigma, mu, y):
log_component_prob = self.gaussian_probability(sigma, mu, y, log=True)
log_mix_prob = torch.log(nn.functional.gumbel_softmax(pi, tau=self.
hparams.softmax_temperature, dim=-1) + 1e-15)
return torch.logsumexp(log_component_prob + log_mix_prob, dim=-1)
def sample(self, pi, sigma, mu):
"""Draw samples from a MoG."""
categorical = Categorical(pi)
pis = categorical.sample().unsqueeze(1)
sample = Variable(sigma.data.new(sigma.size(0), 1).normal_())
sample = sample * sigma.gather(1, pis) + mu.gather(1, pis)
return sample
def generate_samples(self, pi, sigma, mu, n_samples=None):
if n_samples is None:
n_samples = self.hparams.n_samples
samples = []
softmax_pi = nn.functional.gumbel_softmax(pi, tau=self.hparams.
softmax_temperature, dim=-1)
assert (softmax_pi < 0).sum().item(
            ) == 0, 'pi parameter should not have negative values'
for _ in range(n_samples):
samples.append(self.sample(softmax_pi, sigma, mu))
samples = torch.cat(samples, dim=1)
return samples
def generate_point_predictions(self, pi, sigma, mu, n_samples=None):
samples = self.generate_samples(pi, sigma, mu, n_samples)
if self.hparams.central_tendency == 'mean':
y_hat = torch.mean(samples, dim=-1)
elif self.hparams.central_tendency == 'median':
y_hat = torch.median(samples, dim=-1).values
return y_hat.unsqueeze(1)
def forward(self, input_0):
primals_1 = self.pi.weight
primals_2 = self.pi.bias
primals_4 = self.sigma.weight
primals_5 = self.sigma.bias
primals_6 = self.mu.weight
primals_7 = self.mu.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1], output[2]
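# Hedged usage sketch (illustrative only; the _demo_* name and the
# SimpleNamespace stand-in for the DictConfig are ours, CUDA assumed): the
# head emits mixture weights (pre-softmax), positive sigmas, and means.
def _demo_mdn_head():
    from types import SimpleNamespace
    config = SimpleNamespace(input_dim=4, num_gaussian=4,
        sigma_bias_flag=True, mu_bias_init=[4.0, 4.0])
    head = MixtureDensityHeadNew(config).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    pi, sigma, mu = head(x)
    assert (sigma > 0).all()
    return pi, sigma, mu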
|
robburdon/pytorch_tabular
|
MixtureDensityHead
| false
| 16,685
|
[
"MIT"
] | 560
|
9bf75f22c6e1b3033ad699713e77c283d55f3555
|
https://github.com/robburdon/pytorch_tabular/tree/9bf75f22c6e1b3033ad699713e77c283d55f3555
|
EntropicRiskMeasure
|
from torch.nn import Module
import torch
from torch import Tensor
from typing import Callable
from typing import Union
from abc import ABC
def _format_float(value: 'float') ->str:
"""
>>> _format_float(1)
'1'
>>> _format_float(1.0)
'1.'
>>> _format_float(1e-4)
'1.0000e-04'
"""
tensor = torch.tensor([value])
return torch._tensor_str._Formatter(tensor).format(value)
def bisect(fn: 'Callable[[Tensor], Tensor]', target: 'Tensor', lower:
'Union[float, Tensor]', upper: 'Union[float, Tensor]', precision:
'float'=1e-06, max_iter: 'int'=100000) ->Tensor:
"""Perform binary search over a tensor.
The output tensor approximately satisfies the following relation:
.. code-block::
fn(output) = target
Args:
fn (callable[[Tensor], Tensor]): A monotone function.
target (Tensor): Target of function values.
lower (Tensor or float): Lower bound of binary search.
upper (Tensor or float): Upper bound of binary search.
precision (float, default=1e-6): Precision of output.
        max_iter (int, default=100000): If the number of iterations exceeds this
value, abort computation and raise RuntimeError.
Returns:
torch.Tensor
Raises:
        RuntimeError: If the number of iterations exceeds ``max_iter``.
Examples:
>>> target = torch.tensor([-1.0, 0.0, 1.0])
>>> fn = torch.log
>>> output = bisect(fn, target, 0.01, 10.0)
>>> output
tensor([0.3679, 1.0000, 2.7183])
>>> torch.allclose(fn(output), target, atol=1e-6)
True
Monotone decreasing function:
>>> fn = lambda input: -torch.log(input)
>>> output = bisect(fn, target, 0.01, 10.0)
>>> output
tensor([2.7183, 1.0000, 0.3679])
>>> torch.allclose(fn(output), target, atol=1e-6)
True
"""
lower, upper = map(torch.as_tensor, (lower, upper))
if not (lower < upper).all():
raise ValueError('condition lower < upper should be satisfied.')
if (fn(lower) > fn(upper)).all():
def mf(input):
return -fn(input)
return bisect(mf, -target, lower, upper, precision=precision,
max_iter=max_iter)
n_iter = 0
while torch.max(upper - lower) > precision:
n_iter += 1
if n_iter > max_iter:
raise RuntimeError(
f'Aborting since iteration exceeds max_iter={max_iter}.')
m = (lower + upper) / 2
output = fn(m)
lower = lower.where(output >= target, m)
upper = upper.where(output < target, m)
return upper
def exp_utility(input: 'Tensor', a: 'float'=1.0) ->Tensor:
"""Applies an exponential utility function.
An exponential utility function is defined as:
.. math::
u(x) = -\\exp(-a x) \\,.
Args:
input (torch.Tensor): The input tensor.
a (float, default=1.0): The risk aversion coefficient of the exponential
utility.
Returns:
torch.Tensor
"""
return -(-a * input).exp()
def entropic_risk_measure(input: 'Tensor', a: 'float'=1.0) ->Tensor:
"""Returns the entropic risk measure.
See :class:`pfhedge.nn.EntropicRiskMeasure` for details.
"""
return (-exp_utility(input, a=a).mean(0)).log() / a
class HedgeLoss(Module, ABC):
"""Base class for hedging criteria."""
def forward(self, input: 'Tensor') ->Tensor:
"""Returns the loss of the profit-loss distribution.
This method should be overridden.
Args:
input (torch.Tensor): The distribution of the profit and loss.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Returns:
torch.Tensor
"""
def cash(self, input: 'Tensor') ->Tensor:
"""Returns the cash amount which is as preferable as
the given profit-loss distribution in terms of the loss.
The output ``cash`` is expected to satisfy the following relation:
.. code::
loss(torch.full_like(pnl, cash)) = loss(pnl)
By default, the output is computed by binary search.
If analytic form is known, it is recommended to override this method
for faster computation.
Args:
input (torch.Tensor): The distribution of the profit and loss.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Returns:
torch.Tensor
"""
return bisect(self, self(input), input.min(), input.max())
class EntropicRiskMeasure(HedgeLoss):
"""Creates a criterion that measures
the entropic risk measure.
The entropic risk measure of the profit-loss distribution
:math:`\\text{pnl}` is given by:
.. math::
\\text{loss}(\\text{pnl}) = \\frac{1}{a}
\\log(- \\mathbf{E}[u(\\text{pnl})]) \\,,
\\quad
u(x) = -\\exp(-a x) \\,.
.. seealso::
- :func:`pfhedge.nn.functional.exp_utility`:
The corresponding utility function.
Args:
a (float, default=1.0): Risk aversion coefficient of
the exponential utility.
This parameter should be positive.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Examples:
>>> from pfhedge.nn import EntropicRiskMeasure
>>>
>>> loss = EntropicRiskMeasure()
>>> input = -torch.arange(4.0)
>>> loss(input)
tensor(2.0539)
>>> loss.cash(input)
tensor(-2.0539)
"""
def __init__(self, a: 'float'=1.0):
if not a > 0:
raise ValueError('Risk aversion coefficient should be positive.')
super().__init__()
self.a = a
def extra_repr(self) ->str:
return 'a=' + _format_float(self.a) if self.a != 1 else ''
def forward(self, input: 'Tensor') ->Tensor:
return entropic_risk_measure(input, a=self.a)
def cash(self, input: 'Tensor') ->Tensor:
return -self(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
from torch import Tensor
from typing import Callable
from typing import Union
from abc import ABC
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_exp_log_mean_mul_neg_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp10 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp15 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = -tmp3
tmp6 = tmp5 * tmp1
tmp7 = tl_math.exp(tmp6)
tmp8 = -tmp7
tmp9 = tmp4 + tmp8
tmp11 = tmp10 * tmp1
tmp12 = tl_math.exp(tmp11)
tmp13 = -tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = -tmp17
tmp19 = tmp14 + tmp18
tmp20 = 4.0
tmp21 = tmp19 / tmp20
tmp22 = -tmp21
tmp23 = tl_math.log(tmp22)
tmp24 = 1.0
tmp25 = tmp23 * tmp24
tl.store(out_ptr0 + x0, tmp25, xmask)
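# The kernel above fuses the entire risk measure: it averages -exp(-x) over
# the batch dimension, negates, takes the log, and multiplies by 1/a = 1.0,
# matching entropic_risk_measure for the default risk aversion.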
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_exp_log_mean_mul_neg_0[grid(64)](arg0_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
def _format_float(value: 'float') ->str:
"""
>>> _format_float(1)
'1'
>>> _format_float(1.0)
'1.'
>>> _format_float(1e-4)
'1.0000e-04'
"""
tensor = torch.tensor([value])
return torch._tensor_str._Formatter(tensor).format(value)
def bisect(fn: 'Callable[[Tensor], Tensor]', target: 'Tensor', lower:
'Union[float, Tensor]', upper: 'Union[float, Tensor]', precision:
'float'=1e-06, max_iter: 'int'=100000) ->Tensor:
"""Perform binary search over a tensor.
The output tensor approximately satisfies the following relation:
.. code-block::
fn(output) = target
Args:
fn (callable[[Tensor], Tensor]): A monotone function.
target (Tensor): Target of function values.
lower (Tensor or float): Lower bound of binary search.
upper (Tensor or float): Upper bound of binary search.
precision (float, default=1e-6): Precision of output.
        max_iter (int, default=100000): If the number of iterations exceeds this
value, abort computation and raise RuntimeError.
Returns:
torch.Tensor
Raises:
        RuntimeError: If the number of iterations exceeds ``max_iter``.
Examples:
>>> target = torch.tensor([-1.0, 0.0, 1.0])
>>> fn = torch.log
>>> output = bisect(fn, target, 0.01, 10.0)
>>> output
tensor([0.3679, 1.0000, 2.7183])
>>> torch.allclose(fn(output), target, atol=1e-6)
True
Monotone decreasing function:
>>> fn = lambda input: -torch.log(input)
>>> output = bisect(fn, target, 0.01, 10.0)
>>> output
tensor([2.7183, 1.0000, 0.3679])
>>> torch.allclose(fn(output), target, atol=1e-6)
True
"""
lower, upper = map(torch.as_tensor, (lower, upper))
if not (lower < upper).all():
raise ValueError('condition lower < upper should be satisfied.')
if (fn(lower) > fn(upper)).all():
def mf(input):
return -fn(input)
return bisect(mf, -target, lower, upper, precision=precision,
max_iter=max_iter)
n_iter = 0
while torch.max(upper - lower) > precision:
n_iter += 1
if n_iter > max_iter:
raise RuntimeError(
f'Aborting since iteration exceeds max_iter={max_iter}.')
m = (lower + upper) / 2
output = fn(m)
lower = lower.where(output >= target, m)
upper = upper.where(output < target, m)
return upper
def exp_utility(input: 'Tensor', a: 'float'=1.0) ->Tensor:
"""Applies an exponential utility function.
An exponential utility function is defined as:
.. math::
u(x) = -\\exp(-a x) \\,.
Args:
input (torch.Tensor): The input tensor.
a (float, default=1.0): The risk aversion coefficient of the exponential
utility.
Returns:
torch.Tensor
"""
return -(-a * input).exp()
def entropic_risk_measure(input: 'Tensor', a: 'float'=1.0) ->Tensor:
"""Returns the entropic risk measure.
See :class:`pfhedge.nn.EntropicRiskMeasure` for details.
"""
return (-exp_utility(input, a=a).mean(0)).log() / a
class HedgeLoss(Module, ABC):
"""Base class for hedging criteria."""
def forward(self, input: 'Tensor') ->Tensor:
"""Returns the loss of the profit-loss distribution.
This method should be overridden.
Args:
input (torch.Tensor): The distribution of the profit and loss.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Returns:
torch.Tensor
"""
def cash(self, input: 'Tensor') ->Tensor:
"""Returns the cash amount which is as preferable as
the given profit-loss distribution in terms of the loss.
The output ``cash`` is expected to satisfy the following relation:
.. code::
loss(torch.full_like(pnl, cash)) = loss(pnl)
By default, the output is computed by binary search.
If analytic form is known, it is recommended to override this method
for faster computation.
Args:
input (torch.Tensor): The distribution of the profit and loss.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Returns:
torch.Tensor
"""
return bisect(self, self(input), input.min(), input.max())
class EntropicRiskMeasureNew(HedgeLoss):
"""Creates a criterion that measures
the entropic risk measure.
The entropic risk measure of the profit-loss distribution
:math:`\\text{pnl}` is given by:
.. math::
\\text{loss}(\\text{pnl}) = \\frac{1}{a}
\\log(- \\mathbf{E}[u(\\text{pnl})]) \\,,
\\quad
u(x) = -\\exp(-a x) \\,.
.. seealso::
- :func:`pfhedge.nn.functional.exp_utility`:
The corresponding utility function.
Args:
a (float, default=1.0): Risk aversion coefficient of
the exponential utility.
This parameter should be positive.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Examples:
>>> from pfhedge.nn import EntropicRiskMeasure
>>>
>>> loss = EntropicRiskMeasure()
>>> input = -torch.arange(4.0)
>>> loss(input)
tensor(2.0539)
>>> loss.cash(input)
tensor(-2.0539)
"""
def __init__(self, a: 'float'=1.0):
if not a > 0:
raise ValueError('Risk aversion coefficient should be positive.')
super().__init__()
self.a = a
def extra_repr(self) ->str:
return 'a=' + _format_float(self.a) if self.a != 1 else ''
def cash(self, input: 'Tensor') ->Tensor:
return -self(input)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
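# Hedged sanity check (illustrative only; the _check_* name is ours, CUDA
# assumed): the fused forward should agree with the eager
# entropic_risk_measure helper defined above.
def _check_entropic_risk_measure():
    pnl = torch.rand(4, 4, 4, 4, device='cuda')
    expected = entropic_risk_measure(pnl)
    actual = EntropicRiskMeasureNew()(pnl)
    assert torch.allclose(actual, expected, atol=1e-6)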
|
vishalbelsare/pfhedge
|
EntropicRiskMeasure
| false
| 16,686
|
[
"MIT"
] | 81
|
4d7ff173995e0795942bc6ec55f3fdc5bfb7a5f1
|
https://github.com/vishalbelsare/pfhedge/tree/4d7ff173995e0795942bc6ec55f3fdc5bfb7a5f1
|
VectorQuantizer
|
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
class VectorQuantizer(nn.Module):
"""
Reference:
[1] https://github.com/deepmind/sonnet/blob/v2/sonnet/src/nets/vqvae.py
"""
def __init__(self, num_embeddings: 'int', embedding_dim: 'int', beta:
'float'=0.25):
super(VectorQuantizer, self).__init__()
self.K = num_embeddings
self.D = embedding_dim
self.beta = beta
self.embedding = nn.Embedding(self.K, self.D)
self.embedding.weight.data.uniform_(-1 / self.K, 1 / self.K)
def get_codebook_indices(self, latents: 'Tensor') ->Tensor:
flat_latents = latents.view(-1, self.D)
dist = torch.sum(flat_latents ** 2, dim=1, keepdim=True) + torch.sum(
self.embedding.weight ** 2, dim=1) - 2 * torch.matmul(flat_latents,
self.embedding.weight.t())
encoding_inds = torch.argmin(dist, dim=1).unsqueeze(1)
return encoding_inds
def forward(self, latents: 'Tensor') ->Tensor:
latents = latents.permute(0, 2, 3, 1).contiguous()
latents_shape = latents.shape
encoding_inds = self.get_codebook_indices(latents)
encoding_one_hot = torch.nn.functional.one_hot(encoding_inds,
num_classes=self.K).float()
quantized_latents = torch.matmul(encoding_one_hot, self.embedding.
weight)
quantized_latents = quantized_latents.view(latents_shape)
commitment_loss = F.mse_loss(quantized_latents.detach(), latents)
embedding_loss = F.mse_loss(quantized_latents, latents.detach())
vq_loss = commitment_loss * self.beta + embedding_loss
quantized_latents = latents + (quantized_latents - latents).detach()
return quantized_latents.permute(0, 3, 1, 2).contiguous(), vq_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_embeddings': 4, 'embedding_dim': 4}]
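# Hedged usage sketch (illustrative only; the _demo_* name is ours): the
# quantizer snaps each spatial code vector to its nearest of K embeddings
# and returns a straight-through quantized tensor plus the VQ loss.
def _demo_vq():
    vq = VectorQuantizer(num_embeddings=4, embedding_dim=4)
    latents = torch.rand(4, 4, 4, 4)
    quantized, vq_loss = vq(latents)
    assert quantized.shape == latents.shape
    assert vq_loss.dim() == 0
    return quantized, vq_loss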
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import Tensor
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_view_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (16 * x1 + 64 * (y0 // 16) + y0 % 16), xmask &
ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_mul_pow_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 + tmp21
tmp24 = 2.0
tmp25 = tmp23 * tmp24
tmp26 = tmp22 - tmp25
tl.store(in_out_ptr0 + x2, tmp26, xmask)
@triton.jit
def triton_poi_fused_argmin_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 < tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 < tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 < tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x0, tmp46, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_eq_view_3(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = tmp2.to(tl.int64)
tmp4 = tmp3.to(tl.float32)
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused_add_clone_mse_loss_mul_4(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 4
r1 = rindex // 4 % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * r0 + 64 * r2), None,
eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = tmp10 + tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp2 = tmp1 - tmp0
tmp3 = tmp0 + tmp2
tl.store(out_ptr0 + (x2 + 16 * y3), tmp3, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_mse_loss_mse_loss_backward_6(in_out_ptr0,
in_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_out_ptr0 + (x2 + 4 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = 0.0078125
tmp4 = tmp2 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_view_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4
), 0), out=buf1)
buf2 = buf1
del buf1
triton_poi_fused_add_mul_pow_sub_sum_1[grid(256)](buf2, buf0,
primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_argmin_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused__to_copy_arange_eq_view_3[grid(256)](buf3, buf4,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
buf5 = buf0
del buf0
extern_kernels.mm(buf4, primals_2, out=buf5)
del primals_2
buf6 = empty_strided_cuda((), (), torch.float32)
buf9 = buf6
del buf6
triton_per_fused_add_clone_mse_loss_mul_4[grid(1)](buf9, buf5,
primals_1, 1, 256, num_warps=2, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_5[grid(16, 16)](primals_1, buf5, buf7, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_clone_mse_loss_mse_loss_backward_6[grid(64, 4)](buf8,
primals_1, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
return buf7, buf9, buf8, reinterpret_tensor(buf4, (4, 64), (1, 4), 0)
class VectorQuantizerNew(nn.Module):
"""
Reference:
[1] https://github.com/deepmind/sonnet/blob/v2/sonnet/src/nets/vqvae.py
"""
def __init__(self, num_embeddings: 'int', embedding_dim: 'int', beta:
'float'=0.25):
super(VectorQuantizerNew, self).__init__()
self.K = num_embeddings
self.D = embedding_dim
self.beta = beta
self.embedding = nn.Embedding(self.K, self.D)
self.embedding.weight.data.uniform_(-1 / self.K, 1 / self.K)
def get_codebook_indices(self, latents: 'Tensor') ->Tensor:
flat_latents = latents.view(-1, self.D)
dist = torch.sum(flat_latents ** 2, dim=1, keepdim=True) + torch.sum(
self.embedding.weight ** 2, dim=1) - 2 * torch.matmul(flat_latents,
self.embedding.weight.t())
encoding_inds = torch.argmin(dist, dim=1).unsqueeze(1)
return encoding_inds
def forward(self, input_0):
primals_2 = self.embedding.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
|
vipavlovic/pyprobml
|
VectorQuantizer
| false
| 16,687
|
[
"MIT"
] | 4,895
|
59a2edc682d0163955db5e2f27491ad772b60141
|
https://github.com/vipavlovic/pyprobml/tree/59a2edc682d0163955db5e2f27491ad772b60141
|
Warp
|
import torch
import torch.nn as nn
class Warp(torch.nn.Module):
"""Custom warping layer."""
def __init__(self, mode='bilinear', padding_mode='reflection'):
super().__init__()
self.mode = mode
self.padding_mode = padding_mode
def forward(self, x, tform):
"""Warp the tensor `x` with `tform` along the time dimension.
Parameters
----------
x : torch.Tensor
Tensor of shape `(n_samples, n_channels, lookback, n_assets)`.
tform : torch.Tensor
Tensor of shape `(n_samples, lookback)` or `(n_samples, lookback, n_assets)`.
Note that in the first case the same transformation is going to be used over all
            assets. To prevent folding, the transformation should be increasing along the
time dimension. It should range from -1 (beginning of the series) to 1 (end of
the series).
Returns
-------
x_warped : torch.Tensor
Warped version of input `x` with transformation `tform`. The shape is the same
as the input shape - `(n_samples, n_channels, lookback, n_assets)`.
"""
n_samples, _n_channels, lookback, n_assets = x.shape
dtype, device = x.dtype, x.device
if tform.ndim == 3:
ty = tform
elif tform.ndim == 2:
ty = torch.stack(n_assets * [tform], dim=-1)
else:
raise ValueError(
'The tform tensor needs to be either 2 or 3 dimensional.')
tx = torch.ones(n_samples, lookback, n_assets, dtype=dtype, device=
device)
tx *= torch.linspace(-1, 1, steps=n_assets, device=device, dtype=dtype
)[None, None, :]
grid = torch.stack([tx, ty], dim=-1)
x_warped = nn.functional.grid_sample(x, grid, mode=self.mode,
padding_mode=self.padding_mode, align_corners=True)
return x_warped
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
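# A minimal usage sketch for Warp (assumption: an identity-like warp built
# from torch.linspace; shapes follow the forward() docstring above).
if __name__ == '__main__':
    warp = Warp()
    x = torch.rand(2, 3, 8, 5)  # (n_samples, n_channels, lookback, n_assets)
    tform = torch.linspace(-1, 1, steps=8).repeat(2, 1)  # one warp per sample
    x_warped = warp(x, tform)
    assert x_warped.shape == x.shape  # warping preserves the input shape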
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2 % 4
x2 = xindex // 8
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x1
tmp6 = tmp5.to(tl.float32)
tmp7 = 2.0
tmp8 = tmp6 < tmp7
tmp9 = 0.6666666666666666
tmp10 = tmp6 * tmp9
tmp11 = -1.0
tmp12 = tmp10 + tmp11
tmp13 = 3 + -1 * x1
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 * tmp9
tmp16 = 1.0
tmp17 = tmp16 - tmp15
tmp18 = tl.where(tmp8, tmp12, tmp17)
tmp19 = tmp16 * tmp18
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp4, tmp19, tmp20)
tmp22 = tmp0 >= tmp3
tmp23 = tl.full([1], 2, tl.int64)
tmp26 = tmp5 < tmp3
tmp27 = tmp26 & tmp22
tmp28 = tl.load(in_ptr0 + x2, tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp29 = tmp5 >= tmp3
tmp30 = tmp5 < tmp23
tmp31 = tmp29 & tmp30
tmp32 = tmp31 & tmp22
tmp33 = tl.load(in_ptr0 + x2, tmp32 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tmp5 >= tmp23
tmp35 = tl.full([1], 3, tl.int64)
tmp36 = tmp5 < tmp35
tmp37 = tmp34 & tmp36
tmp38 = tmp37 & tmp22
tmp39 = tl.load(in_ptr0 + x2, tmp38 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp40 = tmp5 >= tmp35
tl.full([1], 4, tl.int64)
tmp43 = tmp40 & tmp22
tmp44 = tl.load(in_ptr0 + x2, tmp43 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp45 = tl.where(tmp37, tmp39, tmp44)
tmp46 = tl.where(tmp31, tmp33, tmp45)
tmp47 = tl.where(tmp26, tmp28, tmp46)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp22, tmp47, tmp48)
tmp50 = tl.where(tmp4, tmp21, tmp49)
tl.store(out_ptr0 + x3, tmp50, xmask)
@triton.jit
def triton_poi_fused_grid_sampler_2d_1(in_out_ptr2, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
x4 = xindex // 16
tmp0 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x2), xmask, eviction_policy
='evict_last')
tmp29 = tl.load(in_ptr0 + (2 * x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.5
tmp2 = tmp0 * tmp1
tmp3 = tmp2 + tmp1
tmp4 = 0.0
tmp5 = tmp3 - tmp4
tmp6 = tl_math.abs(tmp5)
tmp7 = 0.3333333333333333
tmp8 = tmp6 * tmp7
tmp9 = libdevice.floor(tmp8)
tmp10 = tmp9.to(tl.int8)
tmp11 = tl.full([1], 1, tl.int8)
tmp12 = tmp10 & tmp11
tmp13 = tl.full([1], 0, tl.int8)
tmp14 = tmp12 == tmp13
tmp15 = 3.0
tmp16 = libdevice.fmod(tmp6, tmp15)
tmp17 = tmp16 + tmp4
tmp18 = tmp15 - tmp16
tmp19 = tl.where(tmp14, tmp17, tmp18)
tmp20 = triton_helpers.maximum(tmp19, tmp4)
tmp21 = triton_helpers.minimum(tmp20, tmp15)
tmp22 = libdevice.floor(tmp21)
tmp23 = 1.0
tmp24 = tmp22 + tmp23
tmp25 = tmp24 >= tmp4
tmp26 = 4.0
tmp27 = tmp24 < tmp26
tmp28 = tmp25 & tmp27
tmp30 = tmp29 * tmp1
tmp31 = tmp30 + tmp1
tmp32 = tmp31 - tmp4
tmp33 = tl_math.abs(tmp32)
tmp34 = tmp33 * tmp7
tmp35 = libdevice.floor(tmp34)
tmp36 = tmp35.to(tl.int8)
tmp37 = tmp36 & tmp11
tmp38 = tmp37 == tmp13
tmp39 = libdevice.fmod(tmp33, tmp15)
tmp40 = tmp39 + tmp4
tmp41 = tmp15 - tmp39
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = triton_helpers.maximum(tmp42, tmp4)
tmp44 = triton_helpers.minimum(tmp43, tmp15)
tmp45 = libdevice.floor(tmp44)
tmp46 = tmp45 >= tmp4
tmp47 = tmp45 < tmp26
tmp48 = tmp47 & tmp28
tmp49 = tmp46 & tmp48
tmp50 = tmp45 + tmp23
tmp51 = tmp50 < tmp26
tmp52 = tmp51 & tmp28
tmp53 = tmp22 >= tmp4
tmp54 = tmp22 < tmp26
tmp55 = tmp53 & tmp54
tmp56 = tmp51 & tmp55
tmp57 = tmp47 & tmp55
tmp58 = tmp44 - tmp45
tmp59 = tmp24 - tmp21
tmp60 = tmp58 * tmp59
tmp61 = tmp50 >= tmp4
tmp62 = tmp61 & tmp56
tmp63 = tl.where(tmp62, tmp60, tmp4)
tmp64 = tmp50 - tmp44
tmp65 = tmp21 - tmp22
tmp66 = tmp64 * tmp65
tmp67 = tmp58 * tmp65
tmp68 = tmp61 & tmp52
tmp69 = tl.where(tmp68, tmp67, tmp4)
tmp70 = tmp64 * tmp59
tmp71 = tmp46 & tmp57
tmp72 = tmp22.to(tl.int64)
tmp73 = tl.full([1], 0, tl.int64)
tmp74 = tl.where(tmp71, tmp72, tmp73)
tmp75 = tmp45.to(tl.int64)
tmp76 = tl.where(tmp71, tmp75, tmp73)
tmp77 = tl.full([XBLOCK], 4, tl.int32)
tmp78 = tmp74 + tmp77
tmp79 = tmp74 < 0
tmp80 = tl.where(tmp79, tmp78, tmp74)
tl.device_assert((0 <= tmp80) & (tmp80 < 4) | ~xmask,
'index out of bounds: 0 <= tmp80 < 4')
tmp82 = tmp76 + tmp77
tmp83 = tmp76 < 0
tmp84 = tl.where(tmp83, tmp82, tmp76)
tl.device_assert((0 <= tmp84) & (tmp84 < 4) | ~xmask,
'index out of bounds: 0 <= tmp84 < 4')
tmp86 = tl.load(in_ptr1 + (tmp84 + 4 * tmp80 + 16 * x4), xmask,
eviction_policy='evict_last')
tmp87 = tl.where(tmp71, tmp70, tmp4)
tmp88 = tmp86 * tmp87
tmp89 = tl.where(tmp62, tmp72, tmp73)
tmp90 = tmp50.to(tl.int64)
tmp91 = tl.where(tmp62, tmp90, tmp73)
tmp92 = tmp24.to(tl.int64)
tmp93 = tl.where(tmp68, tmp92, tmp73)
tmp94 = tl.where(tmp68, tmp90, tmp73)
tmp95 = tl.where(tmp49, tmp92, tmp73)
tmp96 = tmp95 + tmp77
tmp97 = tmp95 < 0
tmp98 = tl.where(tmp97, tmp96, tmp95)
tl.device_assert((0 <= tmp98) & (tmp98 < 4) | ~xmask,
'index out of bounds: 0 <= tmp98 < 4')
tmp100 = tl.where(tmp49, tmp75, tmp73)
tmp101 = tmp100 + tmp77
tmp102 = tmp100 < 0
tmp103 = tl.where(tmp102, tmp101, tmp100)
tl.device_assert((0 <= tmp103) & (tmp103 < 4) | ~xmask,
'index out of bounds: 0 <= tmp103 < 4')
tmp105 = tl.load(in_ptr1 + (tmp103 + 4 * tmp98 + 16 * x4), xmask,
eviction_policy='evict_last')
tmp106 = tmp89 + tmp77
tmp107 = tmp89 < 0
tmp108 = tl.where(tmp107, tmp106, tmp89)
tl.device_assert((0 <= tmp108) & (tmp108 < 4) | ~xmask,
'index out of bounds: 0 <= tmp108 < 4')
tmp110 = tmp91 + tmp77
tmp111 = tmp91 < 0
tmp112 = tl.where(tmp111, tmp110, tmp91)
tl.device_assert((0 <= tmp112) & (tmp112 < 4) | ~xmask,
'index out of bounds: 0 <= tmp112 < 4')
tmp114 = tl.load(in_ptr1 + (tmp112 + 4 * tmp108 + 16 * x4), xmask,
eviction_policy='evict_last')
tmp115 = tmp114 * tmp63
tmp116 = tmp88 + tmp115
tmp117 = tl.where(tmp49, tmp66, tmp4)
tmp118 = tmp105 * tmp117
tmp119 = tmp116 + tmp118
tmp120 = tmp93 + tmp77
tmp121 = tmp93 < 0
tmp122 = tl.where(tmp121, tmp120, tmp93)
tl.device_assert((0 <= tmp122) & (tmp122 < 4) | ~xmask,
'index out of bounds: 0 <= tmp122 < 4')
tmp124 = tmp94 + tmp77
tmp125 = tmp94 < 0
tmp126 = tl.where(tmp125, tmp124, tmp94)
tl.device_assert((0 <= tmp126) & (tmp126 < 4) | ~xmask,
'index out of bounds: 0 <= tmp126 < 4')
tmp128 = tl.load(in_ptr1 + (tmp126 + 4 * tmp122 + 16 * x4), xmask,
eviction_policy='evict_last')
tmp129 = tmp128 * tmp69
tmp130 = tmp119 + tmp129
tl.store(in_out_ptr2 + x3, tmp130, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(128)](arg1_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg1_1
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf10 = buf9
del buf9
buf22 = buf10
del buf10
triton_poi_fused_grid_sampler_2d_1[grid(256)](buf22, buf0, arg0_1,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf0
return buf22,
class WarpNew(torch.nn.Module):
"""Custom warping layer."""
def __init__(self, mode='bilinear', padding_mode='reflection'):
super().__init__()
self.mode = mode
self.padding_mode = padding_mode
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
vishalbelsare/deepdow
|
Warp
| false
| 16,688
|
[
"Apache-2.0"
] | 511
|
cbb99347fba9a447d4fcae64fe5137c203643e44
|
https://github.com/vishalbelsare/deepdow/tree/cbb99347fba9a447d4fcae64fe5137c203643e44
|
BertLayerNormNoVar
|
import torch
import torch.nn as nn
class BertLayerNormNoVar(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
super(BertLayerNormNoVar, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
x = x - u
return self.weight * x + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
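# A quick sanity check (sketch): the layer only centers the last dimension
# (no variance term), so with the default weight=1 and bias=0 the output mean
# along dim -1 is ~0.
if __name__ == '__main__':
    layer = BertLayerNormNoVar(hidden_size=4)
    y = layer(torch.rand(4, 4, 4, 4))
    assert torch.allclose(y.mean(-1), torch.zeros(4, 4, 4), atol=1e-6)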
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mean_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp14 = tmp12 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_mul_sub_0[grid(256)](primals_2, primals_1,
primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class BertLayerNormNoVarNew(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
super(BertLayerNormNoVarNew, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
vtu81/auto_LiRPA
|
BertLayerNormNoVar
| false
| 16,689
|
[
"BSD-3-Clause"
] | 161
|
294152077c0abfafb5d62fee39335e60eff087b4
|
https://github.com/vtu81/auto_LiRPA/tree/294152077c0abfafb5d62fee39335e60eff087b4
|
ExpectedShortfall
|
from torch.nn import Module
import torch
from torch import Tensor
from typing import Callable
from typing import Union
from typing import Optional
from abc import ABC
from math import ceil
def bisect(fn: 'Callable[[Tensor], Tensor]', target: 'Tensor', lower:
'Union[float, Tensor]', upper: 'Union[float, Tensor]', precision:
'float'=1e-06, max_iter: 'int'=100000) ->Tensor:
"""Perform binary search over a tensor.
The output tensor approximately satisfies the following relation:
.. code-block::
fn(output) = target
Args:
fn (callable[[Tensor], Tensor]): A monotone function.
target (Tensor): Target of function values.
lower (Tensor or float): Lower bound of binary search.
upper (Tensor or float): Upper bound of binary search.
precision (float, default=1e-6): Precision of output.
        max_iter (int, default=100000): If the number of iterations exceeds this
value, abort computation and raise RuntimeError.
Returns:
torch.Tensor
Raises:
        RuntimeError: If the number of iterations exceeds ``max_iter``.
Examples:
>>> target = torch.tensor([-1.0, 0.0, 1.0])
>>> fn = torch.log
>>> output = bisect(fn, target, 0.01, 10.0)
>>> output
tensor([0.3679, 1.0000, 2.7183])
>>> torch.allclose(fn(output), target, atol=1e-6)
True
Monotone decreasing function:
>>> fn = lambda input: -torch.log(input)
>>> output = bisect(fn, target, 0.01, 10.0)
>>> output
tensor([2.7183, 1.0000, 0.3679])
>>> torch.allclose(fn(output), target, atol=1e-6)
True
"""
lower, upper = map(torch.as_tensor, (lower, upper))
if not (lower < upper).all():
raise ValueError('condition lower < upper should be satisfied.')
if (fn(lower) > fn(upper)).all():
def mf(input):
return -fn(input)
return bisect(mf, -target, lower, upper, precision=precision,
max_iter=max_iter)
n_iter = 0
while torch.max(upper - lower) > precision:
n_iter += 1
if n_iter > max_iter:
raise RuntimeError(
f'Aborting since iteration exceeds max_iter={max_iter}.')
m = (lower + upper) / 2
output = fn(m)
lower = lower.where(output >= target, m)
upper = upper.where(output < target, m)
return upper
def topp(input: 'Tensor', p: 'float', dim: 'Optional[int]'=None, largest:
'bool'=True):
"""Returns the largest :math:`p * N` elements of the given input tensor,
where :math:`N` stands for the total number of elements in the input tensor.
If ``dim`` is not given, the last dimension of the ``input`` is chosen.
If ``largest`` is ``False`` then the smallest elements are returned.
A namedtuple of ``(values, indices)`` is returned, where the ``indices``
are the indices of the elements in the original ``input`` tensor.
Args:
input (torch.Tensor): The input tensor.
p (float): The quantile level.
dim (int, optional): The dimension to sort along.
largest (bool, default=True): Controls whether to return largest or smallest
elements.
Returns:
torch.Tensor
Examples:
>>> from pfhedge.nn.functional import topp
>>>
>>> input = torch.arange(1.0, 6.0)
>>> input
tensor([1., 2., 3., 4., 5.])
>>> topp(input, 3 / 5)
torch.return_types.topk(
values=tensor([5., 4., 3.]),
indices=tensor([4, 3, 2]))
"""
if dim is None:
return input.topk(ceil(p * input.numel()), largest=largest)
else:
return input.topk(ceil(p * input.size(dim)), dim=dim, largest=largest)
def expected_shortfall(input: 'Tensor', p: 'float', dim: 'Optional[int]'=None
) ->Tensor:
"""Returns the expected shortfall of the given input tensor.
Args:
input (torch.Tensor): The input tensor.
p (float): The quantile level.
dim (int, optional): The dimension to sort along.
Returns:
torch.Tensor
Examples:
>>> from pfhedge.nn.functional import expected_shortfall
>>>
>>> input = -torch.arange(10.0)
>>> input
tensor([-0., -1., -2., -3., -4., -5., -6., -7., -8., -9.])
>>> expected_shortfall(input, 0.3)
tensor(8.)
"""
if dim is None:
return -topp(input, p=p, largest=False).values.mean()
else:
return -topp(input, p=p, largest=False, dim=dim).values.mean(dim=dim)
class HedgeLoss(Module, ABC):
"""Base class for hedging criteria."""
def forward(self, input: 'Tensor') ->Tensor:
"""Returns the loss of the profit-loss distribution.
This method should be overridden.
Args:
input (torch.Tensor): The distribution of the profit and loss.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Returns:
torch.Tensor
"""
def cash(self, input: 'Tensor') ->Tensor:
"""Returns the cash amount which is as preferable as
the given profit-loss distribution in terms of the loss.
The output ``cash`` is expected to satisfy the following relation:
.. code::
loss(torch.full_like(pnl, cash)) = loss(pnl)
By default, the output is computed by binary search.
If analytic form is known, it is recommended to override this method
for faster computation.
Args:
input (torch.Tensor): The distribution of the profit and loss.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Returns:
torch.Tensor
"""
return bisect(self, self(input), input.min(), input.max())
class ExpectedShortfall(HedgeLoss):
"""Creates a criterion that measures the expected shortfall.
.. seealso::
- :func:`pfhedge.nn.functional.expected_shortfall`
Args:
p (float, default=0.1): Quantile level.
This parameter should satisfy :math:`0 < p \\leq 1`.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Examples:
>>> from pfhedge.nn import ExpectedShortfall
>>>
>>> loss = ExpectedShortfall(0.5)
>>> input = -torch.arange(4.0)
>>> loss(input)
tensor(2.5000)
>>> loss.cash(input)
tensor(-2.5000)
"""
def __init__(self, p: 'float'=0.1):
if not 0 < p <= 1:
raise ValueError('The quantile level should satisfy 0 < p <= 1.')
super().__init__()
self.p = p
def extra_repr(self) ->str:
return str(self.p)
def forward(self, input: 'Tensor') ->Tensor:
return expected_shortfall(input, p=self.p, dim=0)
def cash(self, input: 'Tensor') ->Tensor:
return -self(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
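# A short sketch of the cash/loss relation documented in HedgeLoss.cash:
# loss(torch.full_like(pnl, cash)) == loss(pnl). ExpectedShortfall overrides
# cash analytically as -loss(input), so no bisection is needed here.
if __name__ == '__main__':
    loss = ExpectedShortfall(0.5)
    pnl = -torch.arange(4.0)
    cash = loss.cash(pnl)
    assert torch.allclose(loss(torch.full_like(pnl, cash.item())), loss(pnl))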
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch import Tensor
from typing import Callable
from typing import Union
from typing import Optional
from abc import ABC
from math import ceil
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_neg_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 / tmp1
tmp3 = -tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.topk.default(arg0_1, 1, 0, False)
del arg0_1
buf1 = buf0[0]
del buf0
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
get_raw_stream(0)
triton_poi_fused_mean_neg_0[grid(64)](buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf3,
def bisect(fn: 'Callable[[Tensor], Tensor]', target: 'Tensor', lower:
'Union[float, Tensor]', upper: 'Union[float, Tensor]', precision:
'float'=1e-06, max_iter: 'int'=100000) ->Tensor:
"""Perform binary search over a tensor.
The output tensor approximately satisfies the following relation:
.. code-block::
fn(output) = target
Args:
fn (callable[[Tensor], Tensor]): A monotone function.
target (Tensor): Target of function values.
lower (Tensor or float): Lower bound of binary search.
upper (Tensor or float): Upper bound of binary search.
precision (float, default=1e-6): Precision of output.
        max_iter (int, default=100000): If the number of iterations exceeds this
value, abort computation and raise RuntimeError.
Returns:
torch.Tensor
Raises:
        RuntimeError: If the number of iterations exceeds ``max_iter``.
Examples:
>>> target = torch.tensor([-1.0, 0.0, 1.0])
>>> fn = torch.log
>>> output = bisect(fn, target, 0.01, 10.0)
>>> output
tensor([0.3679, 1.0000, 2.7183])
>>> torch.allclose(fn(output), target, atol=1e-6)
True
Monotone decreasing function:
>>> fn = lambda input: -torch.log(input)
>>> output = bisect(fn, target, 0.01, 10.0)
>>> output
tensor([2.7183, 1.0000, 0.3679])
>>> torch.allclose(fn(output), target, atol=1e-6)
True
"""
lower, upper = map(torch.as_tensor, (lower, upper))
if not (lower < upper).all():
raise ValueError('condition lower < upper should be satisfied.')
if (fn(lower) > fn(upper)).all():
def mf(input):
return -fn(input)
return bisect(mf, -target, lower, upper, precision=precision,
max_iter=max_iter)
n_iter = 0
while torch.max(upper - lower) > precision:
n_iter += 1
if n_iter > max_iter:
raise RuntimeError(
f'Aborting since iteration exceeds max_iter={max_iter}.')
m = (lower + upper) / 2
output = fn(m)
lower = lower.where(output >= target, m)
upper = upper.where(output < target, m)
return upper
def topp(input: 'Tensor', p: 'float', dim: 'Optional[int]'=None, largest:
'bool'=True):
"""Returns the largest :math:`p * N` elements of the given input tensor,
where :math:`N` stands for the total number of elements in the input tensor.
If ``dim`` is not given, the last dimension of the ``input`` is chosen.
If ``largest`` is ``False`` then the smallest elements are returned.
A namedtuple of ``(values, indices)`` is returned, where the ``indices``
are the indices of the elements in the original ``input`` tensor.
Args:
input (torch.Tensor): The input tensor.
p (float): The quantile level.
dim (int, optional): The dimension to sort along.
largest (bool, default=True): Controls whether to return largest or smallest
elements.
Returns:
torch.Tensor
Examples:
>>> from pfhedge.nn.functional import topp
>>>
>>> input = torch.arange(1.0, 6.0)
>>> input
tensor([1., 2., 3., 4., 5.])
>>> topp(input, 3 / 5)
torch.return_types.topk(
values=tensor([5., 4., 3.]),
indices=tensor([4, 3, 2]))
"""
if dim is None:
return input.topk(ceil(p * input.numel()), largest=largest)
else:
return input.topk(ceil(p * input.size(dim)), dim=dim, largest=largest)
def expected_shortfall(input: 'Tensor', p: 'float', dim: 'Optional[int]'=None
) ->Tensor:
"""Returns the expected shortfall of the given input tensor.
Args:
input (torch.Tensor): The input tensor.
p (float): The quantile level.
dim (int, optional): The dimension to sort along.
Returns:
torch.Tensor
Examples:
>>> from pfhedge.nn.functional import expected_shortfall
>>>
>>> input = -torch.arange(10.0)
>>> input
tensor([-0., -1., -2., -3., -4., -5., -6., -7., -8., -9.])
>>> expected_shortfall(input, 0.3)
tensor(8.)
"""
if dim is None:
return -topp(input, p=p, largest=False).values.mean()
else:
return -topp(input, p=p, largest=False, dim=dim).values.mean(dim=dim)
class HedgeLoss(Module, ABC):
"""Base class for hedging criteria."""
def forward(self, input: 'Tensor') ->Tensor:
"""Returns the loss of the profit-loss distribution.
This method should be overridden.
Args:
input (torch.Tensor): The distribution of the profit and loss.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Returns:
torch.Tensor
"""
def cash(self, input: 'Tensor') ->Tensor:
"""Returns the cash amount which is as preferable as
the given profit-loss distribution in terms of the loss.
The output ``cash`` is expected to satisfy the following relation:
.. code::
loss(torch.full_like(pnl, cash)) = loss(pnl)
By default, the output is computed by binary search.
If analytic form is known, it is recommended to override this method
for faster computation.
Args:
input (torch.Tensor): The distribution of the profit and loss.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Returns:
torch.Tensor
"""
return bisect(self, self(input), input.min(), input.max())
class ExpectedShortfallNew(HedgeLoss):
"""Creates a criterion that measures the expected shortfall.
.. seealso::
- :func:`pfhedge.nn.functional.expected_shortfall`
Args:
p (float, default=0.1): Quantile level.
This parameter should satisfy :math:`0 < p \\leq 1`.
Shape:
- Input: :math:`(N, *)` where
:math:`*` means any number of additional dimensions.
- Output: :math:`(*)`
Examples:
>>> from pfhedge.nn import ExpectedShortfall
>>>
>>> loss = ExpectedShortfall(0.5)
>>> input = -torch.arange(4.0)
>>> loss(input)
tensor(2.5000)
>>> loss.cash(input)
tensor(-2.5000)
"""
def __init__(self, p: 'float'=0.1):
if not 0 < p <= 1:
raise ValueError('The quantile level should satisfy 0 < p <= 1.')
super().__init__()
self.p = p
def extra_repr(self) ->str:
return str(self.p)
def cash(self, input: 'Tensor') ->Tensor:
return -self(input)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vishalbelsare/pfhedge
|
ExpectedShortfall
| false
| 16,690
|
[
"MIT"
] | 81
|
4d7ff173995e0795942bc6ec55f3fdc5bfb7a5f1
|
https://github.com/vishalbelsare/pfhedge/tree/4d7ff173995e0795942bc6ec55f3fdc5bfb7a5f1
|
MultiHeadDenseLayer
|
import torch
import tensorflow as tf
import torch.nn as nn
import torch.nn.functional as F
def get_activation(activ):
if callable(activ):
return activ
if activ is None:
return lambda x: x
if activ == 'tanh':
return F.tanh
elif activ == 'relu':
return F.relu
elif activ == 'gelu':
return F.gelu
elif activ == 'glu':
return lambda x: F.glu(x, -1)
else:
raise ValueError('Unknown activation: {}'.format(activ))
class MultiHeadDenseLayer(nn.Module):
""" Auto splitting or combining heads for the linear transformation. """
def __init__(self, input_size, output_units, num_heads, activation=None,
use_bias=True, is_output_transform=False):
""" Initializes MultiHeadDenseLayer.
Args:
input_size: The input dimension.
            output_units: An int scalar or int list, indicating the transformed output units.
                It must be an int scalar when `is_output_transform` is True.
            num_heads: The number of heads.
            activation: A string or a callable function for activation.
            use_bias: A boolean, whether to add a bias tensor.
            is_output_transform: A boolean, whether to use this layer for the output
                transformation in multi-head attention.
"""
super(MultiHeadDenseLayer, self).__init__()
self._output_units = output_units
self._num_heads = num_heads
self._use_bias = use_bias
self._is_output_transform = is_output_transform
self._activation = activation
self._activation_fn = get_activation(activation)
self._flatten_output_units = tf.nest.flatten(self._output_units)
if is_output_transform:
assert not tf.nest.is_nested(self._output_units)
self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(
torch.empty(input_size, self._output_units)))
else:
self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(
torch.empty(input_size, sum(self._flatten_output_units))),
requires_grad=True)
if self._use_bias:
self._bias = torch.nn.Parameter(torch.zeros(sum(self.
_flatten_output_units)), requires_grad=True)
def compat_kernel_shape(self, input_shape):
""" Compatible kernel for variable storage. """
if self._is_output_transform:
return [input_shape[-1] * input_shape[-2], self._output_units]
return [input_shape[-1], sum(self._flatten_output_units)]
@property
def kernel_shape(self):
""" The kernel shape. """
if self._is_output_transform:
return [self._num_heads, -1, self._output_units]
return [-1, sum(self._flatten_output_units)]
def forward(self, inputs):
""" Implements ``call()`` for MultiHeadDenseLayer.
Args:
            inputs: A float tensor of shape [batch_size, length, hidden_size]
                when `is_output_transform` is False, otherwise a float tensor of shape
                [batch_size, length, num_heads, num_units_per_head].
        Returns:
            The projected tensor with shape [batch_size, length, num_heads,
            num_units_per_head] per `self._output_units` when `is_output_transform`
            is False, otherwise [batch_size, length, output_units].
"""
kernel = torch.reshape(self._kernel, self.kernel_shape)
if self._is_output_transform:
output = torch.einsum('abcd,cde->abe', inputs, kernel)
else:
output = torch.einsum('abc,cd->abd', inputs, kernel)
if self._use_bias:
output += self._bias
if not self._is_output_transform:
output = torch.split(output, self._flatten_output_units, dim=-1)
output = tf.nest.map_structure(lambda x, num_units: torch.
reshape(x, list(x.size())[:-1] + [self._num_heads,
num_units // self._num_heads]), output, self.
_flatten_output_units, check_types=False)
output = tf.nest.flatten(output)
if self._activation_fn is not None:
output = tf.nest.map_structure(self._activation_fn, output,
check_types=False)
return tf.nest.pack_sequence_as(self._output_units, output)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_units': 4, 'num_heads': 4}]
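# A minimal usage sketch (assumption: a single int output_units, so forward
# returns one tensor with the heads split out of the last dimension). Requires
# tensorflow for tf.nest, as does the layer itself.
if __name__ == '__main__':
    layer = MultiHeadDenseLayer(input_size=4, output_units=8, num_heads=4)
    out = layer(torch.rand(2, 5, 4))  # [batch_size, length, hidden_size]
    assert out.shape == (2, 5, 4, 2)  # [batch, length, num_heads, units_per_head]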
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import tensorflow as tf
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4,
1), 0), reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0),
out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
return buf1, reinterpret_tensor(primals_2, (1, 4, 16), (64, 1, 4), 0)
def get_activation(activ):
if callable(activ):
return activ
if activ is None:
return lambda x: x
if activ == 'tanh':
return F.tanh
elif activ == 'relu':
return F.relu
elif activ == 'gelu':
return F.gelu
elif activ == 'glu':
return lambda x: F.glu(x, -1)
else:
raise ValueError('Unknown activation: {}'.format(activ))
class MultiHeadDenseLayerNew(nn.Module):
""" Auto splitting or combining heads for the linear transformation. """
def __init__(self, input_size, output_units, num_heads, activation=None,
use_bias=True, is_output_transform=False):
""" Initializes MultiHeadDenseLayer.
Args:
input_size: The input dimension.
            output_units: An int scalar or int list, indicating the transformed output units.
                It must be an int scalar when `is_output_transform` is True.
            num_heads: The number of heads.
            activation: A string or a callable function for activation.
            use_bias: A boolean, whether to add a bias tensor.
            is_output_transform: A boolean, whether to use this layer for the output
                transformation in multi-head attention.
"""
super(MultiHeadDenseLayerNew, self).__init__()
self._output_units = output_units
self._num_heads = num_heads
self._use_bias = use_bias
self._is_output_transform = is_output_transform
self._activation = activation
self._activation_fn = get_activation(activation)
self._flatten_output_units = tf.nest.flatten(self._output_units)
if is_output_transform:
assert not tf.nest.is_nested(self._output_units)
self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(
torch.empty(input_size, self._output_units)))
else:
self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(
torch.empty(input_size, sum(self._flatten_output_units))),
requires_grad=True)
if self._use_bias:
self._bias = torch.nn.Parameter(torch.zeros(sum(self.
_flatten_output_units)), requires_grad=True)
def compat_kernel_shape(self, input_shape):
""" Compatible kernel for variable storage. """
if self._is_output_transform:
return [input_shape[-1] * input_shape[-2], self._output_units]
return [input_shape[-1], sum(self._flatten_output_units)]
@property
def kernel_shape(self):
""" The kernel shape. """
if self._is_output_transform:
return [self._num_heads, -1, self._output_units]
return [-1, sum(self._flatten_output_units)]
def forward(self, input_0):
primals_1 = self._kernel
primals_3 = self._bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ishine/neurst
|
MultiHeadDenseLayer
| false
| 16,691
|
[
"Apache-2.0"
] | 208
|
2ba322393fcfed4261b33f4a657e12bbe321baaa
|
https://github.com/ishine/neurst/tree/2ba322393fcfed4261b33f4a657e12bbe321baaa
|
FocalLoss
|
import torch
import torch.nn as nn
from matplotlib.font_manager import *
class FocalLoss(nn.Module):
"""
    Focal loss: down-weights easy examples so training focuses on hard samples.
"""
def __init__(self, gamma=0, eps=1e-07):
"""
        :param gamma: focusing exponent; gamma=0 reduces to plain cross-entropy
        :param eps: small constant for numerical stability (unused in forward)
"""
super(FocalLoss, self).__init__()
self.gamma = gamma
self.eps = eps
self.ce = torch.nn.CrossEntropyLoss()
def forward(self, input, target):
"""
        :param input: predicted logits
        :param target: ground-truth targets for CrossEntropyLoss
        :return: scalar focal loss
"""
log_p = self.ce(input, target)
p = torch.exp(-log_p)
loss = (1.0 - p) ** self.gamma * log_p
return loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
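# A quick sketch: with the default gamma=0 the modulating factor (1 - p)**gamma
# is 1, so the loss reduces to plain cross-entropy (class-index targets used
# here for clarity; the soft targets from get_inputs() above also work).
if __name__ == '__main__':
    criterion = FocalLoss(gamma=0)
    logits = torch.randn(8, 5)
    target = torch.randint(0, 5, (8,))
    assert torch.allclose(criterion(logits, target),
                          torch.nn.functional.cross_entropy(logits, target))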
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from matplotlib.font_manager import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = -tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = 1.0
tmp24 - tmp23
tmp26 = tmp24 * tmp21
tmp27 = tmp26 / tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp27, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1[grid
(1)](buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf2,
class FocalLossNew(nn.Module):
"""
    Focal loss: down-weights easy examples so training focuses on hard samples.
"""
def __init__(self, gamma=0, eps=1e-07):
"""
        :param gamma: focusing exponent; gamma=0 reduces to plain cross-entropy
        :param eps: small constant for numerical stability (unused in forward)
"""
super(FocalLossNew, self).__init__()
self.gamma = gamma
self.eps = eps
self.ce = torch.nn.CrossEntropyLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
wang-tf/RepNet-MDNet-VehicleReID
|
FocalLoss
| false
| 16,692
|
[
"MIT"
] | 226
|
d3d184331206ca4bdb5ea399e5b90a9ccc53b400
|
https://github.com/wang-tf/RepNet-MDNet-VehicleReID/tree/d3d184331206ca4bdb5ea399e5b90a9ccc53b400
|
L1DepthLoss
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class L1DepthLoss(nn.Module):
"""Custom L1 loss for depth sequences."""
def __init__(self, args):
super(L1DepthLoss, self).__init__()
self.args = args
self.word_dim = 1
def forward(self, predictions, label_batch, length_batch):
""" Computes L1 loss on depth sequences.
        Ignores all entries where label_batch = -1.
Normalizes first within sentences (by dividing by the sentence length)
and then across the batch.
Args:
predictions: A pytorch batch of predicted depths
label_batch: A pytorch batch of true depths
length_batch: A pytorch batch of sentence lengths
Returns:
A tuple of:
batch_loss: average loss in the batch
total_sents: number of sentences in the batch
"""
total_sents = torch.sum(length_batch != 0).float()
labels_1s = (label_batch != -1).float()
predictions_masked = predictions * labels_1s
labels_masked = label_batch * labels_1s
if total_sents > 0:
loss_per_sent = torch.sum(torch.abs(predictions_masked -
labels_masked), dim=self.word_dim)
normalized_loss_per_sent = loss_per_sent / length_batch.float()
batch_loss = torch.sum(normalized_loss_per_sent) / total_sents
else:
batch_loss = torch.tensor(0.0, device=self.args['device'])
return batch_loss, total_sents
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config()}]
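# A minimal usage sketch (assumption: args behaves like a dict, mirroring the
# _mock_config() helper; 'device' is only read on the empty-batch path).
if __name__ == '__main__':
    loss_fn = L1DepthLoss(args={'device': 'cpu'})
    predictions = torch.rand(2, 6)
    labels = torch.rand(2, 6)
    labels[:, 4:] = -1                  # padded positions are ignored
    lengths = torch.tensor([4.0, 4.0])  # true sentence lengths
    batch_loss, total_sents = loss_fn(predictions, labels, lengths)
    assert total_sents == 2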
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_mul_ne_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp1 = -1.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = tmp0 * tmp3
tmp6 = tmp5 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp6, xmask)
@triton.jit
def triton_per_fused__to_copy_gt_ne_sum_1(in_ptr0, out_ptr1, out_ptr2,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 0.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.int64)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp7 > tmp1
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp7, None)
tl.store(out_ptr2 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_mul_ne_0[grid(256)](arg1_1, arg2_1, buf0,
buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
del arg2_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = empty_strided_cuda((), (), torch.bool)
triton_per_fused__to_copy_gt_ne_sum_1[grid(1)](arg0_1, buf3, buf4,
1, 256, num_warps=2, num_stages=1)
del arg0_1
return buf0, buf1, buf3, buf4
class L1DepthLossNew(nn.Module):
"""Custom L1 loss for depth sequences."""
def __init__(self, args):
super(L1DepthLossNew, self).__init__()
self.args = args
self.word_dim = 1
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
wanyao1992/structural-probes
|
L1DepthLoss
| false
| 16,693
|
[
"Apache-2.0"
] | 357
|
3071c93b23601d834628d79a74e46e8ab5e5a66b
|
https://github.com/wanyao1992/structural-probes/tree/3071c93b23601d834628d79a74e46e8ab5e5a66b
|
HLoss
|
import torch
import torch.nn.functional as F
from torch import nn
class HLoss(nn.Module):
"""
    Entropy loss: returns the entropy of the input along dim 1; with
    is_maximization=True the sign flips, so minimizing the loss maximizes entropy.
"""
def __init__(self, is_maximization=False):
super(HLoss, self).__init__()
self.is_neg = is_maximization
def forward(self, x):
b = F.softmax(x, dim=1) * F.log_softmax(x, dim=1)
if self.is_neg:
b = 1.0 * b.sum(dim=1).mean()
else:
b = -1.0 * b.sum(dim=1).mean()
return b
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
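# A quick sketch: for uniform logits the softmax over dim=1 is uniform, so the
# default (is_maximization=False) output equals the entropy log(num_classes).
if __name__ == '__main__':
    criterion = HLoss()
    x = torch.zeros(4, 4, 4, 4)  # uniform over the 4 classes along dim 1
    assert torch.allclose(criterion(x), torch.log(torch.tensor(4.0)))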
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
tl.store(out_ptr1 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax__softmax_mul_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + x3, xmask)
tmp10 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp11 = tl_math.exp(tmp10)
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp11 + tmp13
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tl_math.log(tmp20)
tmp22 = tmp9 - tmp21
tmp23 = tmp8 * tmp22
tl.store(out_ptr0 + x3, tmp23, xmask)
@triton.jit
def triton_per_fused_mean_mul_sum_2(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp9 / tmp10
tmp12 = -1.0
tmp13 = tmp11 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0[grid(256)](arg0_1, buf0,
buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_mul_1[grid(256)](buf0, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_mean_mul_sum_2[grid(1)](buf4, buf2, 1, 64, XBLOCK=
1, num_warps=2, num_stages=1)
del buf2
return buf4,
class HLossNew(nn.Module):
"""
    Entropy loss: returns the entropy of the input along dim 1; with
    is_maximization=True the sign flips, so minimizing the loss maximizes entropy.
"""
def __init__(self, is_maximization=False):
super(HLossNew, self).__init__()
self.is_neg = is_maximization
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vt-vl-lab/SDN
|
HLoss
| false
| 16,694
|
[
"MIT"
] | 88
|
d1f0a448acf720b9b86527f808cb17d30ed2f4e9
|
https://github.com/vt-vl-lab/SDN/tree/d1f0a448acf720b9b86527f808cb17d30ed2f4e9
|
Align
|
import torch
import torch.nn.functional as F
class Align(torch.nn.Module):
def __init__(self, p):
super(Align, self).__init__()
self.p = p
def forward(self, e1, e2):
pred = -torch.norm(e1 - e2, p=self.p, dim=1)
return pred
def only_pos_loss(self, e1, r, e2):
return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'p': 4}]
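# A minimal sketch: the score is the negative p-norm of the embedding
# difference along dim=1, so identical embeddings score 0 (the maximum) and
# every score is non-positive.
if __name__ == '__main__':
    align = Align(p=2)
    e = torch.rand(4, 4, 4, 4)
    assert torch.allclose(align(e, e), torch.zeros(4, 4, 4))
    assert (align(e, torch.rand_like(e)) <= 0).all()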
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_linalg_vector_norm_neg_sub_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp6 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp3
tmp7 = tmp5 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp13 = tmp11 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp19 = tmp17 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = 0.25
tmp24 = libdevice.pow(tmp22, tmp23)
tmp25 = -tmp24
tl.store(out_ptr0 + x2, tmp25, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_linalg_vector_norm_neg_sub_0[grid(64)](arg0_1,
arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class AlignNew(torch.nn.Module):
def __init__(self, p):
super(AlignNew, self).__init__()
self.p = p
def only_pos_loss(self, e1, r, e2):
return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
weihangzhang/EAkit
|
Align
| false
| 16,695
|
[
"MIT"
] | 102
|
dde8e914480cd1a3585271f70db11d567d9c2a04
|
https://github.com/weihangzhang/EAkit/tree/dde8e914480cd1a3585271f70db11d567d9c2a04
|
N_TransE
|
import torch
import torch.nn.functional as F
class N_TransE(torch.nn.Module):
def __init__(self, p, params):
super(N_TransE, self).__init__()
self.p = p
self.params = params
def forward(self, e1, r, e2):
pred = -torch.norm(e1 + r - e2, p=self.p, dim=1)
return pred
def loss(self, pos_score, neg_score, target):
return F.relu(pos_score + self.params[0] - neg_score).sum(
) + self.params[1] * F.relu(pos_score - self.params[2]).sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'p': 4, 'params': 4}]
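# Illustrative sketch (assumed values, not from the source): forward() scores
# triples as -||e1 + r - e2||_p over dim=1; loss() indexes into params, so a
# list is used here even though get_init_inputs() passes params=4.
model = N_TransE(p=2, params=[1.0, 0.5, 2.0])
e1, r, e2 = get_inputs()
pos_score = model(e1, r, e2)                  # shape (4, 4, 4)
neg_score = model(e1, r, torch.rand_like(e2))
print(model.loss(pos_score, neg_score, None).item())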
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_linalg_vector_norm_neg_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp24 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp26 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = tmp5 * tmp5
tmp9 = tmp7 + tmp8
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp12 * tmp12
tmp14 = tmp6 + tmp13
tmp17 = tmp15 + tmp16
tmp19 = tmp17 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp14 + tmp21
tmp25 = tmp23 + tmp24
tmp27 = tmp25 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp28 * tmp28
tmp30 = tmp22 + tmp29
tmp31 = 0.25
tmp32 = libdevice.pow(tmp30, tmp31)
tmp33 = -tmp32
tl.store(in_out_ptr0 + x2, tmp33, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_linalg_vector_norm_neg_sub_0[grid(64)](buf1,
arg0_1, arg1_1, arg2_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class N_TransENew(torch.nn.Module):
def __init__(self, p, params):
super(N_TransENew, self).__init__()
self.p = p
self.params = params
def loss(self, pos_score, neg_score, target):
return F.relu(pos_score + self.params[0] - neg_score).sum(
) + self.params[1] * F.relu(pos_score - self.params[2]).sum()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
weihangzhang/EAkit
|
N_TransE
| false
| 16,696
|
[
"MIT"
] | 102
|
dde8e914480cd1a3585271f70db11d567d9c2a04
|
https://github.com/weihangzhang/EAkit/tree/dde8e914480cd1a3585271f70db11d567d9c2a04
|
L1DistanceLoss
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class L1DistanceLoss(nn.Module):
"""Custom L1 loss for distance matrices."""
def __init__(self, args):
super(L1DistanceLoss, self).__init__()
self.args = args
self.word_pair_dims = 1, 2
def forward(self, predictions, label_batch, length_batch):
""" Computes L1 loss on distance matrices.
Ignores all entries where label_batch=-1
Normalizes first within sentences (by dividing by the square of the sentence length)
and then across the batch.
Args:
predictions: A pytorch batch of predicted distances
label_batch: A pytorch batch of true distances
length_batch: A pytorch batch of sentence lengths
Returns:
A tuple of:
batch_loss: average loss in the batch
total_sents: number of sentences in the batch
"""
labels_1s = (label_batch != -1).float()
predictions_masked = predictions * labels_1s
labels_masked = label_batch * labels_1s
total_sents = torch.sum(length_batch != 0).float()
squared_lengths = length_batch.pow(2).float()
if total_sents > 0:
loss_per_sent = torch.sum(torch.abs(predictions_masked -
labels_masked), dim=self.word_pair_dims)
normalized_loss_per_sent = loss_per_sent / squared_lengths
batch_loss = torch.sum(normalized_loss_per_sent) / total_sents
else:
batch_loss = torch.tensor(0.0, device=self.args['device'])
return batch_loss, total_sents
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config()}]
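# Worked example (an assumption for illustration; a plain dict stands in for
# _mock_config()): one sentence of length 3 with unit absolute errors gives
# sum(|pred - label|) = 9, normalized by 3**2 within the sentence and then
# by the single non-empty sentence in the batch.
loss_fn = L1DistanceLoss(args={'device': 'cpu'})
predictions = torch.ones(1, 3, 3)
labels = torch.zeros(1, 3, 3)
lengths = torch.tensor([3.0])
batch_loss, total_sents = loss_fn(predictions, labels, lengths)
assert batch_loss.item() == 1.0 and total_sents.item() == 1.0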
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_gt_ne_pow_sum_0(in_ptr0, out_ptr0, out_ptr2,
out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = 0.0
tmp3 = tmp0 != tmp2
tmp4 = tmp3.to(tl.int64)
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp8 > tmp2
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp1, None)
tl.store(out_ptr2 + tl.full([1], 0, tl.int32), tmp8, None)
tl.store(out_ptr3 + tl.full([1], 0, tl.int32), tmp9, None)
@triton.jit
def triton_poi_fused__to_copy_mul_ne_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr1 + x0, xmask)
tmp1 = -1.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = tmp0 * tmp3
tmp6 = tmp5 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp6, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = empty_strided_cuda((), (), torch.bool)
get_raw_stream(0)
triton_per_fused__to_copy_gt_ne_pow_sum_0[grid(1)](arg2_1, buf0,
buf4, buf5, 1, 256, num_warps=2, num_stages=1)
del arg2_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__to_copy_mul_ne_1[grid(256)](arg0_1, arg1_1, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0, buf4, buf1, buf2, buf5
class L1DistanceLossNew(nn.Module):
"""Custom L1 loss for distance matrices."""
def __init__(self, args):
super(L1DistanceLossNew, self).__init__()
self.args = args
self.word_pair_dims = 1, 2
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
wanyao1992/structural-probes
|
L1DistanceLoss
| false
| 16,697
|
[
"Apache-2.0"
] | 357
|
3071c93b23601d834628d79a74e46e8ab5e5a66b
|
https://github.com/wanyao1992/structural-probes/tree/3071c93b23601d834628d79a74e46e8ab5e5a66b
|
LayerNorm
|
import torch
class LayerNorm(torch.nn.Module):
def __init__(self, input_dim):
super(LayerNorm, self).__init__()
self.gamma = torch.nn.Parameter(torch.ones(input_dim))
self.beta = torch.nn.Parameter(torch.zeros(input_dim))
self.eps = 1e-06
def forward(self, x, mask):
mean = x.mean(-1, keepdim=True)
std = torch.sqrt(x.var(dim=1, keepdim=True) + self.eps)
output = self.gamma * (x - mean) / (std + self.eps) + self.beta
return output * mask.unsqueeze(1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
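# Shape-check sketch (assumed, not in the record): mask.unsqueeze(1)
# broadcasts a (4, 4, 4, 4) mask against the (4, 4, 4, 4) normalized output,
# so the result is 5-D, matching the (4, 4, 4, 4, 4) buffer allocated in the
# compiled call() below.
ln = LayerNorm(input_dim=4)
x, mask = get_inputs()
out = ln(x, mask)
assert out.shape == (4, 4, 4, 4, 4)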
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex
x5 = xindex // 4
x3 = xindex // 64
x6 = xindex % 16
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x5, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x5), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x5), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + (16 + x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + (32 + x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + (48 + x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp20 = tmp19 / tmp9
tmp21 = tmp13 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tmp14 - tmp20
tmp24 = tmp23 * tmp23
tmp25 = tmp22 + tmp24
tmp26 = tmp16 - tmp20
tmp27 = tmp26 * tmp26
tmp28 = tmp25 + tmp27
tmp29 = tmp18 - tmp20
tmp30 = tmp29 * tmp29
tmp31 = tmp28 + tmp30
tmp32 = 3.0
tmp33 = tmp31 / tmp32
tmp34 = 1e-06
tmp35 = tmp33 + tmp34
tmp36 = libdevice.sqrt(tmp35)
tmp37 = tmp36 + tmp34
tmp38 = tmp12 / tmp37
tl.store(out_ptr0 + x4, tmp38, xmask)
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x0 = xindex % 4
x3 = xindex // 256
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x6, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0[grid(256)](primals_2,
primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_add_mul_1[grid(1024)](buf0, primals_3, primals_4,
buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_3
return buf1, primals_1, primals_4
class LayerNormNew(torch.nn.Module):
def __init__(self, input_dim):
super(LayerNormNew, self).__init__()
self.gamma = torch.nn.Parameter(torch.ones(input_dim))
self.beta = torch.nn.Parameter(torch.zeros(input_dim))
self.eps = 1e-06
def forward(self, input_0, input_1):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
watchernyu/MatchLSTM-Analyze-Adversarial-Training
|
LayerNorm
| false
| 16,698
|
[
"MIT"
] | 50
|
00bd33d3dd22d5291dc2c1ec5feef5eb93b59b3a
|
https://github.com/watchernyu/MatchLSTM-Analyze-Adversarial-Training/tree/00bd33d3dd22d5291dc2c1ec5feef5eb93b59b3a
|
StructuredAttention_bi
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class StructuredAttention_bi(nn.Module):
def __init__(self, dropout=0.1, scale=100):
super(StructuredAttention_bi, self).__init__()
self.dropout = dropout
self.scale = scale
def forward(self, C, Q, c_mask, q_mask):
_bsz, _, _num_img, _num_region, _hsz = Q.shape
S, S_mask = self.similarity(C, Q, c_mask, q_mask)
S_c = F.softmax(S * self.scale, dim=-1)
S_q = F.softmax(S * self.scale, dim=-2)
S_c = S_c * S_mask
S_q = S_q * S_mask
A_c = torch.matmul(S_c, Q)
A_q = torch.matmul(S_q.transpose(-2, -1), C)
return A_c, A_q, S_mask, S_mask.transpose(-2, -1)
def similarity(self, C, Q, c_mask, q_mask):
C = F.dropout(F.normalize(C, p=2, dim=-1), p=self.dropout, training
=self.training)
Q = F.dropout(F.normalize(Q, p=2, dim=-1), p=self.dropout, training
=self.training)
S_mask = torch.matmul(c_mask.unsqueeze(-1), q_mask.unsqueeze(-2))
S = torch.matmul(C, Q.transpose(-2, -1))
masked_S = S - 10000000000.0 * (1 - S_mask)
return masked_S, S_mask
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4, 4]), torch.
rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
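# Hedged usage sketch (not from the source repo): C is 4-D, Q is 5-D, and the
# masked similarity feeds two softmaxes (dim=-1 for context-to-query, dim=-2
# for query-to-context); eval() disables the dropout for a deterministic
# smoke test.
attn = StructuredAttention_bi(dropout=0.1, scale=100)
attn.eval()
C, Q, c_mask, q_mask = get_inputs()
A_c, A_q, S_mask, S_mask_T = attn(C, Q, c_mask, q_mask)
assert A_c.shape == A_q.shape == (4, 4, 4, 4, 4)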
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_rsub_sub_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = 10000000000.0
tmp5 = tmp3 * tmp4
tmp6 = tmp0 - tmp5
tmp7 = tmp6 * tmp2
tmp10 = tmp2 - tmp9
tmp11 = tmp10 * tmp4
tmp12 = tmp8 - tmp11
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp4
tmp19 = tmp15 - tmp18
tmp20 = tmp19 * tmp2
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp24 = tmp2 - tmp23
tmp25 = tmp24 * tmp4
tmp26 = tmp22 - tmp25
tmp27 = tmp26 * tmp2
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = 100.0
tmp31 = tmp29 * tmp30
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp13 - tmp28
tmp34 = tmp33 * tmp30
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp32 + tmp35
tmp37 = tmp20 - tmp28
tmp38 = tmp37 * tmp30
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp36 + tmp39
tmp41 = tmp27 - tmp28
tmp42 = tmp41 * tmp30
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp40 + tmp43
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp44, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_rsub_sub_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp16 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
tmp22 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp23 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = 10000000000.0
tmp5 = tmp3 * tmp4
tmp6 = tmp0 - tmp5
tmp7 = tmp6 * tmp2
tmp10 = tmp2 - tmp9
tmp11 = tmp10 * tmp4
tmp12 = tmp8 - tmp11
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp4
tmp19 = tmp15 - tmp18
tmp20 = tmp19 * tmp2
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp24 = tmp2 - tmp23
tmp25 = tmp24 * tmp4
tmp26 = tmp22 - tmp25
tmp27 = tmp26 * tmp2
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = 100.0
tmp31 = tmp29 * tmp30
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp13 - tmp28
tmp34 = tmp33 * tmp30
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp32 + tmp35
tmp37 = tmp20 - tmp28
tmp38 = tmp37 * tmp30
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp36 + tmp39
tmp41 = tmp27 - tmp28
tmp42 = tmp41 * tmp30
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp40 + tmp43
tl.store(out_ptr0 + x2, tmp28, xmask)
tl.store(out_ptr1 + x2, tmp44, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_rsub_sub_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 4
x0 = xindex % 4
x3 = xindex // 16
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask)
tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr4 + (x0 + 4 * x3), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr5 + (x0 + 4 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = 10000000000.0
tmp5 = tmp3 * tmp4
tmp6 = tmp0 - tmp5
tmp7 = tmp6 * tmp2
tmp9 = tmp7 - tmp8
tmp10 = 100.0
tmp11 = tmp9 * tmp10
tmp12 = tl_math.exp(tmp11)
tmp14 = tmp12 / tmp13
tmp15 = tmp14 * tmp1
tmp17 = tmp7 - tmp16
tmp18 = tmp17 * tmp10
tmp19 = tl_math.exp(tmp18)
tmp21 = tmp19 / tmp20
tmp22 = tmp21 * tmp1
tl.store(out_ptr0 + x4, tmp15, xmask)
tl.store(out_ptr1 + x4, tmp22, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_clone_1[grid(1024)](buf0, buf1, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_div_2[grid(1024)](arg0_1, buf2, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf2, (64, 4, 4), (16, 1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(arg2_1, (64, 4, 1), (4, 1, 1),
0), reinterpret_tensor(arg3_1, (64, 1, 4), (4, 4, 1), 0), out=buf4)
del arg2_1
del arg3_1
buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0
)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256),
torch.float32)
triton_poi_fused__softmax_mul_rsub_sub_3[grid(256)](buf3, buf4,
buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 1, 4), (64, 16, 4, 256, 1),
torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 1, 4), (64, 16, 4, 256, 1),
torch.float32)
triton_poi_fused__softmax_mul_rsub_sub_4[grid(256)](buf3, buf4,
buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf7 = buf1
del buf1
buf11 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused__softmax_mul_rsub_sub_5[grid(1024)](buf3, buf4,
buf5, buf6, buf9, buf10, buf7, buf11, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
del buf10
del buf5
del buf6
del buf9
buf8 = buf3
del buf3
extern_kernels.bmm(reinterpret_tensor(buf7, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg0_1, (64, 4, 4), (16, 4, 1), 0), out=buf8
)
del arg0_1
buf12 = buf7
del buf7
triton_poi_fused_clone_1[grid(1024)](arg1_1, buf12, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del arg1_1
buf13 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf11, (64, 4, 4), (16, 1, 4),
0), reinterpret_tensor(buf12, (64, 4, 4), (16, 4, 1), 0), out=buf13
)
del buf11
del buf12
return reinterpret_tensor(buf8, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
), reinterpret_tensor(buf13, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
), reinterpret_tensor(buf4, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
), reinterpret_tensor(buf4, (4, 4, 4, 4, 4), (256, 64, 16, 1, 4), 0)
class StructuredAttention_biNew(nn.Module):
def __init__(self, dropout=0.1, scale=100):
super(StructuredAttention_biNew, self).__init__()
self.dropout = dropout
self.scale = scale
def similarity(self, C, Q, c_mask, q_mask):
C = F.dropout(F.normalize(C, p=2, dim=-1), p=self.dropout, training
=self.training)
Q = F.dropout(F.normalize(Q, p=2, dim=-1), p=self.dropout, training
=self.training)
S_mask = torch.matmul(c_mask.unsqueeze(-1), q_mask.unsqueeze(-2))
S = torch.matmul(C, Q.transpose(-2, -1))
masked_S = S - 10000000000.0 * (1 - S_mask)
return masked_S, S_mask
def forward(self, input_0, input_1, input_2, input_3):
arg1_1 = input_0
arg0_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0], output[1], output[2], output[3]
|
vivekrajput566/03testing2022
|
StructuredAttention_bi
| false
| 16,699
|
[
"MIT"
] | 49
|
f7e04f921c6607d383806ca2bbb85d2de84e0369
|
https://github.com/vivekrajput566/03testing2022/tree/f7e04f921c6607d383806ca2bbb85d2de84e0369
|
AlignEA
|
import torch
import torch.nn.functional as F
class AlignEA(torch.nn.Module):
def __init__(self, p, feat_drop, params):
super(AlignEA, self).__init__()
self.params = params
def forward(self, e1, r, e2):
return torch.sum(torch.pow(e1 + r - e2, 2), 1)
def only_pos_loss(self, e1, r, e2):
return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum()
def loss(self, pos_score, neg_score, target):
return F.relu(pos_score - self.params[0]).sum() + self.params[1
] * F.relu(self.params[2] - neg_score).sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'p': 4, 'feat_drop': 4, 'params': 4}]
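# Illustrative sketch (assumed): forward() returns the squared TransE energy
# summed over dim=1, and only_pos_loss() wraps it in a logsigmoid term; the
# p and feat_drop arguments are accepted but unused by this module.
model = AlignEA(p=4, feat_drop=4, params=[1.0, 0.5, 2.0])
e1, r, e2 = get_inputs()
energy = model(e1, r, e2)                     # shape (4, 4, 4)
print(model.only_pos_loss(e1, r, e2).item())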
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask)
tmp20 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp21 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 + tmp7
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tmp5 + tmp11
tmp15 = tmp13 + tmp14
tmp17 = tmp15 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp12 + tmp18
tmp22 = tmp20 + tmp21
tmp24 = tmp22 - tmp23
tmp25 = tmp24 * tmp24
tmp26 = tmp19 + tmp25
tl.store(out_ptr0 + x2, tmp26, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_pow_sub_sum_0[grid(64)](arg0_1, arg1_1, arg2_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class AlignEANew(torch.nn.Module):
def __init__(self, p, feat_drop, params):
super(AlignEANew, self).__init__()
self.params = params
def only_pos_loss(self, e1, r, e2):
return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum()
def loss(self, pos_score, neg_score, target):
return F.relu(pos_score - self.params[0]).sum() + self.params[1
] * F.relu(self.params[2] - neg_score).sum()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
weihangzhang/EAkit
|
AlignEA
| false
| 16,700
|
[
"MIT"
] | 102
|
dde8e914480cd1a3585271f70db11d567d9c2a04
|
https://github.com/weihangzhang/EAkit/tree/dde8e914480cd1a3585271f70db11d567d9c2a04
|
N_R_Align
|
import torch
import torch.nn as nn
class N_R_Align(torch.nn.Module):
def __init__(self, params):
super(N_R_Align, self).__init__()
self.params = params
self.cos_sim = nn.CosineSimilarity(dim=1, eps=1e-06)
def forward(self, e1, e2, n1, n2):
return self.params * torch.sigmoid(self.cos_sim(n1, n2)) + (1 -
self.params) * torch.sigmoid(self.cos_sim(e1, e2))
def loss(self, pos_score, neg_score, target):
return -torch.log(pos_score).sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'params': 4}]
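# Hedged usage sketch (assumed; get_init_inputs() passes params=4, which the
# compiled kernel below bakes in as the constants 4.0 and -3.0): the score
# blends two sigmoided cosine similarities, weighted by params and 1 - params.
model = N_R_Align(params=4)
e1, e2, n1, n2 = get_inputs()
score = model(e1, e2, n1, n2)                 # shape (4, 4, 4)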
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x3, xmask)
tmp17 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp17 * tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp13)
tmp30 = tmp16 / tmp29
tmp31 = tmp15 * tmp30
tl.store(out_ptr0 + x3, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp8 = 4.0
tmp9 = tmp7 * tmp8
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tl.sigmoid(tmp16)
tmp18 = -3.0
tmp19 = tmp17 * tmp18
tmp20 = tmp9 + tmp19
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(256)](
arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(256)](
arg3_1, arg2_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg2_1
del arg3_1
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_sum_1[grid(64)](buf0, buf1, buf2,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del buf1
return buf2,
class N_R_AlignNew(torch.nn.Module):
def __init__(self, params):
super(N_R_AlignNew, self).__init__()
self.params = params
self.cos_sim = nn.CosineSimilarity(dim=1, eps=1e-06)
def loss(self, pos_score, neg_score, target):
return -torch.log(pos_score).sum()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
weihangzhang/EAkit
|
N_R_Align
| false
| 16,701
|
[
"MIT"
] | 102
|
dde8e914480cd1a3585271f70db11d567d9c2a04
|
https://github.com/weihangzhang/EAkit/tree/dde8e914480cd1a3585271f70db11d567d9c2a04
|
HLoss
|
import torch
import torch.nn as nn
class HLoss(nn.Module):
def __init__(self):
super(HLoss, self).__init__()
def forward(self, x):
b = x * torch.log(x)
b[torch.isnan(b)] = 0
b = -1.0 * b.sum()
return b
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
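# A minimal check (assumed, not in the record): for strictly positive x,
# x * log(x) has no NaNs, so the loss is just -sum(x * log x); zeros in x
# would produce 0 * log(0) = NaN entries, which the masking line resets to 0.
loss_fn = HLoss()
x = torch.rand(4, 4, 4, 4) + 0.1
assert torch.allclose(loss_fn(x), -(x * torch.log(x)).sum())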
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_index_put_lift_fresh_log_mul_sum_0(in_out_ptr0,
in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl_math.log(tmp0)
tmp2 = tmp0 * tmp1
tmp3 = libdevice.isnan(tmp2).to(tl.int1)
tmp4 = 0.0
tmp5 = tl.where(tmp3, tmp4, tmp2)
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = -1.0
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_index_put_lift_fresh_log_mul_sum_0[grid(1)](buf2,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class HLossNew(nn.Module):
def __init__(self):
super(HLossNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
wengong-jin/chemprop
|
HLoss
| false
| 16,702
|
[
"MIT"
] | 77
|
3ad3577367d8a53f28aade0be41b56b1f25b6125
|
https://github.com/wengong-jin/chemprop/tree/3ad3577367d8a53f28aade0be41b56b1f25b6125
|
depthwise_block
|
import torch
import torch.nn as nn
import torch.utils.data
class depthwise_conv(nn.Module):
def __init__(self, kernel_size=3, stride=1, padding=1):
super(depthwise_conv, self).__init__()
self.depthwise = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=
stride, padding=padding)
def forward(self, x):
C, H, W = x.shape[1:]
x = x.reshape(-1, 1, H, W)
x = self.depthwise(x)
x = x.view(-1, C, H, W)
return x
class depthwise_block(nn.Module):
def __init__(self, kernel_size=3, stride=1, padding=1, activation='relu'):
super(depthwise_block, self).__init__()
self.depthwise = depthwise_conv(kernel_size=3, stride=1, padding=1)
if activation == 'relu':
self.activation = nn.ReLU()
elif activation == 'lrelu':
self.activation = nn.LeakyReLU()
elif activation == 'tanh':
self.activation = nn.Tanh()
def forward(self, x, act=True):
x = self.depthwise(x)
if act:
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
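# Usage sketch (an assumption): depthwise_conv folds the channel axis into
# the batch axis so one single-channel Conv2d filters every channel with the
# same 3x3 kernel, and view() restores (N, C, H, W) afterwards.
block = depthwise_block(activation='relu')
x, = get_inputs()
y = block(x)                                  # (4, 4, 4, 4), ReLU applied
y_raw = block(x, act=False)                   # same shape, no activation
assert y.shape == y_raw.shape == x.shape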
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (16,
1, 4, 4), (16, 16, 4, 1), 0), primals_2, stride=(1, 1), padding
=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf0, (16, 1, 4, 4), (16, 16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf1, primals_2, reinterpret_tensor(primals_1, (16, 1, 4, 4), (
16, 16, 4, 1), 0), buf2
class depthwise_conv(nn.Module):
def __init__(self, kernel_size=3, stride=1, padding=1):
super(depthwise_conv, self).__init__()
self.depthwise = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=
stride, padding=padding)
def forward(self, x):
C, H, W = x.shape[1:]
x = x.reshape(-1, 1, H, W)
x = self.depthwise(x)
x = x.view(-1, C, H, W)
return x
class depthwise_blockNew(nn.Module):
def __init__(self, kernel_size=3, stride=1, padding=1, activation='relu'):
super(depthwise_blockNew, self).__init__()
self.depthwise = depthwise_conv(kernel_size=3, stride=1, padding=1)
if activation == 'relu':
self.activation = nn.ReLU()
elif activation == 'lrelu':
self.activation = nn.LeakyReLU()
elif activation == 'tanh':
self.activation = nn.Tanh()
def forward(self, input_0):
primals_2 = self.depthwise.depthwise.weight
primals_3 = self.depthwise.depthwise.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
whiteking64/lang-seg
|
depthwise_block
| false
| 16,703
|
[
"MIT"
] | 202
|
9d063b126f1b64e38ddb20cc75fc74435bfdcbd3
|
https://github.com/whiteking64/lang-seg/tree/9d063b126f1b64e38ddb20cc75fc74435bfdcbd3
|
AttentionCollapse
|
import torch
import torch.nn as nn
class AttentionCollapse(nn.Module):
"""Collapsing over the channels with attention.
Parameters
----------
n_channels : int
Number of input channels.
Attributes
----------
affine : nn.Module
Fully connected layer performing linear mapping.
context_vector : nn.Module
Fully connected layer encoding direction importance.
"""
def __init__(self, n_channels):
super().__init__()
self.affine = nn.Linear(n_channels, n_channels)
self.context_vector = nn.Linear(n_channels, 1, bias=False)
def forward(self, x):
"""Perform forward pass.
Parameters
----------
x : torch.Tensor
Tensor of shape `(n_samples, n_channels, lookback, n_assets)`.
Returns
-------
torch.Tensor
Tensor of shape `(n_samples, n_channels, n_assets)`.
"""
n_samples, _n_channels, _lookback, _n_assets = x.shape
res_list = []
for i in range(n_samples):
inp_single = x[i].permute(2, 1, 0)
tformed = self.affine(inp_single)
w = self.context_vector(tformed)
scaled_w = torch.nn.functional.softmax(w, dim=1)
weighted_sum = (inp_single * scaled_w).mean(dim=1)
res_list.append(weighted_sum.permute(1, 0))
return torch.stack(res_list, dim=0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4}]
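# Shape-trace sketch (assumed): each sample is permuted to
# (n_assets, lookback, n_channels), scored per time step, softmaxed over the
# lookback axis, and mean-reduced, collapsing (4, 4, 4, 4) into
# (n_samples, n_channels, n_assets) = (4, 4, 4).
collapse = AttentionCollapse(n_channels=4)
x, = get_inputs()
out = collapse(x)
assert out.shape == (4, 4, 4)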
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex % 4
x2 = xindex // 4
y0 = yindex
x3 = xindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * x1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x3 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex % 4
x2 = xindex // 4
y0 = yindex
x3 = xindex
tmp0 = tl.load(in_ptr0 + (128 + y0 + 4 * x2 + 16 * x1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x3 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex % 4
x2 = xindex // 4
y0 = yindex
x3 = xindex
tmp0 = tl.load(in_ptr0 + (192 + y0 + 4 * x2 + 16 * x1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x3 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex % 4
x2 = xindex // 4
y0 = yindex
x3 = xindex
tmp0 = tl.load(in_ptr0 + (64 + y0 + 4 * x2 + 16 * x1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x3 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_out_ptr1, in_out_ptr2,
in_out_ptr3, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x2, xmask)
tmp5 = tl.load(in_out_ptr2 + x2, xmask)
tmp7 = tl.load(in_out_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp6 = tmp5 + tmp1
tmp8 = tmp7 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(in_out_ptr1 + x2, tmp4, xmask)
tl.store(in_out_ptr2 + x2, tmp6, xmask)
tl.store(in_out_ptr3 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_stack_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + 4 * x0, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 * tmp6
tmp8 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), tmp4 & xmask, other=0.0)
tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tmp8 * tmp9
tmp11 = tmp7 + tmp10
tmp12 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), tmp4 & xmask, other=0.0)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), tmp4 & xmask, other=0.0)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp15 + tmp18
tmp20 = 4.0
tmp21 = tmp19 / tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp4, tmp21, tmp22)
tmp24 = tmp0 >= tmp3
tmp25 = tl.full([1], 8, tl.int64)
tmp26 = tmp0 < tmp25
tmp27 = tmp24 & tmp26
tmp28 = tl.load(in_ptr0 + (64 + x0 + 16 * (-4 + x1)), tmp27 & xmask,
other=0.0)
tmp29 = tl.load(in_ptr2 + 4 * x0, tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp30 = tmp28 * tmp29
tmp31 = tl.load(in_ptr0 + (68 + x0 + 16 * (-4 + x1)), tmp27 & xmask,
other=0.0)
tmp32 = tl.load(in_ptr2 + (1 + 4 * x0), tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 * tmp32
tmp34 = tmp30 + tmp33
tmp35 = tl.load(in_ptr0 + (72 + x0 + 16 * (-4 + x1)), tmp27 & xmask,
other=0.0)
tmp36 = tl.load(in_ptr2 + (2 + 4 * x0), tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp35 * tmp36
tmp38 = tmp34 + tmp37
tmp39 = tl.load(in_ptr0 + (76 + x0 + 16 * (-4 + x1)), tmp27 & xmask,
other=0.0)
tmp40 = tl.load(in_ptr2 + (3 + 4 * x0), tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp41 = tmp39 * tmp40
tmp42 = tmp38 + tmp41
tmp43 = tmp42 / tmp20
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp27, tmp43, tmp44)
tmp46 = tmp0 >= tmp25
tmp47 = tl.full([1], 12, tl.int64)
tmp48 = tmp0 < tmp47
tmp49 = tmp46 & tmp48
tmp50 = tl.load(in_ptr0 + (128 + x0 + 16 * (-8 + x1)), tmp49 & xmask,
other=0.0)
tmp51 = tl.load(in_ptr3 + 4 * x0, tmp49 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp52 = tmp50 * tmp51
tmp53 = tl.load(in_ptr0 + (132 + x0 + 16 * (-8 + x1)), tmp49 & xmask,
other=0.0)
tmp54 = tl.load(in_ptr3 + (1 + 4 * x0), tmp49 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp55 = tmp53 * tmp54
tmp56 = tmp52 + tmp55
tmp57 = tl.load(in_ptr0 + (136 + x0 + 16 * (-8 + x1)), tmp49 & xmask,
other=0.0)
tmp58 = tl.load(in_ptr3 + (2 + 4 * x0), tmp49 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp59 = tmp57 * tmp58
tmp60 = tmp56 + tmp59
tmp61 = tl.load(in_ptr0 + (140 + x0 + 16 * (-8 + x1)), tmp49 & xmask,
other=0.0)
tmp62 = tl.load(in_ptr3 + (3 + 4 * x0), tmp49 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp63 = tmp61 * tmp62
tmp64 = tmp60 + tmp63
tmp65 = tmp64 / tmp20
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp49, tmp65, tmp66)
tmp68 = tmp0 >= tmp47
tl.full([1], 16, tl.int64)
tmp71 = tl.load(in_ptr0 + (192 + x0 + 16 * (-12 + x1)), tmp68 & xmask,
other=0.0)
tmp72 = tl.load(in_ptr4 + 4 * x0, tmp68 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp73 = tmp71 * tmp72
tmp74 = tl.load(in_ptr0 + (196 + x0 + 16 * (-12 + x1)), tmp68 & xmask,
other=0.0)
tmp75 = tl.load(in_ptr4 + (1 + 4 * x0), tmp68 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp76 = tmp74 * tmp75
tmp77 = tmp73 + tmp76
tmp78 = tl.load(in_ptr0 + (200 + x0 + 16 * (-12 + x1)), tmp68 & xmask,
other=0.0)
tmp79 = tl.load(in_ptr4 + (2 + 4 * x0), tmp68 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp80 = tmp78 * tmp79
tmp81 = tmp77 + tmp80
tmp82 = tl.load(in_ptr0 + (204 + x0 + 16 * (-12 + x1)), tmp68 & xmask,
other=0.0)
tmp83 = tl.load(in_ptr4 + (3 + 4 * x0), tmp68 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp84 = tmp82 * tmp83
tmp85 = tmp81 + tmp84
tmp86 = tmp85 / tmp20
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp68, tmp86, tmp87)
tmp89 = tl.where(tmp49, tmp67, tmp88)
tmp90 = tl.where(tmp27, tmp45, tmp89)
tmp91 = tl.where(tmp4, tmp23, tmp90)
tl.store(out_ptr0 + x2, tmp91, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(4, 16)](primals_1, buf0, 4, 16,
XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(4, 16)](primals_1, buf12, 4, 16,
XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf13)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(4, 16)](primals_1, buf18, 4, 16,
XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf19)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 16)](primals_1, buf6, 4, 16,
XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf7)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
buf8 = reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0)
del buf7
buf14 = reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0)
del buf13
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0)
del buf19
triton_poi_fused_add_4[grid(64)](buf2, buf8, buf14, buf20,
primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_5[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_6[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf4, (16, 1), (1, 1), 0)
del buf4
extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_5[grid(16)](buf9, buf10, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_6[grid(16)](buf10, buf11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf10, (16, 1), (1, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf15)
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_5[grid(16)](buf15, buf16, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_6[grid(16)](buf16, buf17, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf21 = reinterpret_tensor(buf16, (16, 1), (1, 1), 0)
del buf16
extern_kernels.mm(reinterpret_tensor(buf20, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf21)
buf22 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_5[grid(16)](buf21, buf22, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_6[grid(16)](buf22, buf23, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf22
buf24 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_stack_7[grid(64)](primals_1, buf5, buf11, buf17,
buf23, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf11
del buf17
del buf23
del buf5
return reinterpret_tensor(buf24, (4, 4, 4), (16, 4, 1), 0
), primals_1, reinterpret_tensor(buf0, (16, 4), (4, 1), 0
), reinterpret_tensor(buf2, (16, 4), (4, 1), 0
), buf3, reinterpret_tensor(buf6, (16, 4), (4, 1), 0
), reinterpret_tensor(buf8, (16, 4), (4, 1), 0
), buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0
), reinterpret_tensor(buf14, (16, 4), (4, 1), 0
), buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0
), reinterpret_tensor(buf20, (16, 4), (4, 1), 0), buf21, primals_4
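# Schematic eager equivalent of `call` above (an illustrative sketch, not the
# generated code): the per-sample loop is unrolled four times, which is why the
# buffers come in quadruples (buf2/8/14/20 hold the affine outputs,
# buf5/11/17/23 the softmax weights). Axis names follow the
# (n_samples, n_channels, lookback, n_assets) convention and are assumptions.
def _attention_collapse_reference(x, affine, context_vector):
    res = []
    for sample in x:
        inp = sample.permute(2, 1, 0)                   # (n_assets, lookback, n_channels)
        w = torch.softmax(context_vector(affine(inp)), dim=1)
        res.append((inp * w).sum(dim=1).permute(1, 0))  # weighted collapse
    return torch.stack(res, dim=0)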
class AttentionCollapseNew(nn.Module):
"""Collapsing over the channels with attention.
Parameters
----------
n_channels : int
Number of input channels.
Attributes
----------
affine : nn.Module
Fully connected layer performing linear mapping.
context_vector : nn.Module
Fully connected layer encoding direction importance.
"""
def __init__(self, n_channels):
super().__init__()
self.affine = nn.Linear(n_channels, n_channels)
self.context_vector = nn.Linear(n_channels, 1, bias=False)
def forward(self, input_0):
primals_2 = self.affine.weight
primals_3 = self.affine.bias
primals_4 = self.context_vector.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
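# Hypothetical smoke test: shapes follow get_inputs/get_init_inputs in the
# original module, and CUDA is required because `call` pins device 0.
if __name__ == "__main__":
    model = AttentionCollapseNew(n_channels=4).cuda()
    out = model(torch.rand(4, 4, 4, 4, device="cuda"))
    assert out.shape == (4, 4, 4)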
|
vishalbelsare/deepdow
|
AttentionCollapse
| false
| 16,704
|
[
"Apache-2.0"
] | 511
|
cbb99347fba9a447d4fcae64fe5137c203643e44
|
https://github.com/vishalbelsare/deepdow/tree/cbb99347fba9a447d4fcae64fe5137c203643e44
|
Gating
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Gating(nn.Module):
"""
    FCN architecture for large-scale scene coordinate regression.
"""
def __init__(self, num_experts, capacity=1):
"""
Constructor.
"""
super(Gating, self).__init__()
self.capacity = capacity
self.conv1 = nn.Conv2d(3, 8, 3, 1, 1)
self.conv2 = nn.Conv2d(8, 16, 3, 2, 1)
self.conv3 = nn.Conv2d(16, 32, 3, 2, 1)
self.conv4 = nn.Conv2d(32, 64 * capacity, 3, 2, 1)
self.res1_conv1 = nn.Conv2d(64 * capacity, 64 * capacity, 3, 1, 1)
self.res1_conv2 = nn.Conv2d(64 * capacity, 64 * capacity, 1, 1, 0)
self.res1_conv3 = nn.Conv2d(64 * capacity, 64 * capacity, 3, 1, 1)
self.fc1 = nn.Conv2d(64 * capacity, 64 * capacity ** 2, 1, 1, 0)
self.fc2 = nn.Conv2d(64 * capacity ** 2, 64 * capacity ** 2, 1, 1, 0)
self.fc3 = nn.Conv2d(64 * capacity ** 2, num_experts, 1, 1, 0)
def forward(self, inputs):
"""
Forward pass.
inputs -- 4D data tensor (BxCxHxW)
"""
x = inputs
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = F.relu(self.res1_conv1(x))
x = F.relu(self.res1_conv2(x))
x = F.relu(self.res1_conv3(x))
if self.capacity == 1:
x = torch.tanh(x)
x = F.avg_pool2d(x, x.size()[2:])
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
x = F.log_softmax(x, dim=1)
return x[:, :, 0, 0]
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'num_experts': 4}]
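# Hypothetical smoke test: with num_experts=4 the log_softmax head emits one
# log-probability per expert, so the output is (batch, num_experts).
if __name__ == "__main__":
    gate = Gating(num_experts=4)
    log_probs = gate(torch.rand(4, 3, 64, 64))
    assert log_probs.shape == (4, 4)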
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 8
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
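# Kernels 0 through 3 (and 5) share one pattern: the convolution itself is
# dispatched to extern_kernels.convolution, and the elementwise kernel then
# adds the per-channel bias and applies ReLU in place. The channel index is
# recovered from the flat offset, e.g. x1 = xindex // 4096 % 8 above for a
# contiguous (4, 8, 64, 64) activation (4096 = 64*64 spatial positions).
# Eager-mode sketch of the same step (helper names are illustrative):
def _bias_relu_reference(conv_out, bias):
    # conv_out: (N, C, H, W); bias: (C,) broadcast over N, H, W.
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))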
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_tanh_threshold_backward_4(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = libdevice.tanh(tmp4)
tmp6 = 0.0
tmp7 = tmp4 <= tmp6
tl.store(out_ptr0 + x3, tmp5, None)
tl.store(out_ptr1 + x3, tmp7, None)
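# Kernel 4 extends the bias+ReLU pattern for the conv that feeds torch.tanh:
# out_ptr0 receives tanh(relu(conv_out + bias)) for the forward pass, while
# out_ptr1 stores the boolean mask relu(conv_out + bias) <= 0 that ReLU's
# backward ("threshold_backward") consumes later. Illustrative sketch:
def _bias_relu_tanh_reference(conv_out, bias):
    pre = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    return torch.tanh(pre), pre <= 0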
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused__log_softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
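# Kernels 7 and 8 together implement log-softmax over the 4 experts in the
# standard numerically stable two-pass form: first subtract the row max, then
# subtract the log of the sum of exponentials of the shifted values.
# Reference sketch (illustrative, not generated code):
def _log_softmax_reference(x):
    shifted = x - x.max(dim=1, keepdim=True).values                 # kernel 7
    return shifted - shifted.exp().sum(dim=1, keepdim=True).log()   # kernel 8
# torch.allclose(_log_softmax_reference(x), torch.log_softmax(x, dim=1)) holds.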
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (8, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (4, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_21, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(131072)](buf1, primals_3,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(65536)](buf3, primals_5,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 16, 16), (8192, 256, 16, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(32768)](buf5, primals_7,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 8, 8), (4096, 64, 8, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_3[grid(16384)](buf7, primals_9,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 8, 8), (4096, 64, 8, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_3[grid(16384)](buf9, primals_11,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 8, 8), (4096, 64, 8, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_3[grid(16384)](buf11, primals_13,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 64, 8, 8), (4096, 64, 8, 1))
buf13 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.
float32)
buf24 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.bool)
triton_poi_fused_convolution_relu_tanh_threshold_backward_4[grid(16384)
](buf12, primals_15, buf13, buf24, 16384, XBLOCK=256, num_warps
=4, num_stages=1)
del buf12
del primals_15
buf14 = torch.ops.aten.avg_pool2d.default(buf13, [8, 8], [8, 8], [0,
0], False, True, None)
buf15 = buf14
del buf14
buf16 = extern_kernels.convolution(buf15, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 1, 1), (64, 1, 1, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_5[grid(256)](buf17, primals_17,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf18 = extern_kernels.convolution(buf17, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 1, 1), (64, 1, 1, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_5[grid(256)](buf19, primals_19,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf20 = extern_kernels.convolution(buf19, primals_20, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 4, 1, 1), (4, 1, 1, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_6[grid(16)](buf21, primals_21, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_21
buf22 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_poi_fused__log_softmax_7[grid(16)](buf21, buf22, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_poi_fused__log_softmax_8[grid(16)](buf22, buf23, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf22
return (reinterpret_tensor(buf23, (4, 4), (4, 1), 0), primals_1,
primals_2, primals_4, primals_6, primals_8, primals_10, primals_12,
primals_14, primals_16, primals_18, primals_20, buf1, buf3, buf5,
buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21, buf24)
class GatingNew(nn.Module):
"""
    FCN architecture for large-scale scene coordinate regression.
"""
def __init__(self, num_experts, capacity=1):
"""
Constructor.
"""
super(GatingNew, self).__init__()
self.capacity = capacity
self.conv1 = nn.Conv2d(3, 8, 3, 1, 1)
self.conv2 = nn.Conv2d(8, 16, 3, 2, 1)
self.conv3 = nn.Conv2d(16, 32, 3, 2, 1)
self.conv4 = nn.Conv2d(32, 64 * capacity, 3, 2, 1)
self.res1_conv1 = nn.Conv2d(64 * capacity, 64 * capacity, 3, 1, 1)
self.res1_conv2 = nn.Conv2d(64 * capacity, 64 * capacity, 1, 1, 0)
self.res1_conv3 = nn.Conv2d(64 * capacity, 64 * capacity, 3, 1, 1)
self.fc1 = nn.Conv2d(64 * capacity, 64 * capacity ** 2, 1, 1, 0)
self.fc2 = nn.Conv2d(64 * capacity ** 2, 64 * capacity ** 2, 1, 1, 0)
self.fc3 = nn.Conv2d(64 * capacity ** 2, num_experts, 1, 1, 0)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.res1_conv1.weight
primals_11 = self.res1_conv1.bias
primals_12 = self.res1_conv2.weight
primals_13 = self.res1_conv2.bias
primals_14 = self.res1_conv3.weight
primals_15 = self.res1_conv3.bias
primals_16 = self.fc1.weight
primals_17 = self.fc1.bias
primals_18 = self.fc2.weight
primals_19 = self.fc2.bias
primals_20 = self.fc3.weight
primals_21 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0]
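# Hypothetical smoke test (CUDA required since `call` pins device 0); the
# compiled graph returns only the log-probabilities, matching Gating.forward.
if __name__ == "__main__":
    gate = GatingNew(num_experts=4).cuda()
    log_probs = gate(torch.rand(4, 3, 64, 64, device="cuda"))
    assert log_probs.shape == (4, 4)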
|
vislearn/esac
|
Gating
| false
| 16,705
|
[
"BSD-3-Clause"
] | 62
|
4004b251525fa238a1cb6e1043fb41a4719a4ff2
|
https://github.com/vislearn/esac/tree/4004b251525fa238a1cb6e1043fb41a4719a4ff2
|
LearnedKernel
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class LearnedKernel(nn.Module):
def __init__(self, args: 'Namespace'):
super(LearnedKernel, self).__init__()
self.A = nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size)
def forward(self, encodings: 'torch.Tensor'):
return (self.A(encodings[:, 1, :].squeeze(1)) * encodings[:, 0, :].
squeeze(1)).sum(dim=1, keepdim=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config(ffn_hidden_size=4)}]
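# Hypothetical usage: with the 4D get_inputs above, encodings[:, 1] is mapped
# through the linear layer, multiplied elementwise with encodings[:, 0], and
# reduced over dim=1, giving a (4, 1, 4) score tensor.
if __name__ == "__main__":
    kernel = LearnedKernel(_mock_config(ffn_hidden_size=4))
    score = kernel(torch.rand(4, 4, 4, 4))
    assert score.shape == (4, 1, 4)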
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr2 + (4 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr2 + (8 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp17 = tl.load(in_ptr2 + (12 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp5 + tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tmp11 = tmp10 + tmp1
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp15 + tmp1
tmp18 = tmp16 * tmp17
tmp19 = tmp14 + tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
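# This kernel fuses the Linear bias add, the elementwise multiply with
# encodings[:, 0], and the dim=1 reduction (unrolled 4-wide) into one pass;
# buf1 holds the bias-free matmul of encodings[:, 1] against A.weight.
# Illustrative eager equivalent (argument names are assumptions):
def _add_mul_sum_reference(mm_out, bias, enc):
    # mm_out: (4, 4, 4) bias-free matmul result; enc: the (4, 4, 4, 4) input.
    return ((mm_out + bias) * enc[:, 0]).sum(dim=1, keepdim=True)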
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
triton_poi_fused_add_mul_sum_1[grid(16)](buf1, primals_3, primals_1,
buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del primals_3
return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (4, 4, 4), (64, 4, 1), 0)
class LearnedKernelNew(nn.Module):
def __init__(self, args: 'Namespace'):
super(LearnedKernelNew, self).__init__()
self.A = nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size)
def forward(self, input_0):
primals_2 = self.A.weight
primals_3 = self.A.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
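# Hypothetical smoke test (CUDA required since `call` pins device 0);
# argparse.Namespace stands in for the original args object.
if __name__ == "__main__":
    from argparse import Namespace
    kernel = LearnedKernelNew(Namespace(ffn_hidden_size=4)).cuda()
    score = kernel(torch.rand(4, 4, 4, 4, device="cuda"))
    assert score.shape == (4, 1, 4)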
|
wengong-jin/chemprop
|
LearnedKernel
| false
| 16,706
|
[
"MIT"
] | 77
|
3ad3577367d8a53f28aade0be41b56b1f25b6125
|
https://github.com/wengong-jin/chemprop/tree/3ad3577367d8a53f28aade0be41b56b1f25b6125
|