entry_point
stringlengths 1
65
| original_triton_python_code
stringlengths 208
619k
| optimised_triton_code
stringlengths 1.15k
275k
| repo_name
stringlengths 7
115
| module_name
stringlengths 1
65
| synthetic
bool 1
class | uuid
int64 0
18.5k
| licenses
listlengths 1
6
| stars
int64 0
19.8k
| sha
stringlengths 40
40
| repo_link
stringlengths 72
180
|
|---|---|---|---|---|---|---|---|---|---|---|
SmoothL1Loss
|
import functools
import torch
import torch.nn.functional as F
import torch.nn as nn
def reduce_loss(loss, reduction):
    """Reduce an element-wise loss tensor.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): One of "none", "mean" and "sum".

    Returns:
        Tensor: The reduced loss.
    """
    # PyTorch maps the reduction string onto an internal enum:
    # 0 -> "none", 1 -> "mean", 2 -> "sum".
    enum_code = F._Reduction.get_enum(reduction)
    if enum_code == 0:
        return loss
    if enum_code == 1:
        return loss.mean()
    if enum_code == 2:
        return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weights, then reduce the loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor, optional): Element-wise weights.
        reduction (str): Same semantics as PyTorch built-in losses.
        avg_factor (float, optional): Divisor used instead of the element
            count when averaging.

    Returns:
        Tensor: Processed loss values.
    """
    weighted = loss if weight is None else loss * weight
    if avg_factor is None:
        # No custom normalizer: fall back to the standard reduction.
        return reduce_loss(weighted, reduction)
    if reduction == 'mean':
        return weighted.sum() / avg_factor
    if reduction == 'none':
        return weighted
    # avg_factor combined with "sum" is ambiguous, so reject it.
    raise ValueError('avg_factor can not be used with reduction="sum"')
def weighted_loss(loss_func):
    """Wrap an element-wise loss so it supports weighting and reduction.

    ``loss_func`` must have the signature ``loss_func(pred, target,
    **kwargs)`` and return the element-wise loss without any reduction.
    The returned wrapper additionally accepts ``weight`` (element-wise
    weights), ``reduction`` ("none" / "mean" / "sum") and ``avg_factor``
    (custom divisor used when averaging).

    :Example:
    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean',
                avg_factor=None, **kwargs):
        # Compute the raw element-wise loss first, then layer the
        # weighting / reduction behavior on top.
        raw = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(raw, weight, reduction, avg_factor)
    return wrapper
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
    """Element-wise Smooth L1 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.
        beta (float, optional): Threshold of the piecewise function.
            Defaults to 1.0.

    Returns:
        torch.Tensor: Calculated loss
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    abs_err = torch.abs(pred - target)
    # Quadratic below the threshold, linear above it.
    quadratic = 0.5 * abs_err * abs_err / beta
    linear = abs_err - 0.5 * beta
    return torch.where(abs_err < beta, quadratic, linear)
class SmoothL1Loss(nn.Module):
    """Smooth L1 loss module.

    Args:
        beta (float, optional): Threshold of the piecewise function.
            Defaults to 1.0.
        reduction (str, optional): "none", "mean" or "sum". Defaults to
            "mean".
        loss_weight (float, optional): Scale applied to the final loss.
    """

    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
                reduction_override=None, **kwargs):
        """Compute the weighted Smooth L1 loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): Per-element loss weights.
            avg_factor (int, optional): Custom divisor for averaging.
            reduction_override (str, optional): If given, replaces the
                reduction configured at construction time.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        loss = smooth_l1_loss(pred, target, weight, beta=self.beta,
                              reduction=reduction, avg_factor=avg_factor,
                              **kwargs)
        return self.loss_weight * loss
def get_inputs():
    # Prediction and target tensors for exercising the module.
    return [torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    # No positional and no keyword constructor arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_div_lt_mean_mul_sub_where_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel):
    # Fused Smooth L1 (beta = 1.0) plus mean reduction over all 256
    # elements. A single program instance reduces the whole input and
    # writes the scalar result to in_out_ptr0.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)  # first input (pred)
    tmp1 = tl.load(in_ptr1 + r0, None)  # second input (target)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)  # |pred - target|
    tmp4 = 1.0  # beta
    tmp5 = tmp3 < tmp4
    tmp6 = 0.5
    tmp7 = tmp3 * tmp6
    tmp8 = tmp7 * tmp3  # 0.5 * diff^2
    tmp9 = tmp8 * tmp4  # division by beta expressed as multiply (beta == 1)
    tmp10 = tmp3 - tmp6  # diff - 0.5 * beta
    tmp11 = tl.where(tmp5, tmp9, tmp10)  # piecewise Smooth L1 select
    tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
    tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
    tmp15 = 256.0  # total element count -> mean
    tmp16 = tmp14 / tmp15
    tmp17 = tmp16 * tmp4  # multiply by 1.0 (no-op; presumably loss_weight — confirm)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
    # Inductor entry point: computes the mean Smooth L1 loss of two
    # (4, 4, 4, 4) CUDA tensors and returns it as a 0-d float32 tensor.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Scalar output buffer for the reduced loss.
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # Single-program launch: the kernel reduces all 256 elements.
        triton_per_fused_abs_div_lt_mean_mul_sub_where_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
def reduce_loss(loss, reduction):
    """Reduce an element-wise loss tensor.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): One of "none", "mean" and "sum".

    Returns:
        Tensor: The reduced loss.
    """
    # PyTorch maps the reduction string onto an internal enum:
    # 0 -> "none", 1 -> "mean", 2 -> "sum".
    enum_code = F._Reduction.get_enum(reduction)
    if enum_code == 0:
        return loss
    if enum_code == 1:
        return loss.mean()
    if enum_code == 2:
        return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weights, then reduce the loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor, optional): Element-wise weights.
        reduction (str): Same semantics as PyTorch built-in losses.
        avg_factor (float, optional): Divisor used instead of the element
            count when averaging.

    Returns:
        Tensor: Processed loss values.
    """
    weighted = loss if weight is None else loss * weight
    if avg_factor is None:
        # No custom normalizer: fall back to the standard reduction.
        return reduce_loss(weighted, reduction)
    if reduction == 'mean':
        return weighted.sum() / avg_factor
    if reduction == 'none':
        return weighted
    # avg_factor combined with "sum" is ambiguous, so reject it.
    raise ValueError('avg_factor can not be used with reduction="sum"')
def weighted_loss(loss_func):
    """Wrap an element-wise loss so it supports weighting and reduction.

    ``loss_func`` must have the signature ``loss_func(pred, target,
    **kwargs)`` and return the element-wise loss without any reduction.
    The returned wrapper additionally accepts ``weight`` (element-wise
    weights), ``reduction`` ("none" / "mean" / "sum") and ``avg_factor``
    (custom divisor used when averaging).

    :Example:
    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean',
                avg_factor=None, **kwargs):
        # Compute the raw element-wise loss first, then layer the
        # weighting / reduction behavior on top.
        raw = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(raw, weight, reduction, avg_factor)
    return wrapper
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
    """Element-wise Smooth L1 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.
        beta (float, optional): Threshold of the piecewise function.
            Defaults to 1.0.

    Returns:
        torch.Tensor: Calculated loss
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    abs_err = torch.abs(pred - target)
    # Quadratic below the threshold, linear above it.
    quadratic = 0.5 * abs_err * abs_err / beta
    linear = abs_err - 0.5 * beta
    return torch.where(abs_err < beta, quadratic, linear)
class SmoothL1LossNew(nn.Module):
    """Smooth L1 loss backed by the fused Triton kernel.

    Args:
        beta (float, optional): Threshold of the piecewise function.
            Defaults to 1.0.
        reduction (str, optional): "none", "mean" or "sum". Defaults to
            "mean".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
        super(SmoothL1LossNew, self).__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        # Delegate to the compiled Inductor graph; ``call`` returns a
        # 1-tuple containing the scalar loss.
        result = call([input_0, input_1])
        return result[0]
|
CityU-AIM-Group/HTD
|
SmoothL1Loss
| false
| 17,115
|
[
"MIT"
] | 5
|
0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
AppendDim
|
import torch
from torch import nn
class AppendDim(nn.Module):
    """
    Append a new trailing dimension of size ``out_dim`` to the input.
    """

    def __init__(self, out_dim=1):
        super().__init__()
        self.out_dim = out_dim

    def forward(self, states, **kwargs):
        # Add a trailing axis, then tile it ``out_dim`` times; every
        # original axis keeps its size.
        expanded = states.unsqueeze(-1)
        reps = [1] * states.dim() + [self.out_dim]
        return expanded.repeat(*reps)

    def reset_parameters(self, *args, **kwargs):
        pass
def get_inputs():
    # A single random activation tensor.
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    # No positional and no keyword constructor arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
    # Inductor entry point: copies the (4, 4, 4, 4) input into a fresh
    # (4, 4, 4, 4, 1) buffer (AppendDim with out_dim == 1).
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Output buffer with the appended trailing unit dimension.
        buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
            .float32)
        get_raw_stream(0)
        triton_poi_fused_repeat_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class AppendDimNew(nn.Module):
    """
    Append a new dim to states with size out_dim (Triton-compiled path).
    """

    def __init__(self, out_dim=1):
        super().__init__()
        self.out_dim = out_dim

    def forward(self, input_0):
        # ``call`` runs the compiled kernel and returns a 1-tuple.
        return call([input_0])[0]

    def reset_parameters(self, *args, **kwargs):
        pass
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
AppendDim
| false
| 17,116
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
ConvWS2d
|
import torch
import torch.nn.functional as F
import torch.nn as nn
def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1,
    groups=1, eps=1e-05):
    """2-D convolution with Weight Standardization applied to ``weight``.

    Each output channel's weights are normalized to zero mean and unit
    (sample) standard deviation before the convolution runs.
    """
    out_channels = weight.size(0)
    flat = weight.view(out_channels, -1)
    mu = flat.mean(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    sigma = flat.std(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    standardized = (weight - mu) / (sigma + eps)
    return F.conv2d(input, standardized, bias, stride, padding, dilation,
        groups)
class ConvWS2d(nn.Conv2d):
    """Conv2d whose weights are standardized per output channel.

    Identical constructor signature to ``nn.Conv2d`` plus ``eps``, the
    stabilizer added to the per-channel standard deviation.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=True, eps=1e-05):
        super(ConvWS2d, self).__init__(in_channels, out_channels,
            kernel_size, stride=stride, padding=padding, dilation=dilation,
            groups=groups, bias=bias)
        self.eps = eps

    def forward(self, x):
        # Standardize the weights on every forward pass.
        return conv_ws_2d(x, self.weight, self.bias, self.stride,
            self.padding, self.dilation, self.groups, self.eps)
def get_inputs():
    # A single random activation tensor.
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    # Constructor keyword arguments for ConvWS2d.
    kwargs = {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_std_sub_0(in_out_ptr0, in_out_ptr1,
    in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Weight standardization: for each of the 4 output channels (x axis)
    # reduce its 64 weights (r axis) to a mean and a sample std, then
    # write (w - mean) / (std + 1e-05) to out_ptr0. The per-channel means
    # land in in_out_ptr0 and the stds in in_out_ptr1.
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]  # sum of weights (mean numerator)
    tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp8 = tl.where(xmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]  # same sum, recomputed for the std path
    tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp9 / tmp11  # mean used inside the variance
    tmp13 = tmp1 - tmp12
    tmp14 = tmp13 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.where(xmask, tmp15, 0)
    tmp18 = tl.sum(tmp17, 1)[:, None]  # sum of squared deviations
    tmp19 = 64.0
    tmp20 = tmp4 / tmp19  # per-channel mean
    tmp21 = 63.0  # Bessel-corrected divisor (n - 1), matching torch.std
    tmp22 = tmp18 / tmp21
    tmp23 = libdevice.sqrt(tmp22)  # per-channel sample std
    tmp24 = tmp0 - tmp20
    tmp25 = 1e-05  # eps stabilizer
    tmp26 = tmp23 + tmp25
    tmp27 = tmp24 / tmp26  # standardized weight
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp20, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + x0, tmp23, xmask)
    tl.store(out_ptr0 + (r1 + 64 * x0), tmp27, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # Add the per-channel bias (in_ptr0, 4 values) to the 16 convolution
    # outputs in place.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # channel index of this output element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
    # Inductor entry point: standardize the conv weights on the GPU, run
    # the convolution through the extern kernel, then add the bias in
    # place. args = (weight, bias, input).
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        # Reinterpret to contiguous (4, 1) views for the reduction kernel.
        buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0)
        del buf0
        buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0)
        del buf3
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # buf1 <- per-channel means, buf5 <- stds, buf6 <- standardized weights.
        triton_per_fused_add_div_mean_std_sub_0[grid(4)](buf1, buf5,
            primals_1, buf6, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1))
        buf8 = buf7
        del buf7
        # In-place bias add on the 16 conv outputs.
        triton_poi_fused_convolution_1[grid(16)](buf8, primals_2, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
    return buf8, primals_1, primals_3, buf1, buf5, buf6
def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1,
    groups=1, eps=1e-05):
    """2-D convolution with Weight Standardization applied to ``weight``.

    Each output channel's weights are normalized to zero mean and unit
    (sample) standard deviation before the convolution runs.
    """
    out_channels = weight.size(0)
    flat = weight.view(out_channels, -1)
    mu = flat.mean(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    sigma = flat.std(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    standardized = (weight - mu) / (sigma + eps)
    return F.conv2d(input, standardized, bias, stride, padding, dilation,
        groups)
class ConvWS2dNew(nn.Conv2d):
    """Conv2d with Weight Standardization, Triton-compiled forward."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=True, eps=1e-05):
        super(ConvWS2dNew, self).__init__(in_channels, out_channels,
            kernel_size, stride=stride, padding=padding, dilation=dilation,
            groups=groups, bias=bias)
        self.eps = eps

    def forward(self, input_0):
        # The compiled graph consumes (weight, bias, input) in that order
        # and returns the conv output first.
        outputs = call([self.weight, self.bias, input_0])
        return outputs[0]
|
CityU-AIM-Group/HTD
|
ConvWS2d
| false
| 17,117
|
[
"MIT"
] | 5
|
0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
GHMR
|
import torch
import torch.nn as nn
class GHMR(nn.Module):
    """GHM Regression Loss.
    Details of the theorem can be viewed in the paper
    `Gradient Harmonized Single-stage Detector
    <https://arxiv.org/abs/1811.05181>`_.
    Args:
        mu (float): The parameter for the Authentic Smooth L1 loss.
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        loss_weight (float): The weight of the total GHM-R loss.
    """
    def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0):
        super(GHMR, self).__init__()
        self.mu = mu
        self.bins = bins
        # Bin edges over the gradient magnitude g; the last edge is pushed
        # far out so g values at the boundary still fall in the final bin.
        edges = torch.arange(bins + 1).float() / bins
        self.register_buffer('edges', edges)
        self.edges[-1] = 1000.0
        self.momentum = momentum
        if momentum > 0:
            # Running per-bin counts (EMA), only kept when momentum > 0.
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.loss_weight = loss_weight
    def forward(self, pred, target, label_weight, avg_factor=None):
        """Calculate the GHM-R loss.
        Args:
            pred (float tensor of size [batch_num, 4 (* class_num)]):
                The prediction of box regression layer. Channel number can be 4
                or 4 * class_num depending on whether it is class-agnostic.
            target (float tensor of size [batch_num, 4 (* class_num)]):
                The target regression values with the same size of pred.
            label_weight (float tensor of size [batch_num, 4 (* class_num)]):
                The weight of each sample, 0 if ignored.
        Returns:
            The gradient harmonized loss.
        """
        mu = self.mu
        edges = self.edges
        mmt = self.momentum
        # Authentic Smooth L1: sqrt(d^2 + mu^2) - mu.
        diff = pred - target
        loss = torch.sqrt(diff * diff + mu * mu) - mu
        # Gradient magnitude of the loss w.r.t. pred; detached so the
        # binning does not participate in autograd.
        g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
        weights = torch.zeros_like(g)
        valid = label_weight > 0
        # Total sample weight, clamped to at least 1 to avoid div-by-zero.
        tot = max(label_weight.float().sum().item(), 1.0)
        n = 0
        for i in range(self.bins):
            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin > 0:
                n += 1
                if mmt > 0:
                    # EMA of the bin population; weight by its inverse.
                    self.acc_sum[i] = mmt * self.acc_sum[i] + (1 - mmt
                        ) * num_in_bin
                    weights[inds] = tot / self.acc_sum[i]
                else:
                    weights[inds] = tot / num_in_bin
        if n > 0:
            # Normalize by the number of non-empty bins.
            weights /= n
        loss = loss * weights
        loss = loss.sum() / tot
        return loss * self.loss_weight
def get_inputs():
    # pred, target and label_weight, all shaped (4, 4, 4, 4).
    return [torch.rand(4, 4, 4, 4) for _ in range(3)]


def get_init_inputs():
    # No positional and no keyword constructor arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_gt_sum_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel):
    # Single-program reduction over the 256 label weights: writes their
    # total sum to out_ptr0 and the element-wise (weight > 0) validity
    # mask to out_ptr1.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
    tmp4 = 0.0
    tmp5 = tmp0 > tmp4  # validity mask
    tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp5, None)
    tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp3, None)
@triton.jit
def triton_poi_fused_abs_add_div_mul_sqrt_sub_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Element-wise GHM-R terms with mu = 0.02 (0.0004 == mu * mu):
    #   out_ptr0 <- sqrt(diff^2 + mu^2) - mu   (Authentic Smooth L1 loss)
    #   out_ptr1 <- |diff / sqrt(mu^2 + diff^2)|  (gradient magnitude g)
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)  # pred
    tmp1 = tl.load(in_ptr1 + x0, xmask)  # target
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = 0.0004  # mu * mu
    tmp5 = tmp3 + tmp4
    tmp6 = libdevice.sqrt(tmp5)
    tmp7 = 0.02  # mu
    tmp8 = tmp6 - tmp7  # loss term
    tmp9 = tmp2 / tmp6
    tmp10 = tl_math.abs(tmp9)  # gradient magnitude
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_zeros_like_2(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Zero-fill the 256-element buffer (torch.zeros_like of the weights).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
    # Inductor entry for GHMR: buf0 <- sum(label_weight), buf4 <- validity
    # mask, buf1 <- element-wise loss, buf2 <- gradient magnitude g,
    # buf3 <- zeroed weights buffer.
    # NOTE(review): the data-dependent binning loop of GHMR.forward is not
    # compiled here; the returned tensors look like intermediates —
    # confirm how callers consume this tuple.
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_per_fused_gt_sum_0[grid(1)](arg2_1, buf0, buf4, 1, 256,
            num_warps=2, num_stages=1)
        del arg2_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_abs_add_div_mul_sqrt_sub_1[grid(256)](arg0_1,
            arg1_1, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_zeros_like_2[grid(256)](buf3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
    return buf0, buf1, buf2, buf3, buf4
class GHMRNew(nn.Module):
    """GHM Regression Loss with a Triton-compiled forward pass.

    See `Gradient Harmonized Single-stage Detector
    <https://arxiv.org/abs/1811.05181>`_.

    Args:
        mu (float): Parameter of the Authentic Smooth L1 loss.
        bins (int): Number of unit regions for the gradient histogram.
        momentum (float): EMA factor for the per-bin counts.
        loss_weight (float): Scale of the total GHM-R loss.
    """

    def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0):
        super(GHMRNew, self).__init__()
        self.mu = mu
        self.bins = bins
        edge_values = torch.arange(bins + 1).float() / bins
        self.register_buffer('edges', edge_values)
        self.edges[-1] = 1000.0
        self.momentum = momentum
        if momentum > 0:
            self.register_buffer('acc_sum', torch.zeros(bins))
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1, input_2):
        # Feed pred, target and label_weight to the compiled graph and
        # return its first output.
        return call([input_0, input_1, input_2])[0]
|
CityU-AIM-Group/HTD
|
GHMR
| false
| 17,118
|
[
"MIT"
] | 5
|
0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
AlignDifferential
|
import torch
from torch import nn
class AlignDifferential(nn.Module):
    def __init__(self):
        super().__init__()

    def new_length(self, length):
        return length

    def forward(self, states):
        """Central difference along the time axis.

        :param states: [batch, length, *]
        """
        # Linearly extrapolate one frame at each end so the output keeps
        # the original length after the central difference.
        front = states[:, 0:1] * 2 - states[:, 1:2]
        back = states[:, -1:] * 2 - states[:, -2:-1]
        padded = torch.cat([front, states, back], dim=1)
        return (padded[:, 2:] - padded[:, :-2]) / 2

    def show(self, name='AlignDifferential', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,))
def get_inputs():
    # A single random activation tensor.
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    # No positional and no keyword constructor arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Build the time-padded sequence (length 6 from length 4):
    #   index 0      -> 2 * s[0] - s[1]   (front extrapolation)
    #   indices 1..4 -> s                 (original frames)
    #   index 5      -> 2 * s[3] - s[2]   (back extrapolation)
    xnumel = 384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 6  # time index within the padded output
    x0 = xindex % 16  # position within one frame
    x2 = xindex // 96  # batch index
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3  # front-extrapolation branch (x1 == 0)
    tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = 2.0
    tmp7 = tmp5 * tmp6
    tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp9 = tmp7 - tmp8  # 2 * s[0] - s[1]
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp0 >= tmp3
    tmp13 = tl.full([1], 5, tl.int64)
    tmp14 = tmp0 < tmp13
    tmp15 = tmp12 & tmp14  # copy branch (1 <= x1 < 5)
    tmp16 = tl.load(in_ptr0 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp15 &
        xmask, other=0.0)
    tmp17 = tmp0 >= tmp13  # back-extrapolation branch (x1 == 5)
    tl.full([1], 6, tl.int64)
    tmp20 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp17 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp21 = tmp20 * tmp6
    tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp17 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp23 = tmp21 - tmp22  # 2 * s[3] - s[2]
    tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
    tmp25 = tl.where(tmp17, tmp23, tmp24)
    tmp26 = tl.where(tmp15, tmp16, tmp25)
    tmp27 = tl.where(tmp4, tmp11, tmp26)
    tl.store(out_ptr0 + x3, tmp27, xmask)
@triton.jit
def triton_poi_fused_div_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Central difference on the padded sequence:
    # (padded[:, 2:] - padded[:, :-2]) * 0.5.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64  # position within one output frame block
    x1 = xindex // 64  # batch index (padded stride is 96 per batch)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (32 + x0 + 96 * x1), xmask)  # padded[:, 2:]
    tmp1 = tl.load(in_ptr0 + (x0 + 96 * x1), xmask)  # padded[:, :-2]
    tmp2 = tmp0 - tmp1
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
    # Inductor entry point: pad the (4, 4, 4, 4) input to length 6 along
    # dim 1 with extrapolated endpoints, then take the central difference.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Padded intermediate: (4, 6, 4, 4).
        buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_div_sub_1[grid(256)](buf0, buf1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf0
    return buf1,
class AlignDifferentialNew(nn.Module):
    def __init__(self):
        super().__init__()

    def new_length(self, length):
        return length

    def forward(self, input_0):
        # Triton-compiled central difference; ``call`` returns a 1-tuple.
        return call([input_0])[0]

    def show(self, name='AlignDifferential', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,))
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
AlignDifferential
| false
| 17,119
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
C1
|
import torch
import torch.nn as nn
from collections import OrderedDict
class C1(nn.Module):
    """First LeNet stage: 5x5 conv (1 -> 6 channels), ReLU, 2x2 max-pool."""

    def __init__(self):
        super(C1, self).__init__()
        # Named submodules kept identical ('c1', 'relu1', 's1') so
        # state_dict keys are unchanged.
        layers = OrderedDict()
        layers['c1'] = nn.Conv2d(1, 6, kernel_size=(5, 5))
        layers['relu1'] = nn.ReLU()
        layers['s1'] = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
        self.c1 = nn.Sequential(layers)

    def forward(self, img):
        """Run the conv-relu-pool stage on ``img``."""
        return self.c1(img)
def get_inputs():
    # One batch of four grayscale 64x64 images.
    return [torch.rand(4, 1, 64, 64)]


def get_init_inputs():
    # No positional and no keyword constructor arguments.
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Add the conv bias and apply ReLU to the (4, 6, 60, 60) convolution
    # output, writing into a buffer padded to 3616 elements per channel.
    xnumel = 86400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 3600 % 6  # channel index (for the bias)
    x0 = xindex % 3600  # position within one channel plane
    x4 = xindex // 3600  # combined (image, channel) index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # conv output + bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # 2x2 / stride-2 max pooling over the padded ReLU output: pooled
    # values go to out_ptr0, the argmax position within each window
    # (0..3, int8) to out_ptr1 (padded stride 5504 per image).
    xnumel = 21600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 30  # pooled column
    x1 = xindex // 30 % 30  # pooled row
    x2 = xindex // 900  # combined (image, channel) index
    x5 = xindex
    x4 = xindex // 5400  # image index (for the padded indices buffer)
    x6 = xindex % 5400
    # The four elements of the 2x2 window (input row stride is 60,
    # doubled to 120 because of the stride-2 pooling).
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x2), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)  # window maximum
    # Track which of the four positions supplied the maximum.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x5, tmp6, xmask)
    tl.store(out_ptr1 + (x6 + 5504 * x4), tmp16, xmask)
def call(args):
    # Inductor entry for the C1 stage: extern 5x5 convolution, fused
    # bias + ReLU kernel, then 2x2 max pooling with indices.
    # args = (conv weight, conv bias, input batch).
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (6, 1, 5, 5), (25, 25, 5, 1))
    assert_size_stride(primals_2, (6,), (1,))
    assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 6, 60, 60), (21600, 3600, 60, 1))
        # Padded activation buffer (3616-element channel stride).
        buf1 = empty_strided_cuda((4, 6, 60, 60), (21696, 3616, 60, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(86400)](buf0, primals_2,
            buf1, 86400, XBLOCK=512, num_warps=8, num_stages=1)
        del buf0
        del primals_2
        buf2 = empty_strided_cuda((4, 6, 30, 30), (5400, 900, 30, 1), torch
            .float32)
        buf3 = empty_strided_cuda((4, 6, 30, 30), (5504, 900, 30, 1), torch
            .int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(21600)](buf1, buf2,
            buf3, 21600, XBLOCK=128, num_warps=4, num_stages=1)
    # Extra tensors are returned alongside the pooled output — presumably
    # saved for the backward pass; confirm against the generated wrapper.
    return buf2, primals_1, primals_3, buf1, buf3
class C1New(nn.Module):
    """LeNet C1 stage with a Triton-fused conv + ReLU + max-pool forward."""

    def __init__(self):
        super(C1New, self).__init__()
        self.c1 = nn.Sequential(OrderedDict([('c1', nn.Conv2d(1, 6,
            kernel_size=(5, 5))), ('relu1', nn.ReLU()), ('s1', nn.MaxPool2d
            (kernel_size=(2, 2), stride=2))]))

    def forward(self, input_0):
        # Feed the stage's parameters plus the input to the compiled graph;
        # its first output is the pooled activation.
        weight = self.c1.c1.weight
        bias = self.c1.c1.bias
        return call([weight, bias, input_0])[0]
|
ConstantinSeibold/SGL
|
C1
| false
| 17,120
|
[
"MIT"
] | 7
|
fab4d2df515608c2a6a89b2ac8c2655ce8e08b1a
|
https://github.com/ConstantinSeibold/SGL/tree/fab4d2df515608c2a6a89b2ac8c2655ce8e08b1a
|
GaussianFocalLoss
|
import functools
import torch
import torch.nn.functional as F
import torch.nn as nn
def reduce_loss(loss, reduction):
    """Reduce an element-wise loss tensor.

    Args:
        loss (Tensor): Element-wise loss values.
        reduction (str): One of "none", "mean" or "sum".

    Return:
        Tensor: The loss untouched, averaged, or summed.
    """
    # torch maps the reduction string onto an internal enum:
    # 0 -> "none", 1 -> "mean", 2 -> "sum" (invalid strings raise here).
    enum_value = F._Reduction.get_enum(reduction)
    if enum_value == 1:
        return loss.mean()
    if enum_value == 2:
        return loss.sum()
    if enum_value == 0:
        return loss
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply an optional element-wise weight, then reduce the loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor, optional): Element-wise weights.
        reduction (str): "none", "mean" or "sum".
        avg_factor (float, optional): Custom denominator used instead of the
            element count when averaging.

    Returns:
        Tensor: Weighted and reduced loss.
    """
    weighted = loss if weight is None else loss * weight
    if avg_factor is None:
        # No custom normaliser: fall back to the standard reduction.
        return reduce_loss(weighted, reduction)
    if reduction == 'mean':
        return weighted.sum() / avg_factor
    if reduction == 'none':
        return weighted
    # reduction == 'sum' cannot honour a custom averaging factor.
    raise ValueError('avg_factor can not be used with reduction="sum"')
def weighted_loss(loss_func):
    """Decorator turning an element-wise loss into a weighted, reducible one.

    The wrapped function must have the signature
    ``loss_func(pred, target, **kwargs)`` and return an element-wise loss.
    The decorated version gains ``weight``, ``reduction`` and ``avg_factor``
    keyword arguments, i.e. its signature becomes
    ``loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)``.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()
    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])
    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=None,
                **kwargs):
        # Compute the raw element-wise loss first, then centralise all
        # weighting/reduction logic in weight_reduce_loss.
        raw = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(raw, weight, reduction, avg_factor)

    return wrapper
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
    distribution.

    Args:
        pred (torch.Tensor): The prediction.
        gaussian_target (torch.Tensor): The learning target of the prediction
            in gaussian distribution.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
    """
    eps = 1e-12
    # Exact peaks of the gaussian heatmap count as positives.
    pos_mask = gaussian_target.eq(1)
    # Negatives near a peak are softly down-weighted by (1 - target)^gamma.
    neg_mask = (1 - gaussian_target).pow(gamma)
    positive = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_mask
    negative = -(1 - pred + eps).log() * pred.pow(alpha) * neg_mask
    return positive + negative
class GaussianFocalLoss(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Note that the target here is a gaussian heatmap, not a 0/1 binary target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negtive samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, alpha=2.0, gamma=4.0, reduction='mean', loss_weight=1.0
        ):
        super(GaussianFocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
        reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction
                in gaussian distribution.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # All legal overrides are truthy strings, so `or` picks the override
        # exactly when one is supplied.
        reduction = reduction_override or self.reduction
        loss = gaussian_focal_loss(pred, target, weight, alpha=self.alpha,
            gamma=self.gamma, reduction=reduction, avg_factor=avg_factor)
        return self.loss_weight * loss
def get_inputs():
    # Prediction and gaussian target heatmaps, both of shape (4, 4, 4, 4).
    shape = [4, 4, 4, 4]
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    # The module is constructed with no positional or keyword arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Single-program reduction kernel: fused gaussian focal loss (alpha=2,
# gamma=4 baked in) followed by a mean over all 256 elements.
@triton.jit
def triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    # in_ptr0 = pred, in_ptr1 = gaussian target.
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp9 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1e-12
    # pos_loss = -log(pred + eps) * (1 - pred)^2 * (target == 1)
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp4 = -tmp3
    tmp5 = 1.0
    tmp6 = tmp5 - tmp0
    tmp7 = tmp6 * tmp6
    tmp8 = tmp4 * tmp7
    tmp10 = tmp9 == tmp5
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp8 * tmp11
    # neg_loss = -log(1 - pred + eps) * pred^2 * (1 - target)^4
    tmp13 = tmp6 + tmp1
    tmp14 = tl_math.log(tmp13)
    tmp15 = -tmp14
    tmp16 = tmp0 * tmp0
    tmp17 = tmp15 * tmp16
    tmp18 = tmp5 - tmp9
    tmp19 = tmp18 * tmp18
    tmp20 = tmp19 * tmp19
    tmp21 = tmp17 * tmp20
    tmp22 = tmp12 + tmp21
    # Mean over the 256-element block, times loss_weight (1.0).
    tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
    tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
    tmp26 = 256.0
    tmp27 = tmp25 / tmp26
    tmp28 = tmp27 * tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None)
def call(args):
    """Run the fused gaussian-focal-loss reduction on two (4,4,4,4) tensors.

    Returns a 1-tuple holding the scalar mean loss on the GPU.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # 0-d output buffer for the scalar loss.
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # One program reduces all 4*4*4*4 = 256 elements.
        triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    # get_enum maps "none"/"mean"/"sum" -> 0/1/2 and raises on anything else,
    # so the dispatch below covers every reachable code.
    code = F._Reduction.get_enum(reduction)
    dispatch = {0: lambda: loss, 1: loss.mean, 2: loss.sum}
    return dispatch[code]()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor used as the denominator when
            computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    # Without an avg_factor, defer to the plain reduction.
    if avg_factor is None:
        return reduce_loss(loss, reduction)
    if reduction == 'mean':
        return loss.sum() / avg_factor
    if reduction == 'none':
        return loss
    # Only "sum" remains, which is incompatible with a custom denominator.
    raise ValueError('avg_factor can not be used with reduction="sum"')
def weighted_loss(loss_func):
    """Wrap an element-wise loss function with weighting and reduction.

    ``loss_func`` must have the signature ``loss_func(pred, target,
    **kwargs)`` and return an un-reduced element-wise loss. The returned
    wrapper adds ``weight``, ``reduction`` and ``avg_factor`` keyword
    arguments, giving the signature ``loss_func(pred, target, weight=None,
    reduction='mean', avg_factor=None, **kwargs)``.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()
    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])
    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=None,
                **kwargs):
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction, avg_factor)

    return wrapper
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
    distribution.

    Args:
        pred (torch.Tensor): The prediction.
        gaussian_target (torch.Tensor): The learning target of the prediction
            in gaussian distribution.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
    """
    eps = 1e-12
    peak = gaussian_target.eq(1)
    off_peak = (1 - gaussian_target).pow(gamma)
    # Split the log terms out for readability; eps guards log(0).
    log_p = (pred + eps).log()
    log_not_p = (1 - pred + eps).log()
    pos_term = -log_p * (1 - pred).pow(alpha) * peak
    neg_term = -log_not_p * pred.pow(alpha) * off_peak
    return pos_term + neg_term
class GaussianFocalLossNew(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Please notice that the target in GaussianFocalLoss is a gaussian heatmap,
    not 0/1 binary target.

    Compiled variant: forward delegates to the fused Triton `call`, which
    hard-codes alpha=2, gamma=4 and mean reduction (the kernel divides by
    256); the constructor arguments are stored but not consulted by the
    kernel.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negtive samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, alpha=2.0, gamma=4.0, reduction='mean', loss_weight=1.0
        ):
        super(GaussianFocalLossNew, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        # input_0: prediction heatmap, input_1: gaussian target heatmap.
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        # call returns a 1-tuple containing the scalar loss.
        return output[0]
|
CityU-AIM-Group/HTD
|
GaussianFocalLoss
| false
| 17,121
|
[
"MIT"
] | 5
|
0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
Differential
|
import torch
from torch import nn
class Differential(nn.Module):
    """Temporal finite difference along the length dimension (dim 1).

    For every window start ``t`` (stepped by ``stride``), emits
    ``x[:, t] - x[:, t + kernel_size - 1]``.

    Args:
        kernel_size (int): Temporal span of the difference. Defaults to 3.
        stride (int): Step between successive windows. Defaults to 1.
        padding (int): Positive values replicate-pad both ends of the
            sequence; negative values crop it instead. Defaults to 0.
    """

    def __init__(self, kernel_size=3, stride=1, padding=0):
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding

    def new_length(self, length):
        """Return the output length for an input of ``length`` time steps."""
        new_length = (length + self.padding * 2 - self.kernel_size + 1
            ) // self.stride
        return new_length

    def forward(self, states):
        """
        :param states: [batch, length, *]
        """
        _batch, length, _n_agents, _state_dim = states.size()
        padding = self.padding
        kernel_size = self.kernel_size
        stride = self.stride
        if padding != 0:
            if padding > 0:
                # Replicate first/last frames so boundary windows exist.
                states = torch.cat([states[:, :1].repeat(1, padding, 1, 1),
                    states, states[:, -1:].repeat(1, padding, 1, 1)], dim=1)
            else:
                # Negative padding crops the sequence symmetrically.
                states = states[:, -padding:padding]
        # Reuse the single source of truth for the output-length formula
        # instead of duplicating it inline (was previously repeated here).
        new_length = self.new_length(length)
        differentials = states[:, 0:new_length * stride:stride] - states[:,
            kernel_size - 1:kernel_size - 1 + new_length * stride:stride]
        return differentials

    def show(self, name='Differential', indent=0, log=print, **kwargs):
        log(' ' * indent +
            '- %s(x) = Differential(ks=%d, stride=%d, padding=%d)' % (name,
            self.kernel_size, self.stride, self.padding))
def get_inputs():
    # One random [batch, length, n_agents, state_dim] tensor.
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    # Default-constructed module: no args, no kwargs.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Element-wise subtraction for Differential with the default config
# (kernel_size=3, stride=1, padding=0) on a (4,4,4,4) input:
# out[b, t] = x[b, t] - x[b, t + 2] for t in {0, 1}.
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x0 indexes within a (length=2, agents=4, dim=4) slab; x1 is the batch.
    x0 = xindex % 32
    x1 = xindex // 32
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    # +32 skips two time steps (2 * 16 elements per step).
    tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 - tmp1
    tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
    """Run the fused temporal-difference kernel on one (4,4,4,4) tensor.

    Returns a 1-tuple holding the (4,2,4,4) differential tensor.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Output length shrinks from 4 to 2 (kernel_size=3, stride=1).
        buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_sub_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class DifferentialNew(nn.Module):
    """Compiled Differential: forward delegates to the Triton `call`, which
    is specialised to the default kernel_size=3/stride=1/padding=0 on a
    (4,4,4,4) input; the stored hyper-parameters only affect `new_length`
    and `show`.
    """

    def __init__(self, kernel_size=3, stride=1, padding=0):
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding

    def new_length(self, length):
        # Same output-length formula as the eager module.
        new_length = (length + self.padding * 2 - self.kernel_size + 1
            ) // self.stride
        return new_length

    def show(self, name='Differential', indent=0, log=print, **kwargs):
        log(' ' * indent +
            '- %s(x) = Differential(ks=%d, stride=%d, padding=%d)' % (name,
            self.kernel_size, self.stride, self.padding))

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
Differential
| false
| 17,122
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
MultiheadAttention
|
import torch
import torch.nn as nn
class MultiheadAttention(nn.Module):
    """A wrapper for torch.nn.MultiheadAttention.

    This module implements MultiheadAttention with residual connection,
    and positional encoding used in DETR is also passed as input.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads. Same as
            `nn.MultiheadAttention`.
        dropout (float): A Dropout layer on attn_output_weights. Default 0.0.
    """

    def __init__(self, embed_dims, num_heads, dropout=0.0):
        super(MultiheadAttention, self).__init__()
        assert embed_dims % num_heads == 0, f'embed_dims must be divisible by num_heads. got {embed_dims} and {num_heads}.'
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        # NOTE(review): self.dropout is first bound to the float rate and
        # then rebound to an nn.Dropout module below, so __repr__ prints the
        # module rather than the rate — confirm this is intended.
        self.dropout = dropout
        self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, key=None, value=None, residual=None, query_pos=
        None, key_pos=None, attn_mask=None, key_padding_mask=None):
        """Forward function for `MultiheadAttention`.

        Args:
            x (Tensor): The input query with shape [num_query, bs,
                embed_dims]. Same in `nn.MultiheadAttention.forward`.
            key (Tensor): The key tensor with shape [num_key, bs,
                embed_dims]. Same in `nn.MultiheadAttention.forward`.
                Default None. If None, the `query` will be used.
            value (Tensor): The value tensor with same shape as `key`.
                Same in `nn.MultiheadAttention.forward`. Default None.
                If None, the `key` will be used.
            residual (Tensor): The tensor used for addition, with the
                same shape as `x`. Default None. If None, `x` will be used.
            query_pos (Tensor): The positional encoding for query, with
                the same shape as `x`. Default None. If not None, it will
                be added to `x` before forward function.
            key_pos (Tensor): The positional encoding for `key`, with the
                same shape as `key`. Default None. If not None, it will
                be added to `key` before forward function. If None, and
                `query_pos` has the same shape as `key`, then `query_pos`
                will be used for `key_pos`.
            attn_mask (Tensor): ByteTensor mask with shape [num_query,
                num_key]. Same in `nn.MultiheadAttention.forward`.
                Default None.
            key_padding_mask (Tensor): ByteTensor with shape [bs, num_key].
                Same in `nn.MultiheadAttention.forward`. Default None.

        Returns:
            Tensor: forwarded results with shape [num_query, bs, embed_dims].
        """
        query = x
        # Cascade of defaults: key falls back to query, value to key,
        # residual to the raw input.
        if key is None:
            key = query
        if value is None:
            value = key
        if residual is None:
            residual = x
        if key_pos is None:
            if query_pos is not None and key is not None:
                # Reuse query_pos for the keys only when shapes match
                # (typical self-attention case).
                if query_pos.shape == key.shape:
                    key_pos = query_pos
        if query_pos is not None:
            query = query + query_pos
        if key_pos is not None:
            key = key + key_pos
        out = self.attn(query, key, value=value, attn_mask=attn_mask,
            key_padding_mask=key_padding_mask)[0]
        # Residual connection around dropout(attention output).
        return residual + self.dropout(out)

    def __repr__(self):
        """str: a string that describes the module"""
        repr_str = self.__class__.__name__
        repr_str += f'(embed_dims={self.embed_dims}, '
        repr_str += f'num_heads={self.num_heads}, '
        repr_str += f'dropout={self.dropout})'
        return repr_str
def get_inputs():
    # A single random [num_query, embed_dims] = [4, 4] query tensor.
    return [torch.rand(4, 4)]
def get_init_inputs():
    # Constructor kwargs for MultiheadAttention: 4 dims split over 4 heads.
    kwargs = {'embed_dims': 4, 'num_heads': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Fused query-bias add and attention scaling: q = (q + bias_q) * scale,
# where scale = 1/sqrt(head_dim) = 1.0 here (head_dim = 1).
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    # x0 selects the per-feature bias element (broadcast over rows).
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
# Softmax pass 1: per-row max subtraction and exp (numerically stable form).
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    # x1 is the row index; each row has 4 logits.
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    # Row max over the 4 logits.
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
# Softmax pass 2: divide each exp value by its row sum.
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    # Row sum of the exponentials produced by pass 1.
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
# 4x4 transpose-copy: gathers the per-head attention output back into
# [num_query, embed_dims] layout for the output projection.
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    # Read (y0, x1), write (x1, y0): a strided transpose.
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
# Fused out-projection bias add and residual connection:
# out = residual(in_ptr0) + (proj(in_out_ptr0) + bias(in_ptr1)).
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    # x0 selects the bias element, broadcast over rows.
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """Inductor-generated self-attention forward (embed_dims=4, 4 heads of
    size 1, no masks, no positional encodings).

    Args (as used below): primals_1 is the (4,4) attention input/residual,
    primals_2/primals_3 the packed in-proj weight/bias, primals_4/primals_5
    the out-proj weight/bias.

    Returns:
        tuple: attention output plus saved tensors for backward.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (12, 4), (4, 1))
    assert_size_stride(primals_3, (12,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Q projection (bias fused later in triton_poi_fused_mul_0).
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
            (1, 4), 0), out=buf0)
        # K projection with its bias slice (rows 4..7 of the packed bias).
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
            primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
            alpha=1, beta=1, out=buf1)
        # V projection with its bias slice (rows 8..11).
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
            primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
            alpha=1, beta=1, out=buf2)
        del primals_2
        # View Q as (heads=4, seq=4, head_dim=1) for batched attention.
        buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_3
        # Attention logits: Q @ K^T per head.
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
            4), 0), out=buf4)
        # Two-pass softmax over the last dimension.
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf6 = buf4
        del buf4
        triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf5
        # Weighted values: softmax @ V.
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
            1), 0), out=buf7)
        # Transpose heads back into [num_query, embed_dims].
        buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
            YBLOCK=4, num_warps=1, num_stages=1)
        buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
        del buf7
        # Output projection (primals_4 used transposed as the weight).
        extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
        buf10 = buf9
        del buf9
        # Bias add + residual connection with primals_1.
        triton_poi_fused_add_4[grid(16)](buf10, primals_1, primals_5, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
    return buf10, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0
        ), primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0
        ), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0
        ), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0)
class MultiheadAttentionNew(nn.Module):
    """A wrapper for torch.nn.MultiheadAttention.

    Compiled variant: forward delegates to the generated `call` and supports
    only the plain self-attention path (no key/value/residual/positional
    arguments, no masks).

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads. Same as
            `nn.MultiheadAttention`.
        dropout (float): A Dropout layer on attn_output_weights. Default 0.0.
    """

    def __init__(self, embed_dims, num_heads, dropout=0.0):
        super(MultiheadAttentionNew, self).__init__()
        assert embed_dims % num_heads == 0, f'embed_dims must be divisible by num_heads. got {embed_dims} and {num_heads}.'
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        # NOTE(review): self.dropout holds the float rate only briefly; it is
        # rebound to an nn.Dropout module on the next-but-one line.
        self.dropout = dropout
        self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout)
        self.dropout = nn.Dropout(dropout)

    def __repr__(self):
        """str: a string that describes the module"""
        repr_str = self.__class__.__name__
        repr_str += f'(embed_dims={self.embed_dims}, '
        repr_str += f'num_heads={self.num_heads}, '
        repr_str += f'dropout={self.dropout})'
        return repr_str

    def forward(self, input_0):
        # NOTE(review): in `call`, primals_1 feeds the Q/K/V projections and
        # the residual add, while primals_4 is used (transposed) as the
        # output-projection weight — yet here primals_1 is bound to
        # out_proj.weight and primals_4 to the input. Verify this mapping
        # against the traced graph; it looks swapped relative to the eager
        # module's semantics.
        primals_2 = self.attn.in_proj_weight
        primals_3 = self.attn.in_proj_bias
        primals_1 = self.attn.out_proj.weight
        primals_5 = self.attn.out_proj.bias
        primals_4 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
|
CityU-AIM-Group/HTD
|
MultiheadAttention
| false
| 17,123
|
[
"MIT"
] | 5
|
0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
C2
|
import torch
import torch.nn as nn
from collections import OrderedDict
class C2(nn.Module):
    """LeNet stage C2: 5x5 conv (6 -> 16 channels), ReLU, then 2x2 max-pool
    with stride 2.
    """

    def __init__(self):
        super(C2, self).__init__()
        layers = OrderedDict()
        layers['c2'] = nn.Conv2d(6, 16, kernel_size=(5, 5))
        layers['relu2'] = nn.ReLU()
        layers['s2'] = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
        self.c2 = nn.Sequential(layers)

    def forward(self, img):
        """Run the conv/ReLU/pool pipeline on an (N, 6, H, W) batch."""
        return self.c2(img)
def get_inputs():
    # One random 6-channel 64x64 batch of 4 images.
    return [torch.rand(4, 6, 64, 64)]
def get_init_inputs():
    # C2 takes no constructor arguments.
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Fused per-channel bias add + ReLU over the C2 conv output
# (4 x 16 x 60 x 60 = 230400 elements), written with padded channel stride
# 3616 for alignment.
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 230400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    # x1: output channel (for the bias); x0/x4: position within/index of the
    # 60x60 = 3600-element channel plane.
    x1 = xindex // 3600 % 16
    x0 = xindex % 3600
    x4 = xindex // 3600
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    # ReLU: max(0, x).
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask)
# 2x2 stride-2 max-pool with argmax indices over the padded 60x60 relu
# output (input row stride 60 within a 3616-element channel slab).
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 57600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # Decompose into (col x0, row x1, channel-slab x2); x4/x6 address the
    # padded int8 index output.
    x0 = xindex % 30
    x1 = xindex // 30 % 30
    x2 = xindex // 900
    x5 = xindex
    x4 = xindex // 14400
    x6 = xindex % 14400
    # The four corners of each 2x2 window (row stride 60 -> offset 120 for
    # two pooled rows, +60/+61 for the second input row).
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x2), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    # Argmax within the window, encoded 0..3 as int8 for the backward pass.
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x5, tmp6, xmask)
    tl.store(out_ptr1 + (x6 + 14464 * x4), tmp16, xmask)
def call(args):
    """Inductor-generated entry point for C2: conv2d -> bias+ReLU -> maxpool.

    Args:
        args: list [conv_weight (16,6,5,5), conv_bias (16,), input
            (4,6,64,64)]; cleared in place.

    Returns:
        tuple: (pooled output, weight, input, relu activations, pool indices).
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (16, 6, 5, 5), (150, 25, 5, 1))
    assert_size_stride(primals_2, (16,), (1,))
    assert_size_stride(primals_3, (4, 6, 64, 64), (24576, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Convolution via extern kernel; bias fused into the Triton kernel.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 16, 60, 60), (57600, 3600, 60, 1))
        buf1 = empty_strided_cuda((4, 16, 60, 60), (57856, 3616, 60, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(230400)](buf0, primals_2,
            buf1, 230400, XBLOCK=512, num_warps=8, num_stages=1)
        del buf0
        del primals_2
        buf2 = empty_strided_cuda((4, 16, 30, 30), (14400, 900, 30, 1),
            torch.float32)
        # int8 argmax indices kept for the max-pool backward.
        buf3 = empty_strided_cuda((4, 16, 30, 30), (14464, 900, 30, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(57600)](buf1, buf2,
            buf3, 57600, XBLOCK=256, num_warps=4, num_stages=1)
    return buf2, primals_1, primals_3, buf1, buf3
class C2New(nn.Module):
    """LeNet stage C2 (conv 6->16, ReLU, 2x2 max-pool) backed by the
    inductor-compiled `call` above; parameter layout matches the eager C2.
    """

    def __init__(self):
        super(C2New, self).__init__()
        self.c2 = nn.Sequential(OrderedDict([('c2', nn.Conv2d(6, 16,
            kernel_size=(5, 5))), ('relu2', nn.ReLU()), ('s2', nn.MaxPool2d
            (kernel_size=(2, 2), stride=2))]))

    def forward(self, input_0):
        # Pass raw weight/bias/input to the compiled graph; output[0] is the
        # pooled activation.
        primals_1 = self.c2.c2.weight
        primals_2 = self.c2.c2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
ConstantinSeibold/SGL
|
C2
| false
| 17,124
|
[
"MIT"
] | 7
|
fab4d2df515608c2a6a89b2ac8c2655ce8e08b1a
|
https://github.com/ConstantinSeibold/SGL/tree/fab4d2df515608c2a6a89b2ac8c2655ce8e08b1a
|
Distance
|
import torch
from torch import nn
def apply_last_dim(model, x):
    """Apply ``model`` across the flattened last dimension of ``x``.

    ``x`` is viewed as ``(-1, x.size(-1))``, passed through ``model``, and the
    result is reshaped back with all leading dimensions preserved; only the
    last dimension may change size.
    """
    shape = list(x.size())
    flat_out = model(x.contiguous().view(-1, shape[-1]))
    shape[-1] = flat_out.size(-1)
    return flat_out.view(torch.Size(shape))
def get_int_dim_index(name):
    """Translate a dimension name into an integer index.

    Integers pass through unchanged. Otherwise ``name`` must be a character
    of ``'axyz'``: 'a' -> -1 (all components), 'x' -> 0, 'y' -> 1, 'z' -> 2.

    Raises:
        AssertionError: If ``name`` is a string not found in 'axyz'.
    """
    if isinstance(name, int):
        return name
    name_list = 'axyz'
    assert name in name_list
    # str.index replaces the original O(n) comprehension scan.
    return name_list.index(name) - 1
class Length(nn.Module):
    """Euclidean norm over selected components of the last dimension.

    ``dim_index`` of -1 (or the name 'a') means "use every component";
    otherwise only the listed component indices contribute.
    """

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index

    def forward(self, states, dim_index=None):
        if dim_index is None:
            dim_index = self.dim_index
        # Normalise to a list of integer component indices.
        if isinstance(dim_index, int):
            indices = [dim_index]
        else:
            indices = [get_int_dim_index(x) for x in dim_index]
        if -1 in indices:
            # Full norm over the whole last dimension.
            def extractor(x):
                return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True))
        else:
            # Norm over the selected components only.
            def extractor(x):
                return torch.sqrt(torch.sum(x[:, indices].pow(2), dim=1,
                    keepdim=True))
        return apply_last_dim(extractor, states)

    def show(self, name='Length', indent=0, log=print, **kwargs):
        log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self.
            dim_index)))
class Distance(nn.Module):
    """Pairwise distance |x1 - x2|, computed via a Length submodule."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index
        self.length = Length(dim_index)

    def forward(self, states1, states2, dim_index=None):
        # Distance is simply the length of the difference vector.
        delta = states1 - states2
        return self.length(delta, dim_index)

    def show(self, name='Distance', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name)
def get_inputs():
    # Two random state tensors of identical shape (4, 4, 4, 4).
    shape = [4, 4, 4, 4]
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    # Distance is built with its defaults: no args, no kwargs.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Fused Euclidean distance over the last dimension (size 4):
# out[i] = sqrt(sum_j (a[i, j] - b[i, j])^2) for 64 rows.
@triton.jit
def triton_poi_fused_mul_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    # Load the 4 paired components of row x0 from both inputs.
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    # Accumulate squared component differences.
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = libdevice.sqrt(tmp18)
    tl.store(out_ptr0 + x0, tmp19, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sqrt_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0),
def apply_last_dim(model, x):
    """Apply *model* over the last dimension of *x*.

    The tensor is flattened to (-1, last_dim), passed through *model*, and
    the result is reshaped back to the original leading dimensions with the
    model's output size as the new last dimension.
    """
    shape = list(x.size())
    flat_out = model(x.contiguous().view(-1, shape[-1]))
    shape[-1] = flat_out.size(-1)
    return flat_out.view(torch.Size(shape))
def get_int_dim_index(name):
    """Map an axis name to its integer index.

    Integers pass through unchanged.  The letters 'a', 'x', 'y', 'z' map to
    -1, 0, 1, 2 respectively ('a' means "all components").

    Args:
        name (int or str): an integer index or one of 'axyz'.

    Returns:
        int: the resolved dimension index.
    """
    if isinstance(name, int):
        return name
    name_list = 'axyz'
    assert name in name_list
    # str.index replaces the original hand-rolled list-comprehension scan.
    return name_list.index(name) - 1
class Length(nn.Module):
    """Euclidean norm over selected components of the last dimension."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index  # default components to reduce over

    def forward(self, states, dim_index=None):
        if dim_index is None:
            dim_index = self.dim_index
        if isinstance(dim_index, int):
            dim_index = [dim_index]
        else:
            dim_index = [get_int_dim_index(d) for d in dim_index]
        if -1 in dim_index:
            # -1 means "use every component of the last dim".
            def extractor(x):
                return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True))
        else:
            # Norm over the selected components only.
            def extractor(x):
                return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1,
                    keepdim=True))
        return apply_last_dim(extractor, states)

    def show(self, name='Length', indent=0, log=print, **kwargs):
        log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self.
            dim_index)))
class DistanceNew(nn.Module):
    """Triton-backed Euclidean distance |x1 - x2| over the last dimension."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index
        self.length = Length(dim_index)

    def forward(self, input_0, input_1):
        # Delegates to the Inductor-generated `call`, which fuses the
        # subtraction and the norm into a single kernel.
        return call([input_0, input_1])[0]

    def show(self, name='Distance', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name)
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
Distance
| false
| 17,125
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
SIMSE
|
import torch
import torch.nn as nn
class SIMSE(nn.Module):
    """Scale-invariant MSE: (sum(real - pred)) ** 2 / n ** 2.

    Penalises the mean residual rather than per-element residuals, so a
    constant offset between prediction and target dominates the loss.
    """

    def __init__(self):
        super(SIMSE, self).__init__()

    def forward(self, pred, real):
        """Return the scalar SIMSE between *pred* and *real* (same shape)."""
        diffs = real - pred
        # Use Tensor.numel() directly instead of the deprecated `.data`
        # attribute access of the original implementation.
        n = diffs.numel()
        return torch.sum(diffs).pow(2) / n ** 2
def get_inputs():
    """Return two random (4, 4, 4, 4) tensors used as sample forward inputs."""
    sample_shape = (4, 4, 4, 4)
    return [torch.rand(sample_shape), torch.rand(sample_shape)]


def get_init_inputs():
    """Return (positional args, keyword args) for constructing the module."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, rnumel):
    # Single-program reduction kernel generated by TorchInductor.
    # Computes SIMSE over all 256 elements in one pass:
    #   out = (sum(real - pred)) ** 2 / 256 ** 2
    # in_ptr0 holds `real`, in_ptr1 holds `pred` (see call() below).
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = -tmp1
    tmp3 = tmp0 + tmp2  # real + (-pred)
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = tmp6 * tmp6
    tmp8 = 1.52587890625e-05  # 1 / 256 ** 2
    tmp9 = tmp7 * tmp8
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)


def call(args):
    # Validates the two (4, 4, 4, 4) inputs and launches the SIMSE reduction
    # kernel.  Note the argument order: arg1_1 (real) is passed as in_ptr0
    # and arg0_1 (pred) as in_ptr1.  Returns a scalar float32 tensor.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_neg_pow_sum_0[grid(1)](buf1, arg1_1,
            arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class SIMSENew(nn.Module):
    """Scale-invariant MSE computed by a fused Triton reduction kernel."""

    def __init__(self):
        super(SIMSENew, self).__init__()

    def forward(self, input_0, input_1):
        # The Inductor-generated `call` fuses subtraction, sum and the final
        # square/divide into one kernel launch.
        return call([input_0, input_1])[0]
|
Columbine21/TFR-Net
|
SIMSE
| false
| 17,126
|
[
"MIT"
] | 7
|
1da01577542e7f477fdf7323ec0696aebc632357
|
https://github.com/Columbine21/TFR-Net/tree/1da01577542e7f477fdf7323ec0696aebc632357
|
SoftSmall
|
import math
import torch
from torch import nn
class SoftCompare(nn.Module):
    """Base class holding the alpha/beta parameters of a soft comparison.

    A parameter supplied at construction time is frozen at that value; when
    omitted, alpha is initialised from N(0, 1) and stays trainable, while
    beta defaults to zero and stays trainable.
    """

    def __init__(self, alpha=None, beta=None):
        super().__init__()
        alpha_init = 0 if alpha is None else alpha
        beta_init = 0 if beta is None else beta
        self.alpha = nn.Parameter(torch.ones(1) * alpha_init,
            requires_grad=True)
        self.beta = nn.Parameter(torch.ones(1) * beta_init,
            requires_grad=True)
        if alpha is None:
            nn.init.normal_(self.alpha.data, 0, 1)
        else:
            self.alpha.requires_grad_(False)
        if beta is not None:
            self.beta.requires_grad_(False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Subclasses implement the actual comparison.
        raise NotImplementedError
class SoftSmall(SoftCompare):
    """
    Sigmoid((alpha - x) / e^beta)
    """

    def __init__(self, alpha=None, beta=None):
        super().__init__(alpha, beta)

    def forward(self, x, beta=None):
        # A caller-supplied beta overrides the learned one for this call.
        scale = torch.exp(self.beta if beta is None else beta)
        return self.sigmoid((self.alpha - x) / scale)

    def show(self, name='SoftSmall', indent=0, log=print, **kwargs):
        alpha = kwargs.get('alpha', self.alpha)
        beta = kwargs.get('beta', self.beta)
        log(' ' * indent + '- %s(x) = Sigmoid((%lf - x) / %lf)' % (name,
            alpha, math.exp(beta)))
def get_inputs():
    """Return one random (4, 4, 4, 4) tensor as the sample forward input."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Return (positional args, keyword args) for constructing the module."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_exp_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise kernel: out = sigmoid((alpha - x) / exp(beta)).
    # in_ptr0 is the 1-element alpha, in_ptr1 the 256-element input and
    # in_ptr2 the 1-element beta.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp4 = tl.load(in_ptr2 + 0)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp3 = tmp1 - tmp2
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tmp3 / tmp6
    tmp8 = tl.sigmoid(tmp7)
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    # Validates alpha (1,), beta (1,) and the (4, 4, 4, 4) input, launches
    # the fused sigmoid((alpha - x) / exp(beta)) kernel, and returns the
    # result plus the tensors saved for the backward pass.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1,), (1,))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_exp_sigmoid_sub_0[grid(256)](primals_1,
            primals_3, primals_2, buf0, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
    return buf0, primals_1, primals_2, primals_3, buf0
class SoftCompare(nn.Module):
    """Base class owning the alpha/beta parameters of a soft comparison.

    Explicitly supplied parameters are frozen; otherwise alpha gets a
    N(0, 1) init and beta starts at zero, both trainable.
    """

    def __init__(self, alpha=None, beta=None):
        super().__init__()
        self.alpha = nn.Parameter(
            torch.ones(1) * (0 if alpha is None else alpha),
            requires_grad=True)
        self.beta = nn.Parameter(
            torch.ones(1) * (0 if beta is None else beta),
            requires_grad=True)
        if alpha is None:
            nn.init.normal_(self.alpha.data, 0, 1)
        else:
            self.alpha.requires_grad_(False)
        if beta is not None:
            self.beta.requires_grad_(False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Concrete comparisons are defined in subclasses.
        raise NotImplementedError
class SoftSmallNew(SoftCompare):
    """
    Sigmoid((alpha - x) / e^beta)
    """

    def __init__(self, alpha=None, beta=None):
        super().__init__(alpha, beta)

    def show(self, name='SoftSmall', indent=0, log=print, **kwargs):
        alpha = kwargs.get('alpha', self.alpha)
        beta = kwargs.get('beta', self.beta)
        log(' ' * indent + '- %s(x) = Sigmoid((%lf - x) / %lf)' % (name,
            alpha, math.exp(beta)))

    def forward(self, input_0):
        # Triton-backed evaluation of sigmoid((alpha - x) / exp(beta)).
        return call([self.alpha, self.beta, input_0])[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
SoftSmall
| false
| 17,127
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
GHMC
|
import torch
import torch.nn.functional as F
import torch.nn as nn
def _expand_onehot_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero((labels >= 0) & (labels < label_channels),
as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size
(0), label_channels)
return bin_labels, bin_label_weights
class GHMC(nn.Module):
    """GHM Classification Loss.
    Details of the theorem can be viewed in the paper
    `Gradient Harmonized Single-stage Detector
    <https://arxiv.org/abs/1811.05181>`_.
    Args:
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        use_sigmoid (bool): Can only be true for BCE based loss now.
        loss_weight (float): The weight of the total GHM-C loss.
    """

    def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0):
        super(GHMC, self).__init__()
        self.bins = bins
        self.momentum = momentum
        # Bin edges partitioning the gradient-magnitude range [0, 1].
        edges = torch.arange(bins + 1).float() / bins
        self.register_buffer('edges', edges)
        # Nudge the last edge past 1.0 so g == 1 lands in the final bin.
        self.edges[-1] += 1e-06
        if momentum > 0:
            # EMA of per-bin sample counts, used when momentum is enabled.
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.use_sigmoid = use_sigmoid
        if not self.use_sigmoid:
            raise NotImplementedError
        self.loss_weight = loss_weight

    def forward(self, pred, target, label_weight, *args, **kwargs):
        """Calculate the GHM-C loss.
        Args:
            pred (float tensor of size [batch_num, class_num]):
                The direct prediction of classification fc layer.
            target (float tensor of size [batch_num, class_num]):
                Binary class target for each sample.
            label_weight (float tensor of size [batch_num, class_num]):
                the value is 1 if the sample is valid and 0 if ignored.
        Returns:
            The gradient harmonized loss.
        """
        # Expand integer labels to one-hot when pred has an extra class dim.
        if pred.dim() != target.dim():
            target, label_weight = _expand_onehot_labels(target,
                label_weight, pred.size(-1))
        target, label_weight = target.float(), label_weight.float()
        edges = self.edges
        mmt = self.momentum
        weights = torch.zeros_like(pred)
        # Gradient magnitude of the sigmoid-BCE loss per element.
        g = torch.abs(pred.sigmoid().detach() - target)
        valid = label_weight > 0
        tot = max(valid.float().sum().item(), 1.0)
        n = 0
        # Weight each sample inversely to the population of its g-bin.
        for i in range(self.bins):
            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin > 0:
                if mmt > 0:
                    # Smooth the bin population with a moving average.
                    self.acc_sum[i] = mmt * self.acc_sum[i] + (1 - mmt
                        ) * num_in_bin
                    weights[inds] = tot / self.acc_sum[i]
                else:
                    weights[inds] = tot / num_in_bin
                n += 1
        if n > 0:
            # Normalise by the number of non-empty bins.
            weights = weights / n
        loss = F.binary_cross_entropy_with_logits(pred, target, weights,
            reduction='sum') / tot
        return loss * self.loss_weight
def get_inputs():
    """Return random (pred, target, label_weight) sample tensors."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape) for _ in range(3)]


def get_init_inputs():
    """Return (positional args, keyword args) for constructing the module."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_gt_sum_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
    rnumel):
    # Reduction kernel: writes valid = (label_weight > 0) per element to
    # out_ptr0 and the scalar count of valid elements to out_ptr1.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = tmp2.to(tl.float32)
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp2, None)
    tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp6, None)


@triton.jit
def triton_poi_fused_zeros_like_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fills the 256-element weights buffer with zeros.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_abs_sigmoid_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Elementwise gradient magnitude: g = |sigmoid(pred) - target|.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 - tmp2
    tmp4 = tl_math.abs(tmp3)
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    # Computes on-GPU the pieces the GHM-C loss needs: the valid mask (buf0)
    # and its count (buf1), a zeroed weight buffer (buf2) and the gradient
    # magnitude g = |sigmoid(pred) - target| (buf3).  The data-dependent
    # binning loop of the reference implementation is not expressed here.
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf1 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused__to_copy_gt_sum_0[grid(1)](arg2_1, buf0, buf1, 1,
            256, num_warps=2, num_stages=1)
        del arg2_1
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_zeros_like_1[grid(256)](buf2, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_abs_sigmoid_sub_2[grid(256)](arg0_1, arg1_1, buf3,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf1, arg1_1, buf2, buf3, buf0
def _expand_onehot_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero((labels >= 0) & (labels < label_channels),
as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size
(0), label_channels)
return bin_labels, bin_label_weights
class GHMCNew(nn.Module):
"""GHM Classification Loss.
Details of the theorem can be viewed in the paper
`Gradient Harmonized Single-stage Detector
<https://arxiv.org/abs/1811.05181>`_.
Args:
bins (int): Number of the unit regions for distribution calculation.
momentum (float): The parameter for moving average.
use_sigmoid (bool): Can only be true for BCE based loss now.
loss_weight (float): The weight of the total GHM-C loss.
"""
def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0):
super(GHMCNew, self).__init__()
self.bins = bins
self.momentum = momentum
edges = torch.arange(bins + 1).float() / bins
self.register_buffer('edges', edges)
self.edges[-1] += 1e-06
if momentum > 0:
acc_sum = torch.zeros(bins)
self.register_buffer('acc_sum', acc_sum)
self.use_sigmoid = use_sigmoid
if not self.use_sigmoid:
raise NotImplementedError
self.loss_weight = loss_weight
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
CityU-AIM-Group/HTD
|
GHMC
| false
| 17,128
|
[
"MIT"
] | 5
|
0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
Inequality
|
import math
import torch
from torch import nn
class Normalize(nn.Module):
    """Normalizes inputs according to a (fittable) distribution.

    Supported distributions: None (identity), 'normal' ((x - mean) / std)
    and 'uniform' ((x - min) / (max - min + eps)).  The two distribution
    parameters live in a frozen 2-element parameter; samples buffered via
    ``forward(..., keep_data=True)`` can later refit them.
    """

    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        self.data_ = []  # buffered samples consumed by reset_parameters
        if distribution is None:
            pass
        elif distribution == 'normal':
            mean = kwargs.get('mean', 0)
            std = kwargs.get('std', 1)
            self.param = nn.Parameter(torch.Tensor([mean, std]), False)
        elif distribution == 'uniform':
            vmin = kwargs.get('minv', 0)
            vmax = kwargs.get('maxv', 1)
            self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False)
        else:
            raise NotImplementedError()

    def forward(self, x, keep_data=False):
        if keep_data:
            # Estimation mode: record the data and pass it through unchanged.
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            return (x - self.param[0]) / self.param[1]
        if self.distribution == 'uniform':
            return (x - self.param[0]) / (self.param[1] - self.param[0] +
                1e-05)
        raise NotImplementedError()

    def reset_parameters(self, name=None):
        """Refit the distribution parameters from the buffered samples."""
        assert len(self.data_) > 0
        data = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = data.mean().item()
                self.param[1] = data.std().item()
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = data.min().item()
                self.param[1] = data.max().item()
        else:
            raise NotImplementedError()

    def recover_threshold(self, x):
        """Map a normalized threshold back to the original data scale."""
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        if self.distribution == 'uniform':
            return x * float(self.param[1] - self.param[0] + 1e-05) + float(
                self.param[0])
        raise NotImplementedError()

    def init_thresholds(self, x):
        """Initialise a threshold tensor to match the distribution's range."""
        if self.distribution in (None, 'normal'):
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """
    Sigmoid((x - y) / e^beta)
    """

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        # beta acts as a log-temperature controlling comparison sharpness.
        temperature = math.exp(beta)
        return self.sigmoid((x - y) / temperature)
class Inequality(nn.Module):
    """Soft threshold predicate: sigmoid((x - theta_k) / e^beta) per output dim."""

    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        self.thresholds = nn.Parameter(torch.zeros(out_dim),
            requires_grad=True)
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        self.normalize.init_thresholds(self.thresholds)

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, ... ]
        """
        # Append a trailing singleton dim so thresholds broadcast over it.
        states_expand = states.view(*(states.size() + (1,)))
        estimate = kwargs.get('estimate_parameters', False)
        states_expand = self.normalize(states_expand, keep_data=estimate)
        broadcast_shape = [1] * len(states.size()) + [self.out_dim]
        return self.cmp(states_expand,
            self.thresholds.view(*broadcast_shape), beta)

    def reset_parameters(self, parameter_name, name=None):
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)

    def get_descriptions(self, name='Inequality'):
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descroptions = []
        for raw in theta:
            t = self.normalize.recover_threshold(raw)
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descroptions.append('%s > %.2lf' % (name, t))
        return descroptions
def get_inputs():
    """Return one random (4, 4, 4, 4) tensor as the sample forward input."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Return (positional args, keyword args) for constructing the module."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sigmoid_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Elementwise kernel: out = sigmoid((x - threshold) * 1.0).  The divisor
    # exp(beta) was constant-folded to 1.0 by the compiler (beta == 0 trace).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 - tmp2
    tmp4 = 1.0  # 1 / exp(0), folded constant
    tmp5 = tmp3 * tmp4
    tmp6 = tl.sigmoid(tmp5)
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    # Validates the (4, 4, 4, 4) input and the 1-element threshold, launches
    # the fused soft-compare kernel, and returns the (4, 4, 4, 4, 1) result
    # twice (output plus saved-for-backward alias).
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
            .float32)
        get_raw_stream(0)
        triton_poi_fused_div_sigmoid_sub_0[grid(256)](primals_1, primals_2,
            buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
    return buf0, buf0
class Normalize(nn.Module):
    """Distribution-based input normalizer.

    ``distribution`` selects the transform: None is the identity, 'normal'
    applies (x - mean) / std and 'uniform' applies (x - min) / (max - min +
    eps).  Parameters are stored frozen and can be refit from samples
    collected with ``forward(..., keep_data=True)``.
    """

    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        self.data_ = []  # samples buffered for reset_parameters
        if distribution is None:
            pass
        elif distribution == 'normal':
            self.param = nn.Parameter(
                torch.Tensor([kwargs.get('mean', 0), kwargs.get('std', 1)]),
                False)
        elif distribution == 'uniform':
            self.param = nn.Parameter(
                torch.Tensor([kwargs.get('minv', 0), kwargs.get('maxv', 1)]),
                False)
        else:
            raise NotImplementedError()

    def forward(self, x, keep_data=False):
        if keep_data:
            # Collect data for a later refit; pass input through unchanged.
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            mean, std = self.param[0], self.param[1]
            return (x - mean) / std
        if self.distribution == 'uniform':
            vmin, vmax = self.param[0], self.param[1]
            return (x - vmin) / (vmax - vmin + 1e-05)
        raise NotImplementedError()

    def reset_parameters(self, name=None):
        """Refit mean/std (or min/max) from the buffered samples."""
        assert len(self.data_) > 0
        data = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = data.mean().item()
                self.param[1] = data.std().item()
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = data.min().item()
                self.param[1] = data.max().item()
        else:
            raise NotImplementedError()

    def recover_threshold(self, x):
        """Invert the normalization for a threshold value."""
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        if self.distribution == 'uniform':
            return x * float(self.param[1] - self.param[0] + 1e-05) + float(
                self.param[0])
        raise NotImplementedError()

    def init_thresholds(self, x):
        """Initialise *x* in-place to suit the distribution's range."""
        if self.distribution in (None, 'normal'):
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """
    Sigmoid((x - y) / e^beta)
    """

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        # Larger beta softens the comparison; beta == 0 gives plain sigmoid.
        return self.sigmoid((x - y) / math.exp(beta))
class InequalityNew(nn.Module):
    """Triton-backed soft threshold predicate sigmoid(x - theta) (beta == 0)."""

    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        self.thresholds = nn.Parameter(torch.zeros(out_dim),
            requires_grad=True)
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        self.normalize.init_thresholds(self.thresholds)

    def reset_parameters(self, parameter_name, name=None):
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)

    def get_descriptions(self, name='Inequality'):
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descroptions = []
        for raw in theta:
            t = self.normalize.recover_threshold(raw)
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descroptions.append('%s > %.2lf' % (name, t))
        return descroptions

    def forward(self, input_0):
        # Fused kernel specialised for beta == 0 and identity normalization.
        return call([input_0, self.thresholds])[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
Inequality
| false
| 17,129
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
MinPoolTrinary
|
import torch
from torch import nn
class MinPoolTrinary(nn.Module):
    """Min-pools the length dimension into three segments (begin/middle/end)."""

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        return length

    def forward(self, states):
        """
        :param states: [batch, length, *]
        """
        assert states.size(1) >= 3
        side = (states.size(1) + 1) // 3
        # Leading, middle and trailing slices along the length dimension.
        segments = (states[:, :side], states[:, side:-side], states[:, -side:])
        pooled = [seg.min(dim=1, keepdim=True)[0] for seg in segments]
        return torch.cat(pooled, dim=1)

    def show(self, name='MinPoolTrinary', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = MinPoolTrinary()' % (name,))
def get_inputs():
    """Return one random (4, 4, 4, 4) tensor as the sample forward input."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Return (positional args, keyword args) for constructing the module."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused trinary min-pool over dim 1 of a (4, 4, 4, 4) tensor, with
    # side_length specialised to 1: output segment 0 copies row 0, segment 1
    # is min(row 1, row 2), segment 2 copies row 3; the three segments are
    # concatenated into a (4, 3, 4, 4) result.
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 3
    x0 = xindex % 16
    x2 = xindex // 48
    x3 = xindex
    tmp0 = x1  # output segment index, 0..2
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3  # segment 0
    tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 2, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8  # segment 1: min of the two middle rows
    tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp12 = triton_helpers.minimum(tmp10, tmp11)
    tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
    tmp14 = tl.where(tmp9, tmp12, tmp13)
    tmp15 = tmp0 >= tmp7  # segment 2
    tl.full([1], 3, tl.int64)
    tmp18 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp15 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp19 = tl.where(tmp9, tmp14, tmp18)
    tmp20 = tl.where(tmp4, tmp5, tmp19)
    tl.store(out_ptr0 + x3, tmp20, xmask)


def call(args):
    # Validates the (4, 4, 4, 4) input and launches the fused trinary
    # min-pool kernel; returns a single (4, 3, 4, 4) tensor.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(192)](arg0_1, buf0, 192, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class MinPoolTrinaryNew(nn.Module):
    """Triton-backed trinary min pooling over the length dimension."""

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        return length

    def show(self, name='MinPoolTrinary', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = MinPoolTrinary()' % (name,))

    def forward(self, input_0):
        return call([input_0])[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
MinPoolTrinary
| false
| 17,130
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
MaxPoolTrinary
|
import torch
from torch import nn
class MaxPoolTrinary(nn.Module):
    """Max-pools the length dimension into three segments (begin/middle/end)."""

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        return length

    def forward(self, states):
        """
        :param states: [batch, length, *]
        """
        assert states.size(1) >= 3
        side = (states.size(1) + 1) // 3
        segments = (states[:, :side], states[:, side:-side], states[:, -side:])
        pooled = [seg.max(dim=1, keepdim=True)[0] for seg in segments]
        return torch.cat(pooled, dim=1)

    def show(self, name='MaxPoolTrinary', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = MaxPoolTrinary()' % (name,))
def get_inputs():
    """Return one random (4, 4, 4, 4) tensor as the sample forward input."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Return (positional args, keyword args) for constructing the module."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused trinary max-pool over dim 1 of a (4, 4, 4, 4) tensor, with
    # side_length specialised to 1: output segment 0 copies row 0, segment 1
    # is max(row 1, row 2), segment 2 copies row 3; the three segments are
    # concatenated into a (4, 3, 4, 4) result.
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 3
    x0 = xindex % 16
    x2 = xindex // 48
    x3 = xindex
    tmp0 = x1  # output segment index, 0..2
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3  # segment 0
    tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 2, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8  # segment 1: max of the two middle rows
    tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp12 = triton_helpers.maximum(tmp10, tmp11)
    tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
    tmp14 = tl.where(tmp9, tmp12, tmp13)
    tmp15 = tmp0 >= tmp7  # segment 2
    tl.full([1], 3, tl.int64)
    tmp18 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp15 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp19 = tl.where(tmp9, tmp14, tmp18)
    tmp20 = tl.where(tmp4, tmp5, tmp19)
    tl.store(out_ptr0 + x3, tmp20, xmask)


def call(args):
    # Validates the (4, 4, 4, 4) input and launches the fused trinary
    # max-pool kernel; returns a single (4, 3, 4, 4) tensor.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(192)](arg0_1, buf0, 192, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class MaxPoolTrinaryNew(nn.Module):
    """Triton-backed trinary max pooling over the length dimension."""

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        return length

    def show(self, name='MaxPoolTrinary', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = MaxPoolTrinary()' % (name,))

    def forward(self, input_0):
        return call([input_0])[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
MaxPoolTrinary
| false
| 17,131
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
BalancedL1Loss
|
import functools
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def reduce_loss(loss, reduction):
    """Reduce an elementwise loss tensor.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): One of "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    # Map the reduction name to PyTorch's internal enum (0/1/2).
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:  # 'none'
        return loss
    if reduction_enum == 1:  # 'mean'
        return loss.mean()
    if reduction_enum == 2:  # 'sum'
        return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Avarage factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        # No averaging override: use the standard reduction.
        return reduce_loss(loss, reduction)
    if reduction == 'mean':
        # Average over the caller-supplied factor instead of numel.
        return loss.sum() / avg_factor
    if reduction == 'none':
        return loss
    raise ValueError('avg_factor can not be used with reduction="sum"')
def weighted_loss(loss_func):
    """Wrap an elementwise loss function with weighting and reduction.

    The wrapped function must have the signature
    ``loss_func(pred, target, **kwargs)`` and return elementwise losses.
    The returned function gains ``weight``, ``reduction`` and ``avg_factor``
    arguments with the same semantics as :func:`weight_reduce_loss`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
        None, **kwargs):
        # Compute the raw elementwise loss, then weight and reduce it.
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction, avg_factor)
    return wrapper
@weighted_loss
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
    reduction='mean'):
    """Calculate the elementwise balanced L1 loss.

    Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_

    Args:
        pred (torch.Tensor): The prediction with shape (N, 4).
        target (torch.Tensor): Learning target, same shape as ``pred``.
        beta (float): Threshold on |pred - target| that separates the smooth
            log branch from the linear branch. Defaults to 1.0.
        alpha (float): The denominator ``alpha``. Defaults to 0.5.
        gamma (float): The ``gamma`` term. Defaults to 1.5.
        reduction (str, optional): "none", "mean" or "sum" (handled by the
            ``weighted_loss`` decorator).

    Returns:
        torch.Tensor: The calculated elementwise loss.
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    abs_diff = torch.abs(pred - target)
    # b is chosen so the two branches join smoothly at abs_diff == beta.
    b = np.e ** (gamma / alpha) - 1
    smooth_branch = alpha / b * (b * abs_diff + 1) * torch.log(b *
        abs_diff / beta + 1) - alpha * abs_diff
    linear_branch = gamma * abs_diff + gamma / b - alpha * beta
    return torch.where(abs_diff < beta, smooth_branch, linear_branch)
class BalancedL1Loss(nn.Module):
    """Balanced L1 Loss.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Args:
        alpha (float): Denominator ``alpha`` of the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): ``gamma`` term of the balanced L1 loss. Defaults to 1.5.
        beta (float, optional): Threshold on |pred - target| separating the
            two branches of the piecewise loss. Defaults to 1.0.
        reduction (str, optional): "none", "mean" or "sum".
        loss_weight (float, optional): Multiplier on the final loss.
            Defaults to 1.0.
    """

    def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean',
        loss_weight=1.0):
        super(BalancedL1Loss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
        reduction_override=None, **kwargs):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): Prediction with shape (N, 4).
            target (torch.Tensor): Learning target, same shape as ``pred``.
            weight (torch.Tensor, optional): Sample-wise weights, shape (N,).
            avg_factor (int, optional): Custom averaging denominator.
            reduction_override (str, optional): Overrides ``self.reduction``;
                one of "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        raw_loss = balanced_l1_loss(pred, target, weight, alpha=self.alpha,
            gamma=self.gamma, beta=self.beta, reduction=reduction,
            avg_factor=avg_factor, **kwargs)
        return self.loss_weight * raw_loss
def get_inputs():
    # Prediction and target: two random (4, 4, 4, 4) tensors.
    return [torch.rand([4, 4, 4, 4]) for _ in range(2)]


def get_init_inputs():
    # No positional or keyword constructor arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    # Fused balanced-L1 loss with mean reduction over all 256 elements.
    # in_ptr0/in_ptr1 hold pred/target; in_out_ptr0 receives the scalar loss.
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)  # d = |pred - target|
    tmp4 = 1.0  # beta
    tmp5 = tmp3 < tmp4  # selects the smooth log branch below
    # 19.0855... = e**(gamma/alpha) - 1 with gamma=1.5, alpha=0.5 (the
    # defaults of the eager balanced_l1_loss above).
    tmp6 = 19.085536923187664
    tmp7 = tmp3 * tmp6
    tmp8 = tmp7 + tmp4
    tmp9 = 0.02619784824562798  # alpha / b
    tmp10 = tmp8 * tmp9
    tmp11 = tmp7 * tmp4
    tmp12 = tmp11 + tmp4
    tmp13 = tl_math.log(tmp12)
    tmp14 = tmp10 * tmp13
    tmp15 = 0.5  # alpha (also reused as alpha*beta below)
    tmp16 = tmp3 * tmp15
    # Smooth branch: alpha/b * (b*d + 1) * log(b*d/beta + 1) - alpha*d.
    tmp17 = tmp14 - tmp16
    tmp18 = 1.5  # gamma
    tmp19 = tmp3 * tmp18
    tmp20 = 0.07859354473688394  # gamma / b
    tmp21 = tmp19 + tmp20
    # Linear branch: gamma*d + gamma/b - alpha*beta.
    tmp22 = tmp21 - tmp15
    tmp23 = tl.where(tmp5, tmp17, tmp22)
    tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
    tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
    tmp27 = 256.0  # mean over the 4*4*4*4 elements
    tmp28 = tmp26 / tmp27
    tmp29 = tmp28 * tmp4
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None)
def call(args):
    # Launch the fused balanced-L1 kernel on two (4, 4, 4, 4) CUDA tensors
    # and return the 0-d mean loss.
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Scalar output buffer; the kernel writes the reduced loss into it.
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
def reduce_loss(loss, reduction):
    """Reduce an elementwise loss tensor.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): One of "none", "mean" and "sum".

    Return:
        Tensor: The loss untouched, averaged or summed.
    """
    # F._Reduction maps the string to 0 ("none"), 1 ("mean") or 2 ("sum").
    enum_value = F._Reduction.get_enum(reduction)
    if enum_value == 1:
        return loss.mean()
    if enum_value == 2:
        return loss.sum()
    if enum_value == 0:
        return loss
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply an optional elementwise weight, then reduce the loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor, optional): Element-wise weights.
        reduction (str): "none", "mean" or "sum".
        avg_factor (float, optional): Custom denominator used instead of the
            element count when ``reduction`` is "mean".

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        # No custom denominator: fall back to the plain reduction.
        return reduce_loss(loss, reduction)
    if reduction == 'mean':
        return loss.sum() / avg_factor
    if reduction == 'none':
        return loss
    # avg_factor makes no sense together with reduction="sum".
    raise ValueError('avg_factor can not be used with reduction="sum"')
def weighted_loss(loss_func):
    """Wrap an elementwise loss function with weighting and reduction.

    The wrapped function must have the signature
    ``loss_func(pred, target, **kwargs)`` and return elementwise losses.
    The returned function gains ``weight``, ``reduction`` and ``avg_factor``
    arguments with the same semantics as :func:`weight_reduce_loss`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
        None, **kwargs):
        # Compute the raw elementwise loss, then weight and reduce it.
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction, avg_factor)
    return wrapper
@weighted_loss
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
    reduction='mean'):
    """Calculate the elementwise balanced L1 loss.

    Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_

    Args:
        pred (torch.Tensor): The prediction with shape (N, 4).
        target (torch.Tensor): Learning target, same shape as ``pred``.
        beta (float): Threshold on |pred - target| that separates the smooth
            log branch from the linear branch. Defaults to 1.0.
        alpha (float): The denominator ``alpha``. Defaults to 0.5.
        gamma (float): The ``gamma`` term. Defaults to 1.5.
        reduction (str, optional): "none", "mean" or "sum" (handled by the
            ``weighted_loss`` decorator).

    Returns:
        torch.Tensor: The calculated elementwise loss.
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    abs_diff = torch.abs(pred - target)
    # b is chosen so the two branches join smoothly at abs_diff == beta.
    b = np.e ** (gamma / alpha) - 1
    smooth_branch = alpha / b * (b * abs_diff + 1) * torch.log(b *
        abs_diff / beta + 1) - alpha * abs_diff
    linear_branch = gamma * abs_diff + gamma / b - alpha * beta
    return torch.where(abs_diff < beta, smooth_branch, linear_branch)
class BalancedL1LossNew(nn.Module):
    """Balanced L1 Loss with a Triton-compiled forward.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Args:
        alpha (float): Denominator ``alpha`` of the balanced L1 loss.
        gamma (float): ``gamma`` term of the balanced L1 loss.
        beta (float, optional): Threshold between the two loss branches.
        reduction (str, optional): "none", "mean" or "sum".
        loss_weight (float, optional): Multiplier on the final loss.
    """

    def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean',
        loss_weight=1.0):
        super(BalancedL1LossNew, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        # NOTE(review): the compiled kernel bakes in the default
        # hyper-parameters and mean reduction; the attributes above are not
        # consulted here. Inputs are forwarded straight to `call`.
        result = call([input_0, input_1])
        return result[0]
|
CityU-AIM-Group/HTD
|
BalancedL1Loss
| false
| 17,132
|
[
"MIT"
] | 5
|
0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
https://github.com/CityU-AIM-Group/HTD/tree/0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
C3
|
import torch
import torch.nn as nn
from collections import OrderedDict
class C3(nn.Module):
    """Conv block C3: 16 -> 120 channels via a 5x5 kernel, then ReLU."""

    def __init__(self):
        super(C3, self).__init__()
        layers = OrderedDict()
        layers['c3'] = nn.Conv2d(16, 120, kernel_size=(5, 5))
        layers['relu3'] = nn.ReLU()
        self.c3 = nn.Sequential(layers)

    def forward(self, img):
        return self.c3(img)
def get_inputs():
    # One random batch of shape (4, 16, 64, 64) for the conv block.
    batch = torch.rand([4, 16, 64, 64])
    return [batch]


def get_init_inputs():
    # C3 takes no constructor arguments.
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack the (120, 16, 5, 5) conv weights from contiguous layout into the
    # (400, 1, 80, 16)-strided layout the convolution call below expects.
    ynumel = 1920
    xnumel = 25
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex  # position within a 5x5 kernel window
    y3 = yindex
    y0 = yindex % 16  # input-channel index
    y1 = yindex // 16  # output-channel index
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy
        ='evict_last')
    tl.store(out_ptr0 + (y0 + 16 * x2 + 400 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Repack the (4, 16, 64, 64) input into the (65536, 1, 1024, 16)-strided
    # layout so it matches the repacked weights.
    ynumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex  # spatial offset within a 64x64 plane
    y3 = yindex
    y0 = yindex % 16  # channel index
    y1 = yindex // 16  # batch index
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 16 * x2 + 65536 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1,
    out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
    constexpr):
    # Conv epilogue: add the per-channel bias, apply ReLU, and write both the
    # activation (out_ptr0) and the `<= 0` mask used by the backward pass
    # (out_ptr1).
    ynumel = 480
    xnumel = 3600
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex  # spatial offset within a 60x60 plane
    y0 = yindex % 120  # output channel -> bias index
    y1 = yindex // 120  # batch index
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 120 * x2 + 432000 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1, 1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # mask of zeroed activations
    tl.store(out_ptr0 + (x2 + 3600 * y3), tmp4, xmask & ymask)
    tl.store(out_ptr1 + (y0 + 120 * x2 + 432000 * y1), tmp6, xmask & ymask)
def call(args):
    # Compiled C3 forward: repack weights/input, run the convolution via the
    # extern kernel, then fuse bias-add + ReLU (recording the ReLU mask).
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (120, 16, 5, 5), (400, 25, 5, 1))
    assert_size_stride(primals_2, (120,), (1,))
    assert_size_stride(primals_3, (4, 16, 64, 64), (65536, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((120, 16, 5, 5), (400, 1, 80, 16), torch.
            float32)
        get_raw_stream(0)
        # Weight layout shuffle for the convolution call.
        triton_poi_fused_0[grid(1920, 25)](primals_1, buf0, 1920, 25,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 16, 64, 64), (65536, 1, 1024, 16),
            torch.float32)
        # Input layout shuffle to match the repacked weights.
        triton_poi_fused_1[grid(64, 4096)](primals_3, buf1, 64, 4096,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_3
        buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 120, 60, 60), (432000, 1, 7200, 120))
        buf3 = empty_strided_cuda((4, 120, 60, 60), (432000, 3600, 60, 1),
            torch.float32)
        buf4 = empty_strided_cuda((4, 120, 60, 60), (432000, 1, 7200, 120),
            torch.bool)
        # Bias add + ReLU; buf4 keeps the <=0 mask for autograd.
        triton_poi_fused_convolution_relu_threshold_backward_2[grid(480, 3600)
            ](buf2, primals_2, buf3, buf4, 480, 3600, XBLOCK=32, YBLOCK=32,
            num_warps=4, num_stages=1)
        del buf2
        del primals_2
    return buf3, buf0, buf1, buf4
class C3New(nn.Module):
    """C3 conv block whose forward runs the pre-compiled Triton path."""

    def __init__(self):
        super(C3New, self).__init__()
        layers = OrderedDict()
        layers['c3'] = nn.Conv2d(16, 120, kernel_size=(5, 5))
        layers['relu3'] = nn.ReLU()
        self.c3 = nn.Sequential(layers)

    def forward(self, input_0):
        # Hand the conv weight/bias and the input to the compiled graph.
        weight = self.c3.c3.weight
        bias = self.c3.c3.bias
        output = call([weight, bias, input_0])
        return output[0]
|
ConstantinSeibold/SGL
|
C3
| false
| 17,133
|
[
"MIT"
] | 7
|
fab4d2df515608c2a6a89b2ac8c2655ce8e08b1a
|
https://github.com/ConstantinSeibold/SGL/tree/fab4d2df515608c2a6a89b2ac8c2655ce8e08b1a
|
Subsample
|
import torch
import torch.utils.data
import torch.nn as nn
class Subsample(nn.Module):
    """Keep every other frame along dim 1 and halve the lengths."""

    def __init__(self):
        super().__init__()

    def forward(self, feats, lengths):
        # Even-indexed frames survive; lengths shrink to match.
        subsampled = feats[:, ::2]
        return subsampled, lengths // 2
def get_inputs():
    # Random feature and length tensors, both (4, 4, 4, 4).
    return [torch.rand([4, 4, 4, 4]) for _ in range(2)]


def get_init_inputs():
    # Subsample takes no constructor arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_floor_divide_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Elementwise lengths // 2, implemented as floor(lengths * 0.5).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = libdevice.floor(tmp2)
    tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
    # lengths // 2 goes through the Triton floor-divide kernel; the feature
    # subsampling is a zero-copy strided view (stride 32 on dim 1 keeps
    # every other frame, i.e. feats[:, ::2]).
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_floor_divide_0[grid(256)](arg1_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg1_1
    return reinterpret_tensor(arg0_1, (4, 2, 4, 4), (64, 32, 4, 1), 0), buf0
class SubsampleNew(nn.Module):
    """Triton-compiled Subsample: every other frame kept, lengths halved."""

    def __init__(self):
        super().__init__()

    def forward(self, input_0, input_1):
        # input_0: features, input_1: lengths; both routed through `call`.
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0], output[1]
|
CoraJung/flexible-input-slu
|
Subsample
| false
| 17,134
|
[
"Apache-2.0"
] | 7
|
6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
|
https://github.com/CoraJung/flexible-input-slu/tree/6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
|
MultiheadAttention
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import Parameter
class MultiheadAttention(nn.Module):
    """Multi-headed attention.
    See "Attention Is All You Need" for more details.
    """

    def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
        add_bias_kv=False, add_zero_attn=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.attn_dropout = attn_dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
        # Queries are pre-multiplied by 1/sqrt(head_dim) in forward().
        self.scaling = self.head_dim ** -0.5
        # q, k and v projections packed into one (3*E, E) weight.
        self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        self.register_parameter('in_proj_bias', None)
        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier init for the projection weights, zeros for the biases.
        nn.init.xavier_uniform_(self.in_proj_weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.0)
            nn.init.constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)

    def forward(self, query, key, value, attn_mask=None):
        """Input shape: Time x Batch x Channel
        Self-attention can be implemented by passing in the same arguments for
        query, key and value. Timesteps can be masked by supplying a T x T mask in the
        `attn_mask` argument. Padding elements can be excluded from
        the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
        batch x src_len, where padding elements are indicated by 1s.
        """
        # Data-pointer identity picks the cheapest projection path below.
        qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
        kv_same = key.data_ptr() == value.data_ptr()
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        assert key.size() == value.size()
        if qkv_same:
            # Self-attention: one matmul yields q, k and v.
            q, k, v = self.in_proj_qkv(query)
        elif kv_same:
            # Encoder-decoder attention: one matmul yields k and v.
            q = self.in_proj_q(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k, v = self.in_proj_kv(key)
        else:
            q = self.in_proj_q(query)
            k = self.in_proj_k(key)
            v = self.in_proj_v(value)
        q = self.scaling * q
        if self.bias_k is not None:
            assert self.bias_v is not None
            # Append the learned bias as one extra key/value timestep.
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
                    attn_mask.size(0), 1)], dim=1)
        # Fold heads into the batch dim: (T, B, E) -> (B*H, T, head_dim).
        q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
            ).transpose(0, 1)
        if k is not None:
            k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
                ).transpose(0, 1)
        if v is not None:
            v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
                ).transpose(0, 1)
        src_len = k.size(1)
        if self.add_zero_attn:
            # Add an all-zero key/value slot every query may attend to.
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
                dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
                dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
                    attn_mask.size(0), 1)], dim=1)
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
            src_len]
        if attn_mask is not None:
            try:
                attn_weights += attn_mask.unsqueeze(0)
            except:
                None
                None
                assert False
        # Softmax in float32 for numerical stability, then cast back.
        attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(
            attn_weights)
        attn_weights = F.dropout(attn_weights, p=self.attn_dropout,
            training=self.training)
        attn = torch.bmm(attn_weights, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
            head_dim]
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        # Second return value: attention weights averaged over the heads.
        attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
        attn_weights = attn_weights.sum(dim=1) / self.num_heads
        return attn, attn_weights

    def in_proj_qkv(self, query):
        # Full packed projection split into q, k, v chunks.
        return self._in_proj(query).chunk(3, dim=-1)

    def in_proj_kv(self, key):
        # Rows [E, 3E): the k and v halves of the packed projection.
        return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)

    def in_proj_q(self, query, **kwargs):
        return self._in_proj(query, end=self.embed_dim, **kwargs)

    def in_proj_k(self, key):
        return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)

    def in_proj_v(self, value):
        return self._in_proj(value, start=2 * self.embed_dim)

    def _in_proj(self, input, start=0, end=None, **kwargs):
        # Slice the requested row range out of the packed qkv weight/bias.
        weight = kwargs.get('weight', self.in_proj_weight)
        bias = kwargs.get('bias', self.in_proj_bias)
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)
def get_inputs():
    # Query (T, B, E) plus 4-D key/value tensors for the harness.
    shapes = ([4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4])
    return [torch.rand(list(shape)) for shape in shapes]


def get_init_inputs():
    # Constructor arguments: embed_dim 4 split across 4 heads.
    return [[], {'embed_dim': 4, 'num_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # q-projection epilogue: add the query slice of the in_proj bias, then
    # apply the attention scaling. head_dim ** -0.5 == 1.0 here because
    # embed_dim=4 is split across 4 heads (see get_init_inputs above).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # embedding channel -> bias index
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    # Numerically stable softmax over each row of 16 attention scores;
    # 64 rows in total.
    xnumel = 64
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, float('-inf'))
    # Subtract the row max before exponentiating for stability.
    tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
    tmp5 = tmp0 - tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.where(xmask, tmp7, 0)
    tmp10 = tl.sum(tmp9, 1)[:, None]
    tmp11 = tmp6 / tmp10
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Transpose copy of the (16, 4) attention output: reads strided along y,
    # writes contiguous rows, rearranging ahead of the output projection.
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Head-average the attention weights: sum the four 64-element head
    # slices and multiply by 1/4.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64  # position within one head's weight block
    x1 = xindex // 64
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 0.25  # 1 / num_heads
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
    # Compiled MultiheadAttention forward: q/k/v projections via mm/addmm on
    # slices of the packed (12, 4) in_proj weight, scaled-dot-product
    # attention with softmax, then the output projection and head-averaged
    # attention weights.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (12, 4), (4, 1))
    assert_size_stride(primals_5, (12,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # q = query @ W_q (bias fused later in triton_poi_fused_mul_0).
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # k = key @ W_k + b_k (rows 4:8 of the packed projection).
        extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
            reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1,
            beta=1, out=buf1)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # v = value @ W_v + b_v (rows 8:12 of the packed projection).
        extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
            reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1,
            beta=1, out=buf2)
        del primals_4
        buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        # Add the q bias and apply the (here 1.0) attention scaling in place.
        triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
        # Attention scores: q @ k^T per head.
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0),
            0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
        buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
        triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=32,
            num_warps=4, num_stages=1)
        del buf4
        buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
        # Attention output: softmax weights @ v.
        extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1,
            16, 1), 0), out=buf8)
        buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
        triton_poi_fused_clone_2[grid(4, 16)](buf8, buf9, 4, 16, XBLOCK=16,
            YBLOCK=4, num_warps=1, num_stages=1)
        buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0)
        del buf8
        # Final output projection with bias.
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (
            4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf10)
        del primals_7
        buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        # Head-averaged attention weights for the second return value.
        triton_poi_fused_div_sum_3[grid(256)](buf7, buf11, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
    # Trailing tensors are saved activations for autograd.
    return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
        ), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
        ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0
        ), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0
        ), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0
        ), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0)
class MultiheadAttentionNew(nn.Module):
    """Multi-headed attention.
    See "Attention Is All You Need" for more details.
    """

    def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
        add_bias_kv=False, add_zero_attn=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.attn_dropout = attn_dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
        # Scale 1/sqrt(head_dim); the compiled path bakes this in.
        self.scaling = self.head_dim ** -0.5
        # q, k and v projections packed into one (3*E, E) weight.
        self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        self.register_parameter('in_proj_bias', None)
        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier init for the projection weights, zeros for the biases.
        nn.init.xavier_uniform_(self.in_proj_weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.0)
            nn.init.constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)

    def in_proj_qkv(self, query):
        return self._in_proj(query).chunk(3, dim=-1)

    def in_proj_kv(self, key):
        return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)

    def in_proj_q(self, query, **kwargs):
        return self._in_proj(query, end=self.embed_dim, **kwargs)

    def in_proj_k(self, key):
        return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)

    def in_proj_v(self, value):
        return self._in_proj(value, start=2 * self.embed_dim)

    def _in_proj(self, input, start=0, end=None, **kwargs):
        # Slice the requested row range out of the packed qkv weight/bias.
        weight = kwargs.get('weight', self.in_proj_weight)
        bias = kwargs.get('bias', self.in_proj_bias)
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)

    def forward(self, input_0, input_1, input_2):
        # query/key/value plus the module parameters feed the compiled graph;
        # returns (attn_output, head-averaged attn_weights).
        primals_4 = self.in_proj_weight
        primals_5 = self.in_proj_bias
        primals_6 = self.out_proj.weight
        primals_7 = self.out_proj.bias
        primals_1 = input_0
        primals_2 = input_1
        primals_3 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0], output[1]
|
Columbine21/TFR-Net
|
MultiheadAttention
| false
| 17,135
|
[
"MIT"
] | 7
|
1da01577542e7f477fdf7323ec0696aebc632357
|
https://github.com/Columbine21/TFR-Net/tree/1da01577542e7f477fdf7323ec0696aebc632357
|
L2Norm
|
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init
class L2Norm(nn.Module):
    """Channelwise L2 normalization with a learnable per-channel scale."""

    def __init__(self, n_channels, scale):
        super(L2Norm, self).__init__()
        self.n_channels = n_channels
        self.gamma = scale or None
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        # Every channel scale starts at gamma.
        init.constant_(self.weight, self.gamma)

    def forward(self, x):
        # L2 norm across the channel dim; eps keeps the division finite.
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        normalized = torch.div(x, norm)
        scale = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(
            normalized)
        return scale * normalized
def get_inputs():
    """Sample forward-pass inputs for smoke testing."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) for the module under test."""
    kwargs = {'n_channels': 4, 'scale': 1.0}
    return [[], kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    # Fused L2Norm forward: out = weight[c] * x / (sqrt(sum_c x^2) + 1e-10).
    # in_ptr0 is the (4,) per-channel weight, in_ptr1 the (4, 4, 4, 4) input
    # with strides (64, 16, 4, 1); one lane per output element.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x1 = channel, x0 = spatial offset within a sample, x2 = batch index.
    x1 = xindex // 16 % 4
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    # Load the same spatial position from each of the 4 channels so the
    # channel-sum reduction happens entirely in registers.
    tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 * tmp4
    tmp6 = tmp3 + tmp5
    tmp8 = tmp7 * tmp7
    tmp9 = tmp6 + tmp8
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 + tmp11
    tmp13 = libdevice.sqrt(tmp12)
    # eps matches L2Norm.eps.
    tmp14 = 1e-10
    tmp15 = tmp13 + tmp14
    tmp16 = tmp1 / tmp15
    tmp17 = tmp0 * tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
    # Inductor-generated launcher for L2Norm: validates layouts, allocates
    # the output buffer on CUDA device 0 and runs the fused kernel.
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_2,
            primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    # Returns (normalized output, input saved for backward).
    return buf0, primals_1
class L2NormNew(nn.Module):
    """Triton-accelerated drop-in for L2Norm; forward delegates to the
    inductor-generated `call` launcher above (CUDA only)."""

    def __init__(self, n_channels, scale):
        super(L2NormNew, self).__init__()
        self.n_channels = n_channels
        # Falsy scales collapse to None, mirroring the eager L2Norm.
        self.gamma = scale or None
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        # Every channel's scale starts at gamma.
        init.constant_(self.weight, self.gamma)

    def forward(self, input_0):
        primals_2 = self.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        # call() also returns the saved input; only the result is exposed.
        return output[0]
|
Coral-SH/TextBoxes_PyTorch
|
L2Norm
| false
| 17,136
|
[
"MIT"
] | 8
|
fb1636139d69e762b567a234c3a4b69e3dd43071
|
https://github.com/Coral-SH/TextBoxes_PyTorch/tree/fb1636139d69e762b567a234c3a4b69e3dd43071
|
Ternary
|
import torch
from torch import nn
class Ternary(nn.Module):
"""
Ternarize the input activations to -1, 0, 1.
"""
def __init__(self, left=-0.25, right=0.25):
super().__init__()
self.left = left
self.right = right
def forward(self, input):
input = input.clone()
left_index = input.lt(self.left)
right_index = input.ge(self.right)
input[left_index] = -1
input[right_index] = 1
input[~(left_index | right_index)] = 0
return input
def backward(self, grad_output):
grad_input = grad_output.clone()
return grad_input
def get_inputs():
    """Sample forward-pass inputs for smoke testing."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) for the module under test."""
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Fused ternarization: x < -0.25 -> -1, x >= 0.25 -> 1, otherwise 0.
    # Thresholds are baked in as constants from Ternary's defaults.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = -0.25
    tmp2 = tmp0 < tmp1
    tmp3 = -1.0
    tmp4 = tl.where(tmp2, tmp3, tmp0)
    tmp5 = 0.25
    tmp6 = tmp0 >= tmp5
    tmp7 = 1.0
    tmp8 = tl.where(tmp6, tmp7, tmp4)
    # Elements matched by neither mask are zeroed last; masks are mutually
    # exclusive, so the selection order cannot conflict.
    tmp9 = tmp2 | tmp6
    tmp10 = tmp9 == 0
    tmp11 = 0.0
    tmp12 = tl.where(tmp10, tmp11, tmp8)
    tl.store(in_out_ptr0 + x0, tmp12, xmask)
def call(args):
    # Allocates the output and runs the ternarize kernel; the buf0->buf1->buf2
    # renames mirror inductor's buffer-reuse bookkeeping (one allocation).
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = buf0
        del buf0
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_index_put_lift_fresh_0[grid(256)](buf2, arg0_1,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf2,
class TernaryNew(nn.Module):
    """
    Ternarize the input activations to -1, 0, 1.

    Triton-accelerated drop-in for Ternary; forward delegates to the
    inductor-generated `call` launcher above (CUDA only). NOTE(review):
    the left/right thresholds stored here are ignored by the compiled
    kernel, which has -0.25/0.25 baked in.
    """

    def __init__(self, left=-0.25, right=0.25):
        super().__init__()
        self.left = left
        self.right = right

    def backward(self, grad_output):
        # Straight-through estimator; not wired into autograd (plain method).
        grad_input = grad_output.clone()
        return grad_input

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
Ternary
| false
| 17,137
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
NonLocalLayer
|
import torch
from torch import nn
class NonLocalLayer(nn.Module):
    """Non-local (self-attention) block over all (time, agent) positions.

    A temporal conv lifts inputs to hidden_dim, theta/phi/g form attention
    queries/keys/values over the flattened length*n_agents axis, and f maps
    the attended values to output_dim; a residual path is added at the end.
    """

    def __init__(self, input_dim, output_dim, hidden_dim=None,
        t_kernel_size=1, t_stride=1, t_padding=None, t_dilation=1, bias=
        True, residual=True):
        super().__init__()
        if t_padding is None:
            # "Same" padding for the temporal kernel.
            t_padding = (t_kernel_size - 1) // 2
        self.input_dim = input_dim
        self.output_dim = output_dim
        hidden_dim = hidden_dim if hidden_dim is not None else (input_dim +
            output_dim) // 2
        self.hidden_dim = hidden_dim
        self.theta = nn.Linear(hidden_dim, hidden_dim)
        self.phi = nn.Linear(hidden_dim, hidden_dim)
        self.g = nn.Linear(hidden_dim, hidden_dim)
        self.f = nn.Linear(hidden_dim, output_dim)
        # Conv acts on (batch, input_dim, length, n_agents): kernel spans
        # time only, never mixes agents.
        self.conv = nn.Conv2d(input_dim, hidden_dim, kernel_size=(
            t_kernel_size, 1), padding=(t_padding, 0), stride=(t_stride, 1),
            dilation=(t_dilation, 1), bias=bias)
        if not residual:
            # Constant 0: forward detects the int and skips the permute.
            self.residual = lambda x: 0
        elif input_dim == output_dim and t_stride == 1:
            self.residual = lambda x: x
        else:
            # 1x1 conv to match channels / temporal stride.
            self.residual = nn.Sequential(nn.Conv2d(input_dim, output_dim,
                kernel_size=1, stride=(t_stride, 1)))
        self.relu = nn.ReLU()

    def forward(self, x):
        """
        :param x: [batch, length, n_agents, input_dim]
        return output[batch, length, n_agents, output_dim]
        """
        batch, length, n_agents, _ = x.size()
        # To channels-first for the convs.
        x = x.permute(0, 3, 1, 2)
        res = self.residual(x)
        if not isinstance(res, int):
            # Back to channels-last; int means the zero-residual case.
            res = res.permute(0, 2, 3, 1)
        x = self.conv(x).permute(0, 2, 3, 1)
        # Temporal stride may have changed the length.
        length = x.size(1)
        x = x.reshape(batch, length * n_agents, self.hidden_dim)
        theta = self.theta(x.reshape(-1, self.hidden_dim)).reshape(batch,
            length * n_agents, -1)
        phi = self.phi(x.reshape(-1, self.hidden_dim)).reshape(batch,
            length * n_agents, -1).permute(0, 2, 1)
        g = self.g(x.reshape(-1, self.hidden_dim)).reshape(batch, length *
            n_agents, -1)
        # Scaled dot-product attention over all positions.
        y = (torch.bmm(theta, phi) / theta.size(-1) ** 0.5).softmax(dim=2)
        # Extra 1/sqrt(num_positions) scaling of the attended values.
        z = torch.bmm(y, g) / y.size(-1) ** 0.5
        z = self.f(z.view(batch, length * n_agents, -1)).reshape(batch,
            length, n_agents, self.output_dim)
        return z + res
def get_inputs():
    """Sample forward-pass inputs for smoke testing."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) for the module under test."""
    kwargs = {'input_dim': 4, 'output_dim': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    # In-place bias add after the external convolution: x2 indexes the conv
    # output, x0 selects the per-channel bias (channel = index % 4 in this
    # channels-last layout).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    # Row-wise softmax over the 16 attention scores of each row, with the
    # 1/sqrt(hidden_dim) = 0.5 scaling fused in: exp((x - max) * 0.5)
    # equals softmax(x * 0.5) up to the row-constant shift.
    xnumel = 64
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    # Max-subtraction for numerical stability.
    tmp5 = tl.where(xmask, tmp3, float('-inf'))
    tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
    tmp7 = tmp2 - tmp6
    tmp8 = 0.5
    tmp9 = tmp7 * tmp8
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.where(xmask, tmp11, 0)
    tmp14 = tl.sum(tmp13, 1)[:, None]
    tmp15 = tmp10 / tmp14
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask)
@triton.jit
def triton_poi_fused_div_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place scale of the attended values by 1/sqrt(y.size(-1)) =
    # 1/sqrt(16) = 0.25 (division folded into a multiply).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 0.25
    tmp2 = tmp0 * tmp1
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    # Final fused epilogue: add f's per-feature bias (in_ptr0, indexed by
    # x0 = feature) and the identity residual (in_ptr1, the original input).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    # Inductor-generated launcher for NonLocalLayer(4, 4) on its default
    # [4, 4, 4, 4] input. primals_1 = input, primals_2/3 = conv weight/bias,
    # primals_4..9 = theta/phi/g weight+bias pairs, primals_10/11 = f
    # weight/bias. Matmuls/convs go to cuBLAS/cuDNN via extern_kernels;
    # element-wise work runs in the Triton kernels above.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4, 4), (4, 1))
    assert_size_stride(primals_11, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Temporal conv; the input permute(0, 3, 1, 2) is expressed as a
        # zero-copy stride reinterpretation.
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
            4, 4, 4), (64, 1, 16, 4), 0), primals_2, stride=(1, 1), padding
            =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
            0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 1, 16, 4))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # Conv bias add (bias was split out of the convolution call).
        triton_poi_fused_convolution_0[grid(256)](buf1, primals_3, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        # theta / phi / g projections as fused addmm (bias + matmul).
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
        del primals_7
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_9
        # Attention scores theta @ phi^T, then scaled softmax.
        buf5 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1),
            0), reinterpret_tensor(buf3, (4, 4, 16), (64, 1, 4), 0), out=buf5)
        buf8 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        triton_per_fused__softmax_1[grid(64)](buf5, buf8, 64, 16, XBLOCK=32,
            num_warps=4, num_stages=1)
        del buf5
        # Attend to g, scale by 0.25, then apply f.
        buf9 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        extern_kernels.bmm(buf8, reinterpret_tensor(buf4, (4, 16, 4), (64,
            4, 1), 0), out=buf9)
        buf10 = buf9
        del buf9
        triton_poi_fused_div_2[grid(256)](buf10, 256, XBLOCK=256, num_warps
            =4, num_stages=1)
        buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf10, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf11)
        buf12 = reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf11
        # f bias + identity residual, fused.
        triton_poi_fused_add_3[grid(256)](buf12, primals_11, primals_1, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_11
    # First element is the layer output; the rest are tensors saved for
    # the backward pass.
    return buf12, primals_2, reinterpret_tensor(primals_1, (4, 4, 4, 4), (
        64, 1, 16, 4), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0
        ), buf8, reinterpret_tensor(buf10, (64, 4), (4, 1), 0
        ), primals_10, reinterpret_tensor(buf4, (4, 4, 16), (64, 1, 4), 0
        ), reinterpret_tensor(buf2, (4, 4, 16), (64, 1, 4), 0
        ), reinterpret_tensor(buf3, (4, 16, 4), (64, 4, 1), 0
        ), primals_8, primals_6, primals_4
class NonLocalLayerNew(nn.Module):
    """Triton-accelerated drop-in for NonLocalLayer; forward delegates to
    the inductor-generated `call` launcher above (CUDA only). NOTE(review):
    `call` hard-codes the identity-residual path, so only configurations
    where input_dim == output_dim and t_stride == 1 match the eager layer.
    """

    def __init__(self, input_dim, output_dim, hidden_dim=None,
        t_kernel_size=1, t_stride=1, t_padding=None, t_dilation=1, bias=
        True, residual=True):
        super().__init__()
        if t_padding is None:
            # "Same" padding for the temporal kernel.
            t_padding = (t_kernel_size - 1) // 2
        self.input_dim = input_dim
        self.output_dim = output_dim
        hidden_dim = hidden_dim if hidden_dim is not None else (input_dim +
            output_dim) // 2
        self.hidden_dim = hidden_dim
        self.theta = nn.Linear(hidden_dim, hidden_dim)
        self.phi = nn.Linear(hidden_dim, hidden_dim)
        self.g = nn.Linear(hidden_dim, hidden_dim)
        self.f = nn.Linear(hidden_dim, output_dim)
        self.conv = nn.Conv2d(input_dim, hidden_dim, kernel_size=(
            t_kernel_size, 1), padding=(t_padding, 0), stride=(t_stride, 1),
            dilation=(t_dilation, 1), bias=bias)
        if not residual:
            self.residual = lambda x: 0
        elif input_dim == output_dim and t_stride == 1:
            self.residual = lambda x: x
        else:
            self.residual = nn.Sequential(nn.Conv2d(input_dim, output_dim,
                kernel_size=1, stride=(t_stride, 1)))
        self.relu = nn.ReLU()

    def forward(self, input_0):
        # Pack parameters in the positional order `call` expects.
        primals_4 = self.theta.weight
        primals_3 = self.theta.bias
        primals_6 = self.phi.weight
        primals_5 = self.phi.bias
        primals_8 = self.g.weight
        primals_7 = self.g.bias
        primals_10 = self.f.weight
        primals_9 = self.f.bias
        primals_2 = self.conv.weight
        primals_11 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        # Only the layer output is exposed; the rest are backward saves.
        return output[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
NonLocalLayer
| false
| 17,138
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
Abs
|
import torch
import torch.utils.data
class Abs(torch.nn.Module):
    """Element-wise absolute value wrapped as a module."""

    def __init__(self):
        super(Abs, self).__init__()

    def forward(self, input):
        return input.abs()
def get_inputs():
    """Sample forward-pass inputs for smoke testing."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) for the module under test."""
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Element-wise |x| over a flat 256-element tensor, one lane per element.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.abs(tmp0)
    tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
    # Allocates the output and runs the abs kernel on CUDA device 0.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_abs_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class AbsNew(torch.nn.Module):
    """Triton-accelerated drop-in for Abs; forward delegates to the
    inductor-generated `call` launcher above (CUDA only)."""

    def __init__(self):
        super(AbsNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
CoraJung/flexible-input-slu
|
Abs
| false
| 17,139
|
[
"Apache-2.0"
] | 7
|
6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
|
https://github.com/CoraJung/flexible-input-slu/tree/6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
|
AvgPoolPad
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.backends.cudnn
class AvgPoolPad(nn.Module):
    """3x3 average pooling preceded by an extra zero pad on the top/left
    edge; the first row/column of the pooled map is dropped afterwards, so
    the output is aligned as if the pad had shifted the window grid."""

    def __init__(self, stride=2, padding=1):
        super(AvgPoolPad, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        # count_include_pad=False: averages ignore zero-padding positions.
        self.pool = nn.AvgPool2d(3, stride=stride, padding=padding,
            count_include_pad=False)

    def forward(self, x):
        padded = self.pad(x)
        pooled = self.pool(padded)
        return pooled[:, :, 1:, 1:]
def get_inputs():
    """Sample forward-pass inputs for smoke testing."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) for the module under test."""
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Fused ZeroPad2d((1, 0, 1, 0)) + AvgPool2d(3, stride=2, padding=1,
    # count_include_pad=False) over a (4, 4, 4, 4) input, producing the
    # intermediate (4, 4, 3, 3) map the launcher later slices to 2x2.
    # Each output element sums its valid 3x3 window taps (boundary masks
    # tmp2..tmp10 etc. implement both pads) and divides by the count of
    # positions inside the 5x5 padded frame (tmp92).
    xnumel = 144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x1/x0 = output row/col within a 3x3 map, x2 = (batch, channel) index.
    x1 = xindex // 3 % 3
    x0 = xindex % 3
    x2 = xindex // 9
    x4 = xindex
    tmp0 = -1 + 2 * x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 5, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + 2 * x0
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = -2 + 2 * x1
    tmp12 = tmp11 >= tmp1
    tmp13 = -2 + 2 * x0
    tmp14 = tmp13 >= tmp1
    tmp15 = tmp12 & tmp14
    tmp16 = tmp15 & tmp10
    # Tap (-1, -1) of the window; out-of-range loads are masked to 0.
    tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp10, tmp17, tmp18)
    tmp20 = 2 * x0
    tmp21 = tmp20 >= tmp1
    tmp22 = tmp20 < tmp3
    tmp23 = tmp21 & tmp22
    tmp24 = tmp5 & tmp23
    tmp25 = tmp12 & tmp7
    tmp26 = tmp25 & tmp24
    tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
    tmp29 = tl.where(tmp24, tmp27, tmp28)
    tmp30 = tmp29 + tmp19
    tmp31 = 1 + 2 * x0
    tmp32 = tmp31 >= tmp1
    tmp33 = tmp31 < tmp3
    tmp34 = tmp32 & tmp33
    tmp35 = tmp5 & tmp34
    tmp36 = tmp12 & tmp21
    tmp37 = tmp36 & tmp35
    tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
    tmp40 = tl.where(tmp35, tmp38, tmp39)
    tmp41 = tmp40 + tmp30
    tmp42 = 2 * x1
    tmp43 = tmp42 >= tmp1
    tmp44 = tmp42 < tmp3
    tmp45 = tmp43 & tmp44
    tmp46 = tmp45 & tmp9
    tmp47 = tmp2 & tmp14
    tmp48 = tmp47 & tmp46
    tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
    tmp51 = tl.where(tmp46, tmp49, tmp50)
    tmp52 = tmp51 + tmp41
    tmp53 = tmp45 & tmp23
    tmp54 = tmp2 & tmp7
    tmp55 = tmp54 & tmp53
    tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
    tmp58 = tl.where(tmp53, tmp56, tmp57)
    tmp59 = tmp58 + tmp52
    tmp60 = tmp45 & tmp34
    tmp61 = tmp2 & tmp21
    tmp62 = tmp61 & tmp60
    tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
    tmp65 = tl.where(tmp60, tmp63, tmp64)
    tmp66 = tmp65 + tmp59
    tmp67 = 1 + 2 * x1
    tmp68 = tmp67 >= tmp1
    tmp69 = tmp67 < tmp3
    tmp70 = tmp68 & tmp69
    tmp71 = tmp70 & tmp9
    tmp72 = tmp43 & tmp14
    tmp73 = tmp72 & tmp71
    tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
    tmp76 = tl.where(tmp71, tmp74, tmp75)
    tmp77 = tmp76 + tmp66
    tmp78 = tmp70 & tmp23
    tmp79 = tmp43 & tmp7
    tmp80 = tmp79 & tmp78
    tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
    tmp83 = tl.where(tmp78, tmp81, tmp82)
    tmp84 = tmp83 + tmp77
    tmp85 = tmp70 & tmp34
    tmp86 = tmp43 & tmp21
    tmp87 = tmp86 & tmp85
    tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype)
    tmp90 = tl.where(tmp85, tmp88, tmp89)
    tmp91 = tmp90 + tmp84
    # Divisor = clamped window area inside the 5x5 padded frame
    # (count_include_pad=False semantics).
    tmp92 = (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (
        0 * (0 >= -1 + 2 * x1) + (-1 + 2 * x1) * (-1 + 2 * x1 > 0)) + (5 *
        (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) * (5 * (5 <= 2 +
        2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 *
        x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (5 * (5 <= 2 + 2 * x1) +
        (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x1) + (
        -1 + 2 * x1) * (-1 + 2 * x1 > 0)) * (5 * (5 <= 2 + 2 * x0) + (2 + 2 *
        x0) * (2 + 2 * x0 < 5))
    tmp93 = tmp91 / tmp92
    tl.store(out_ptr0 + x4, tmp93, xmask)
def call(args):
    # Runs the fused pad + avg-pool kernel, then drops the first row/column
    # of each 3x3 map by returning a strided view at offset 4 (one row plus
    # one element), yielding the (4, 4, 2, 2) output without a copy.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_avg_pool2d_constant_pad_nd_0[grid(144)](arg0_1,
            buf0, 144, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4),
class AvgPoolPadNew(nn.Module):
    """Triton-accelerated drop-in for AvgPoolPad; forward delegates to the
    inductor-generated `call` launcher above (CUDA only). The pad/pool
    submodules are kept for state-dict compatibility but unused in forward.
    """

    def __init__(self, stride=2, padding=1):
        super(AvgPoolPadNew, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.AvgPool2d(3, stride=stride, padding=padding,
            count_include_pad=False)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
CalebEverett/fastai-dl2
|
AvgPoolPad
| false
| 17,140
|
[
"Apache-2.0"
] | 4
|
64d23592eddca6ca1f3647e73c319e97c8eb392b
|
https://github.com/CalebEverett/fastai-dl2/tree/64d23592eddca6ca1f3647e73c319e97c8eb392b
|
GlobalAvgPool2d
|
import torch
import torch.nn as nn
import torch.utils.data
class GlobalAvgPool2d(nn.Module):
    def __init__(self):
        """Global average pooling over the input's spatial dimensions"""
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, inputs):
        # Pool each feature map to 1x1, then flatten to (batch, channels).
        pooled = nn.functional.adaptive_avg_pool2d(inputs, 1)
        return pooled.view(inputs.size(0), -1)
def get_inputs():
    """Sample forward-pass inputs for smoke testing."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) for the module under test."""
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    # Global average pool: one row per (batch, channel) pair; the 16 spatial
    # elements are summed along the r-axis and divided by their count.
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    # 16 = number of spatial positions per feature map (4 * 4).
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
    # Allocates a (4, 4, 1, 1) buffer, runs the mean-reduction kernel and
    # returns it reinterpreted as the flattened (4, 4) result.
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
class GlobalAvgPool2dNew(nn.Module):
    """Triton-accelerated drop-in for GlobalAvgPool2d; forward delegates to
    the inductor-generated `call` launcher above (CUDA only)."""

    def __init__(self):
        """Global average pooling over the input's spatial dimensions"""
        super(GlobalAvgPool2dNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
BigFishMaster/tnt
|
GlobalAvgPool2d
| false
| 17,141
|
[
"BSD-3-Clause"
] | 3
|
8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
LayerNorm
|
import torch
import torch.utils.data
import torch.nn as nn
class LayerNorm(torch.nn.Module):
    """Normalize over dim 1 with mean and sample std, then apply a learnable
    affine gamma * x_hat + beta.

    NOTE: gamma/beta have shape (dim,), so they broadcast against the LAST
    dimension of the input — callers must keep the trailing size equal to
    `dim` (true for the intended (N, dim, ..., dim)-shaped inputs).
    """

    def __init__(self, dim, eps=1e-06):
        super(LayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(dim))
        self.beta = nn.Parameter(torch.zeros(dim))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(1, keepdim=True)
        # torch.std defaults to the unbiased (n - 1) estimator.
        sigma = x.std(1, keepdim=True)
        centered = x - mu
        return self.gamma * centered / (sigma + self.eps) + self.beta
def get_inputs():
    """Sample forward-pass inputs for smoke testing."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) for the module under test."""
    kwargs = {'dim': 4}
    return [[], kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused LayerNorm forward: gamma * (x - mean_1) / (std_1 + 1e-6) + beta
    # for a (4, 4, 4, 4) input. in_ptr0 = gamma, in_ptr1 = x, in_ptr2 = beta.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x0 = last-dim index (gamma/beta broadcast), x3 = batch, x5 = position
    # within one dim-1 slice.
    x0 = xindex % 4
    x4 = xindex
    x3 = xindex // 64
    x5 = xindex % 16
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x4, xmask)
    # The four dim-1 siblings of this position, for the in-register
    # mean/std reduction.
    tmp2 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy=
        'evict_last')
    tmp5 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy=
        'evict_last')
    tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = 4.0
    tmp10 = tmp8 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp0 * tmp11
    tmp13 = tmp2 - tmp10
    tmp14 = tmp13 * tmp13
    tmp15 = tmp3 - tmp10
    tmp16 = tmp15 * tmp15
    tmp17 = tmp14 + tmp16
    tmp18 = tmp5 - tmp10
    tmp19 = tmp18 * tmp18
    tmp20 = tmp17 + tmp19
    tmp21 = tmp7 - tmp10
    tmp22 = tmp21 * tmp21
    tmp23 = tmp20 + tmp22
    # n - 1 = 3: unbiased std, matching torch.Tensor.std's default.
    tmp24 = 3.0
    tmp25 = tmp23 / tmp24
    tmp26 = libdevice.sqrt(tmp25)
    tmp27 = 1e-06
    tmp28 = tmp26 + tmp27
    tmp29 = tmp12 / tmp28
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr0 + x4, tmp31, xmask)
def call(args):
    # Launcher: primals_1 = input, primals_2 = gamma, primals_3 = beta.
    # Runs the single fused LayerNorm kernel and returns (output, input).
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
            primals_1, primals_3, buf0, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_2
        del primals_3
    return buf0, primals_1
class LayerNormNew(torch.nn.Module):
    """Triton-accelerated drop-in for LayerNorm; forward delegates to the
    inductor-generated `call` launcher above (CUDA only). NOTE(review):
    eps=1e-06 is baked into the kernel; this attribute is informational.
    """

    def __init__(self, dim, eps=1e-06):
        super(LayerNormNew, self).__init__()
        self.gamma = nn.Parameter(torch.ones(dim))
        self.beta = nn.Parameter(torch.zeros(dim))
        self.eps = eps

    def forward(self, input_0):
        primals_2 = self.gamma
        primals_3 = self.beta
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        # call() also returns the saved input; only the result is exposed.
        return output[0]
|
CoraJung/flexible-input-slu
|
LayerNorm
| false
| 17,142
|
[
"Apache-2.0"
] | 7
|
6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
|
https://github.com/CoraJung/flexible-input-slu/tree/6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
|
WithBall
|
import math
import torch
from torch import nn
def apply_last_dim(model, x):
    """Apply `model` across the last dimension of `x`.

    `x` is flattened to (-1, last_dim), fed through `model`, and reshaped
    back with the (possibly changed) trailing size `model` produced.
    """
    shape = list(x.size())
    flat_out = model(x.contiguous().view(-1, shape[-1]))
    shape[-1] = flat_out.size(-1)
    return flat_out.view(torch.Size(shape))
def get_int_dim_index(name):
    """Map an axis name ('a', 'x', 'y', 'z') to its integer index
    (-1, 0, 1, 2); integer inputs pass through unchanged."""
    if isinstance(name, int):
        return name
    name_list = 'axyz'
    assert name in name_list
    return name_list.index(name) - 1
class MinPoolTrinary(nn.Module):
    """Collapse the time axis to three steps (pre / act / eff) by
    min-pooling the first, middle and last thirds of the sequence."""

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        return length

    def forward(self, states):
        """
        :param states: [batch, length, *]
        """
        assert states.size(1) >= 3
        side = (states.size(1) + 1) // 3
        # First `side` steps, the middle remainder, and the last `side` steps.
        segments = (states[:, :side], states[:, side:-side], states[:, -side:])
        pooled = [torch.min(seg, dim=1, keepdim=True)[0] for seg in segments]
        return torch.cat(pooled, dim=1)

    def show(self, name='MinPoolTrinary', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = MinPoolTrinary()' % (name,))
class Length(nn.Module):
    """Euclidean norm of (a subset of) the coordinates in the last dim."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index

    def forward(self, states, dim_index=None):
        if dim_index is None:
            dim_index = self.dim_index
        if isinstance(dim_index, int):
            indices = [dim_index]
        else:
            indices = [get_int_dim_index(n) for n in dim_index]
        if -1 in indices:
            # -1 means "use every coordinate".
            def norm_fn(v):
                return torch.sqrt(torch.sum(v * v, dim=1, keepdim=True))
        else:
            def norm_fn(v):
                selected = v[:, indices]
                return torch.sqrt(torch.sum(selected.pow(2), dim=1,
                    keepdim=True))
        return apply_last_dim(norm_fn, states)

    def show(self, name='Length', indent=0, log=print, **kwargs):
        log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self.
            dim_index)))
class Distance(nn.Module):
    """Euclidean distance |x1 - x2|, delegated to Length on the difference."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index
        self.length = Length(dim_index)

    def forward(self, states1, states2, dim_index=None):
        diff = states1 - states2
        return self.length(diff, dim_index)

    def show(self, name='Distance', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name)
class Position(nn.Module):
    """Extract positions from raw state vectors via a configurable map
    (identity by default), applied over the last dimension."""

    def __init__(self, position_extractor=lambda x: x):
        super().__init__()
        self.position_extractor = position_extractor

    def forward(self, states):
        """
        :param states: [batch, length, n_agents, state_dim]
        """
        return apply_last_dim(self.position_extractor, states)

    def show(self, name='Position', indent=0, log=print, **kwargs):
        log(' ' * indent + "- %s(x) = x's first three dims" % name)
class SoftCompare(nn.Module):
    """Base class holding (alpha, beta) for soft threshold comparisons.

    When alpha is not supplied it is trainable and randomly initialised;
    when beta is not supplied it is trainable and starts at 0. Supplied
    values are frozen. Subclasses implement forward().
    """

    def __init__(self, alpha=None, beta=None):
        super().__init__()
        alpha_init = 0 if alpha is None else alpha
        beta_init = 0 if beta is None else beta
        self.alpha = nn.Parameter(torch.ones(1) * alpha_init,
            requires_grad=True)
        self.beta = nn.Parameter(torch.ones(1) * beta_init,
            requires_grad=True)
        if alpha is None:
            nn.init.normal_(self.alpha.data, 0, 1)
        else:
            self.alpha.requires_grad_(False)
        if beta is not None:
            self.beta.requires_grad_(False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        raise NotImplementedError
class SoftSmall(SoftCompare):
    """
    Sigmoid((alpha - x) / e^beta)
    """

    def __init__(self, alpha=None, beta=None):
        super().__init__(alpha, beta)

    def forward(self, x, beta=None):
        # beta can be overridden per call; e^beta is the softness scale.
        if beta is None:
            beta = self.beta
        scale = torch.exp(beta)
        return self.sigmoid((self.alpha - x) / scale)

    def show(self, name='SoftSmall', indent=0, log=print, **kwargs):
        alpha = kwargs['alpha'] if 'alpha' in kwargs else self.alpha
        beta = kwargs['beta'] if 'beta' in kwargs else self.beta
        log(' ' * indent + '- %s(x) = Sigmoid((%lf - x) / %lf)' % (name,
            alpha, math.exp(beta)))
class WithBall(nn.Module):
    """Soft predicate "player p is near the ball": small(distance(p, ball)).

    Agent 0 is treated as the ball; all other agents are players. With
    trinary=True the time axis is min-pooled into (pre, act, eff) segments
    before thresholding.
    """

    def __init__(self, alpha=None, beta=None, trinary=False):
        super().__init__()
        self.position = Position()
        self.trinary = trinary
        self.distance = Distance()
        if trinary:
            self.min_pool_trinary = MinPoolTrinary()
        self.small = SoftSmall(alpha, beta)

    def new_length(self, length):
        # Trinary pooling collapses the time axis to 3 steps.
        return 3 if self.trinary else length

    def get_descriptions(self, n_agents, length):
        """Human-readable names for each output slot, matching the
        (time, player) layout produced by forward()."""
        n_players = n_agents // 2
        # Agent 0 is the ball; teams A and B split the remaining agents.
        agent_name = ['ball'] + [('A' + str(i)) for i in range(1, n_players +
            1)] + [('B' + str(i)) for i in range(1, n_players + 1)]
        res = []
        if self.trinary:
            trinary_name = ['pre', 'act', 'eff']
            for i in range(3):
                for p in range(1, n_agents):
                    res.append('WithBall(%s, %s)' % (agent_name[p],
                        trinary_name[i]))
        else:
            new_length = self.new_length(length)
            for i in range(0, new_length):
                for p in range(1, n_agents):
                    # Label by normalized mid-point of the time step.
                    res.append('WithBall(%s, %.2f)' % (agent_name[p], (i +
                        0.5) / new_length))
        return res

    def forward(self, states, beta=None):
        """
        :param states: [batch, length, n_agents, state_dim]
        return [batch, length, n_agents - 1, 1]
        """
        _batch, _length, _n_agents, _state_dim = states.size()
        # Agent 0 is the ball; the rest are players.
        ball_pos = self.position(states[:, :, :1])
        player_pos = self.position(states[:, :, 1:])
        dists = self.distance(player_pos, ball_pos)
        if self.trinary:
            dists = self.min_pool_trinary(dists)
        small = self.small(dists, beta=beta)
        return small

    def show(self, name='WithBall', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(p) = small(distance(p, ball))' % name)
        self.distance.show('distance', indent + 2, **kwargs)
        self.small.show('small', indent + 2, log=log, **kwargs)
def get_inputs():
    """Sample forward-pass inputs for smoke-testing the module."""
    sample = torch.rand([4, 4, 4, 4])
    return [sample]
def get_init_inputs():
    """Constructor args/kwargs for building the module."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_exp_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
    # Fused kernel for WithBall.forward on a fixed [4, 4, 4, 4] input:
    # loads alpha (in_ptr0, scalar), states (in_ptr1) and beta (in_ptr2,
    # scalar), computes a 4-component Euclidean distance between each
    # player slice and the ball slice, then sigmoid((alpha - d) / exp(beta)).
    # out_ptr1: sigmoid result; out_ptr2: the pre-sigmoid quotient divided
    # once more by exp(beta) -- presumably kept for the backward pass (it is
    # returned as the last element of call()); confirm against autograd use.
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # Flat index decomposition; 48 = 4 (batch) * 4 (length) * 3 (players).
    x0 = xindex % 3
    x1 = xindex // 3 % 4
    x2 = xindex // 12
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    # Four paired loads: player component k (offset 4+k...) vs ball component k.
    tmp2 = tl.load(in_ptr1 + (4 + 4 * x0 + 16 * x1 + 64 * x2 + 128 * ((x0 +
        3 * x1) // 12)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (16 * x1 + 64 * x2 + 64 * ((x0 + 3 * x1) // 12
        )), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (5 + 4 * x0 + 16 * x1 + 64 * x2 + 128 * ((x0 +
        3 * x1) // 12)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1 + 16 * x1 + 64 * x2 + 64 * ((x0 + 3 * x1) //
        12)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + (6 + 4 * x0 + 16 * x1 + 64 * x2 + 128 * ((x0 +
        3 * x1) // 12)), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (2 + 16 * x1 + 64 * x2 + 64 * ((x0 + 3 * x1) //
        12)), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + (7 + 4 * x0 + 16 * x1 + 64 * x2 + 128 * ((x0 +
        3 * x1) // 12)), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr1 + (3 + 16 * x1 + 64 * x2 + 64 * ((x0 + 3 * x1) //
        12)), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr2 + 0)
    tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
    # Sum of squared component differences -> sqrt -> Euclidean distance.
    tmp4 = tmp2 - tmp3
    tmp5 = tmp4 * tmp4
    tmp8 = tmp6 - tmp7
    tmp9 = tmp8 * tmp8
    tmp10 = tmp5 + tmp9
    tmp13 = tmp11 - tmp12
    tmp14 = tmp13 * tmp13
    tmp15 = tmp10 + tmp14
    tmp18 = tmp16 - tmp17
    tmp19 = tmp18 * tmp18
    tmp20 = tmp15 + tmp19
    tmp21 = libdevice.sqrt(tmp20)
    tmp22 = tmp1 - tmp21
    tmp25 = tl_math.exp(tmp24)
    tmp26 = tmp22 / tmp25
    tmp27 = tl.sigmoid(tmp26)
    tmp28 = tmp26 / tmp25
    tl.store(out_ptr1 + x3, tmp27, xmask)
    tl.store(out_ptr2 + x3, tmp28, xmask)
def call(args):
    """Launch the fused WithBall kernel on CUDA device 0.

    args: [states (4,4,4,4), alpha (1,), beta (1,)] -- consumed (cleared).
    Returns (result, beta, result, aux) where result is the (4,4,3,1)
    sigmoid output and aux is an extra buffer written by the kernel
    (presumably for backward -- confirm).
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32)
        get_raw_stream(0)
        # Note argument order: alpha, states, beta.
        triton_poi_fused_div_exp_sigmoid_sub_0[grid(48)](primals_2,
            primals_1, primals_3, buf1, buf2, 48, XBLOCK=64, num_warps=1,
            num_stages=1)
        del primals_1
        del primals_2
    return buf1, primals_3, buf1, buf2
def apply_last_dim(model, x):
    """Apply `model` over the last dimension of `x`.

    All leading dims are flattened, the model is run on the 2-D view, and
    the leading shape is restored (the last dim may change size).
    """
    shape = list(x.size())
    flat = x.contiguous().view(-1, shape[-1])
    out = model(flat)
    shape[-1] = out.size(-1)
    return out.view(torch.Size(shape))
def get_int_dim_index(name):
    """Map an axis name to its integer dim index.

    Integers pass through unchanged. Otherwise `name` must be one of
    'a', 'x', 'y', 'z', mapping to -1, 0, 1, 2 respectively (-1 is the
    all-components sentinel used by Length).

    :param name: int dim index or a single-character axis name.
    :return: integer dim index.
    :raises AssertionError: if `name` is not a recognized axis name.
    """
    if isinstance(name, int):
        return name
    name_list = 'axyz'
    assert name in name_list
    # str.index replaces the original hand-rolled comprehension scan.
    return name_list.index(name) - 1
class MinPoolTrinary(nn.Module):
    """Min-pool the time axis into three phases: pre / act / eff."""

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        return length

    def forward(self, states):
        """
        :param states: [batch, length, *]
        """
        assert states.size(1) >= 3
        side = (states.size(1) + 1) // 3
        # Three windows: leading `side` steps, the middle, trailing `side`.
        pre = torch.min(states[:, :side], dim=1, keepdim=True)[0]
        act = torch.min(states[:, side:-side], dim=1, keepdim=True)[0]
        eff = torch.min(states[:, -side:], dim=1, keepdim=True)[0]
        return torch.cat([pre, act, eff], dim=1)

    def show(self, name='MinPoolTrinary', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = MinPoolTrinary()' % (name,))
class Length(nn.Module):
    """Euclidean norm over selected components of the last dimension."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index

    def forward(self, states, dim_index=None):
        if dim_index is None:
            dim_index = self.dim_index
        if isinstance(dim_index, int):
            dim_index = [dim_index]
        else:
            dim_index = [get_int_dim_index(x) for x in dim_index]
        if -1 in dim_index:
            # -1 means: norm over every component.
            def extractor(x):
                return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True))
        else:
            def extractor(x):
                selected = x[:, dim_index]
                return torch.sqrt(torch.sum(selected.pow(2), dim=1,
                    keepdim=True))
        return apply_last_dim(extractor, states)

    def show(self, name='Length', indent=0, log=print, **kwargs):
        message = "- %s(x) = |x's dim %s|" % (name, str(self.dim_index))
        log(' ' * indent + message)
class Distance(nn.Module):
    """Euclidean distance between two state tensors, delegated to Length."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index
        self.length = Length(dim_index)

    def forward(self, states1, states2, dim_index=None):
        # |x1 - x2| is just the length of the difference vector.
        delta = states1 - states2
        return self.length(delta, dim_index)

    def show(self, name='Distance', indent=0, log=print, **kwargs):
        pad = ' ' * indent
        log(pad + '- %s(x1, x2) = |x1 - x2|' % name)
class Position(nn.Module):
    """Extract position features from the last dimension of a state tensor."""

    def __init__(self, position_extractor=lambda x: x):
        super().__init__()
        self.position_extractor = position_extractor

    def forward(self, states):
        """
        :param states: [batch, length, n_agents, state_dim]
        """
        # Run the extractor over the flattened leading dimensions.
        return apply_last_dim(self.position_extractor, states)

    def show(self, name='Position', indent=0, log=print, **kwargs):
        pad = ' ' * indent
        log(pad + "- %s(x) = x's first three dims" % name)
class SoftCompare(nn.Module):
    """Base module holding alpha/beta parameters for soft comparisons.

    An explicitly supplied alpha (resp. beta) is frozen; when alpha is
    omitted it is randomly initialized and remains trainable.
    """

    def __init__(self, alpha=None, beta=None):
        super().__init__()
        alpha_init = 0 if alpha is None else alpha
        beta_init = 0 if beta is None else beta
        self.alpha = nn.Parameter(torch.ones(1) * alpha_init, requires_grad=True)
        self.beta = nn.Parameter(torch.ones(1) * beta_init, requires_grad=True)
        if alpha is None:
            nn.init.normal_(self.alpha.data, 0, 1)
        else:
            self.alpha.requires_grad_(False)
        if beta is not None:
            self.beta.requires_grad_(False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Subclasses implement the actual comparison.
        raise NotImplementedError
class SoftSmall(SoftCompare):
    """
    Sigmoid((alpha - x) / e^beta)
    """

    def __init__(self, alpha=None, beta=None):
        super().__init__(alpha, beta)

    def forward(self, x, beta=None):
        # A per-call beta may override the learned one.
        if beta is None:
            beta = self.beta
        scale = torch.exp(beta)
        return self.sigmoid((self.alpha - x) / scale)

    def show(self, name='SoftSmall', indent=0, log=print, **kwargs):
        alpha = kwargs.get('alpha', self.alpha)
        beta = kwargs.get('beta', self.beta)
        log(' ' * indent + '- %s(x) = Sigmoid((%lf - x) / %lf)' % (name,
            alpha, math.exp(beta)))
class WithBallNew(nn.Module):
    """Triton-backed WithBall variant (fixed [4, 4, 4, 4] input)."""

    def __init__(self, alpha=None, beta=None, trinary=False):
        super().__init__()
        self.position = Position()
        self.trinary = trinary
        self.distance = Distance()
        if trinary:
            self.min_pool_trinary = MinPoolTrinary()
        self.small = SoftSmall(alpha, beta)

    def new_length(self, length):
        # Trinary mode pools time down to three phases.
        if self.trinary:
            return 3
        return length

    def get_descriptions(self, n_agents, length):
        n_players = n_agents // 2
        agent_name = ['ball']
        agent_name += ['A' + str(i) for i in range(1, n_players + 1)]
        agent_name += ['B' + str(i) for i in range(1, n_players + 1)]
        res = []
        if self.trinary:
            for phase in ('pre', 'act', 'eff'):
                for p in range(1, n_agents):
                    res.append('WithBall(%s, %s)' % (agent_name[p], phase))
        else:
            total = self.new_length(length)
            for i in range(total):
                frac = (i + 0.5) / total
                for p in range(1, n_agents):
                    res.append('WithBall(%s, %.2f)' % (agent_name[p], frac))
        return res

    def show(self, name='WithBall', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(p) = small(distance(p, ball))' % name)
        self.distance.show('distance', indent + 2, **kwargs)
        self.small.show('small', indent + 2, log=log, **kwargs)

    def forward(self, input_0):
        # states, alpha, beta -> fused Triton pipeline; first output is result.
        outputs = call([input_0, self.small.alpha, self.small.beta])
        return outputs[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
WithBall
| false
| 17,143
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
SoftLarge
|
import math
import torch
from torch import nn
class SoftCompare(nn.Module):
    """Base module holding alpha/beta parameters for soft comparisons.

    An explicitly supplied alpha (resp. beta) is frozen; when alpha is
    omitted it is randomly initialized and remains trainable.
    """

    def __init__(self, alpha=None, beta=None):
        super().__init__()
        alpha_init = 0 if alpha is None else alpha
        beta_init = 0 if beta is None else beta
        self.alpha = nn.Parameter(torch.ones(1) * alpha_init, requires_grad=True)
        self.beta = nn.Parameter(torch.ones(1) * beta_init, requires_grad=True)
        if alpha is None:
            nn.init.normal_(self.alpha.data, 0, 1)
        else:
            self.alpha.requires_grad_(False)
        if beta is not None:
            self.beta.requires_grad_(False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Subclasses implement the actual comparison.
        raise NotImplementedError
class SoftLarge(SoftCompare):
    """
    Sigmoid((x - alpha) / e^beta)
    """

    def __init__(self, alpha=None, beta=None):
        super().__init__(alpha, beta)

    def forward(self, x, beta=None):
        # A per-call beta may override the learned one.
        if beta is None:
            beta = self.beta
        scale = torch.exp(beta)
        return self.sigmoid((x - self.alpha) / scale)

    def show(self, name='SoftLarge', indent=0, log=print, **kwargs):
        alpha = kwargs.get('alpha', self.alpha)
        beta = kwargs.get('beta', self.beta)
        log(' ' * indent + '- %s(x) = Sigmoid((x - %lf) / %lf)' % (name,
            alpha, math.exp(beta)))
def get_inputs():
    """Sample forward-pass inputs for smoke-testing the module."""
    sample = torch.rand([4, 4, 4, 4])
    return [sample]
def get_init_inputs():
    """Constructor args/kwargs for building the module."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_exp_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused elementwise kernel for SoftLarge.forward:
    # out = sigmoid((x - alpha) / exp(beta)) over a 256-element tensor.
    # in_ptr0: x; in_ptr1: alpha (scalar); in_ptr2: beta (scalar).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr2 + 0)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp3 = tmp0 - tmp2
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tmp3 / tmp6
    tmp8 = tl.sigmoid(tmp7)
    tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
    """Launch the fused SoftLarge kernel on CUDA device 0.

    args: [alpha (1,), beta (1,), x (4,4,4,4)] -- consumed (cleared).
    Returns (result, alpha, beta, x, result); result has x's shape.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1,), (1,))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # Note argument order: x, alpha, beta.
        triton_poi_fused_div_exp_sigmoid_sub_0[grid(256)](primals_3,
            primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
    return buf0, primals_1, primals_2, primals_3, buf0
class SoftCompare(nn.Module):
    """Base module holding alpha/beta parameters for soft comparisons.

    An explicitly supplied alpha (resp. beta) is frozen; when alpha is
    omitted it is randomly initialized and remains trainable.
    """

    def __init__(self, alpha=None, beta=None):
        super().__init__()
        alpha_init = 0 if alpha is None else alpha
        beta_init = 0 if beta is None else beta
        self.alpha = nn.Parameter(torch.ones(1) * alpha_init, requires_grad=True)
        self.beta = nn.Parameter(torch.ones(1) * beta_init, requires_grad=True)
        if alpha is None:
            nn.init.normal_(self.alpha.data, 0, 1)
        else:
            self.alpha.requires_grad_(False)
        if beta is not None:
            self.beta.requires_grad_(False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Subclasses implement the actual comparison.
        raise NotImplementedError
class SoftLargeNew(SoftCompare):
    """
    Sigmoid((x - alpha) / e^beta), computed by a fused Triton kernel.
    """

    def __init__(self, alpha=None, beta=None):
        super().__init__(alpha, beta)

    def show(self, name='SoftLarge', indent=0, log=print, **kwargs):
        alpha = kwargs.get('alpha', self.alpha)
        beta = kwargs.get('beta', self.beta)
        log(' ' * indent + '- %s(x) = Sigmoid((x - %lf) / %lf)' % (name,
            alpha, math.exp(beta)))

    def forward(self, input_0):
        # alpha, beta, x -> fused kernel; first output is the result.
        outputs = call([self.alpha, self.beta, input_0])
        return outputs[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
SoftLarge
| false
| 17,144
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
TernaryLinear
|
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
class Ternary(nn.Module):
"""
Ternarize the input activations to -1, 0, 1.
"""
def __init__(self, left=-0.25, right=0.25):
super().__init__()
self.left = left
self.right = right
def forward(self, input):
input = input.clone()
left_index = input.lt(self.left)
right_index = input.ge(self.right)
input[left_index] = -1
input[right_index] = 1
input[~(left_index | right_index)] = 0
return input
def backward(self, grad_output):
grad_input = grad_output.clone()
return grad_input
class TernaryLinear(nn.Module):
    """Linear layer whose weights are ternarized to {-1, 0, 1} on the fly."""

    def __init__(self, in_features, out_features, ternarize_left=-0.25,
        ternarize_right=0.25, bias=True):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        self.ternarize = Ternary(ternarize_left, ternarize_right)
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            # Keep the attribute present (as None) for state_dict symmetry.
            self.register_parameter('bias', None)
        init.normal_(self.weight.data, 0, 1)

    def forward(self, input):
        ternary_weight = self.ternarize(self.weight)
        return F.linear(input, ternary_weight, self.bias)
def get_inputs():
    """Sample forward-pass inputs for smoke-testing the module."""
    sample = torch.rand([4, 4, 4, 4])
    return [sample]
def get_init_inputs():
    """Constructor args/kwargs for building a 4-in, 4-out TernaryLinear."""
    kwargs = {'in_features': 4, 'out_features': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_bitwise_not_bitwise_or_ge_index_put_lift_fresh_lt_0(
    in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.
    constexpr):
    # Fused ternarization of a 16-element weight matrix:
    #   w < -0.25 -> -1 ;  w >= 0.25 -> 1 ;  otherwise -> 0.
    # The three boolean masks (lt / ge / neither) are stored separately
    # (out_ptr0..2) -- presumably for the backward pass; confirm.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = -0.25
    tmp2 = tmp0 < tmp1
    tmp3 = -1.0
    tmp4 = tl.where(tmp2, tmp3, tmp0)
    tmp5 = 0.25
    tmp6 = tmp0 >= tmp5
    tmp7 = 1.0
    tmp8 = tl.where(tmp6, tmp7, tmp4)
    tmp9 = tmp2 | tmp6
    tmp10 = tmp9 == 0
    tmp11 = 0.0
    tmp12 = tl.where(tmp10, tmp11, tmp8)
    tl.store(out_ptr0 + x0, tmp2, xmask)
    tl.store(out_ptr1 + x0, tmp6, xmask)
    tl.store(out_ptr2 + x0, tmp10, xmask)
    tl.store(in_out_ptr0 + x0, tmp12, xmask)
def call(args):
    """Ternarize the weight on GPU, then run the linear layer via addmm.

    args: [weight (4,4), bias (4,), input (4,4,4,4)] -- consumed (cleared).
    Returns (output reshaped to (4,4,4,4), lt-mask, ge-mask, neither-mask,
    flattened input view).
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        # buf2 is reused in place (buf3/buf5 alias it) for the ternarized weight.
        buf3 = buf2
        del buf2
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        buf5 = buf3
        del buf3
        get_raw_stream(0)
        triton_poi_fused_bitwise_not_bitwise_or_ge_index_put_lift_fresh_lt_0[
            grid(16)](buf5, primals_1, buf0, buf1, buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_1
        buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # out = bias + input_flat @ ternarized_weight^T
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(buf5, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf6)
        del buf5
        del primals_2
    return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), buf0, buf1, buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class Ternary(nn.Module):
"""
Ternarize the input activations to -1, 0, 1.
"""
def __init__(self, left=-0.25, right=0.25):
super().__init__()
self.left = left
self.right = right
def forward(self, input):
input = input.clone()
left_index = input.lt(self.left)
right_index = input.ge(self.right)
input[left_index] = -1
input[right_index] = 1
input[~(left_index | right_index)] = 0
return input
def backward(self, grad_output):
grad_input = grad_output.clone()
return grad_input
class TernaryLinearNew(nn.Module):
    """Triton-backed TernaryLinear (fixed 4x4 weight, 4x4x4x4 input)."""

    def __init__(self, in_features, out_features, ternarize_left=-0.25,
        ternarize_right=0.25, bias=True):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        self.ternarize = Ternary(ternarize_left, ternarize_right)
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            # Keep the attribute present (as None) for state_dict symmetry.
            self.register_parameter('bias', None)
        init.normal_(self.weight.data, 0, 1)

    def forward(self, input_0):
        # weight, bias, input -> fused pipeline; first output is the result.
        outputs = call([self.weight, self.bias, input_0])
        return outputs[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
TernaryLinear
| false
| 17,145
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
FinalPool
|
import torch
import torch.utils.data
class FinalPool(torch.nn.Module):
    """Max-pool over the time dimension (dim 1)."""

    def __init__(self):
        super(FinalPool, self).__init__()

    def forward(self, input):
        """
        input : Tensor of shape (batch size, T, Cin)

        Outputs a Tensor of shape (batch size, Cin).
        """
        pooled, _indices = input.max(dim=1)
        return pooled
def get_inputs():
    """Sample forward-pass inputs for smoke-testing the module."""
    sample = torch.rand([4, 4, 4, 4])
    return [sample]
def get_init_inputs():
    """Constructor args/kwargs for building the module."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Max over dim 1 of a (4, 4, 4, 4) tensor: for each of the 64 output
    # slots, take the elementwise maximum across the four time steps
    # (stride 16 between them).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    # Pairwise maxima fold the four values down to one.
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    """Launch the fused dim-1 max kernel on CUDA device 0.

    args: [input (4,4,4,4)] -- consumed (cleared).
    Returns a one-tuple holding the (4,4,4) pooled result.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del arg0_1
    return buf0,
class FinalPoolNew(torch.nn.Module):
    """Triton-backed max-pool over the time dimension (fixed 4x4x4x4 input)."""

    def __init__(self):
        super(FinalPoolNew, self).__init__()

    def forward(self, input_0):
        # The fused kernel returns a one-tuple; unwrap it.
        return call([input_0])[0]
|
CoraJung/flexible-input-slu
|
FinalPool
| false
| 17,146
|
[
"Apache-2.0"
] | 7
|
6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
|
https://github.com/CoraJung/flexible-input-slu/tree/6a1a6bf105f1a0c07e8d483aa6da1df7a554392d
|
BinaryPrimitivesPredefined_v2
|
import math
import torch
from torch import nn
def apply_last_dim(model, x):
    """Apply `model` over the last dimension of `x`.

    All leading dims are flattened, the model is run on the 2-D view, and
    the leading shape is restored (the last dim may change size).
    """
    shape = list(x.size())
    flat = x.contiguous().view(-1, shape[-1])
    out = model(flat)
    shape[-1] = out.size(-1)
    return out.view(torch.Size(shape))
def get_int_dim_index(name):
    """Map an axis name to its integer dim index.

    Integers pass through unchanged. Otherwise `name` must be one of
    'a', 'x', 'y', 'z', mapping to -1, 0, 1, 2 respectively (-1 is the
    all-components sentinel used by Length).

    :param name: int dim index or a single-character axis name.
    :return: integer dim index.
    :raises AssertionError: if `name` is not a recognized axis name.
    """
    if isinstance(name, int):
        return name
    name_list = 'axyz'
    assert name in name_list
    # str.index replaces the original hand-rolled comprehension scan.
    return name_list.index(name) - 1
class Length(nn.Module):
    """Euclidean norm over selected components of the last dimension."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index

    def forward(self, states, dim_index=None):
        if dim_index is None:
            dim_index = self.dim_index
        if isinstance(dim_index, int):
            dim_index = [dim_index]
        else:
            dim_index = [get_int_dim_index(x) for x in dim_index]
        if -1 in dim_index:
            # -1 means: norm over every component.
            def extractor(x):
                return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True))
        else:
            def extractor(x):
                selected = x[:, dim_index]
                return torch.sqrt(torch.sum(selected.pow(2), dim=1,
                    keepdim=True))
        return apply_last_dim(extractor, states)

    def show(self, name='Length', indent=0, log=print, **kwargs):
        message = "- %s(x) = |x's dim %s|" % (name, str(self.dim_index))
        log(' ' * indent + message)
class Distance(nn.Module):
    """Euclidean distance between two state tensors, delegated to Length."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index
        self.length = Length(dim_index)

    def forward(self, states1, states2, dim_index=None):
        # |x1 - x2| is just the length of the difference vector.
        delta = states1 - states2
        return self.length(delta, dim_index)

    def show(self, name='Distance', indent=0, log=print, **kwargs):
        pad = ' ' * indent
        log(pad + '- %s(x1, x2) = |x1 - x2|' % name)
class Normalize(nn.Module):
    """Normalize inputs under an assumed distribution.

    distribution=None      -> identity
    distribution='normal'  -> (x - mean) / std
    distribution='uniform' -> (x - min) / (max - min + 1e-05)

    Parameters can be (re-)estimated from data buffered via
    forward(..., keep_data=True) followed by reset_parameters().
    Fix: removed the dead `if name is not None: None` placeholder branches
    (no-ops left over from stripped logging); `name` is kept for interface
    compatibility.
    """

    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        self.data_ = []  # buffered flattened samples for estimation
        if distribution is None:
            pass
        elif distribution == 'normal':
            mean = kwargs['mean'] if 'mean' in kwargs else 0
            std = kwargs['std'] if 'std' in kwargs else 1
            self.param = nn.Parameter(torch.Tensor([mean, std]), False)
        elif distribution == 'uniform':
            vmin = kwargs['minv'] if 'minv' in kwargs else 0
            vmax = kwargs['maxv'] if 'maxv' in kwargs else 1
            self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False)
        else:
            raise NotImplementedError()

    def forward(self, x, keep_data=False):
        """Normalize x; with keep_data=True just buffer x and return it unchanged."""
        if keep_data:
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        elif self.distribution == 'normal':
            mean = self.param[0]
            std = self.param[1]
            return (x - mean) / std
        elif self.distribution == 'uniform':
            vmin = self.param[0]
            vmax = self.param[1]
            return (x - vmin) / (vmax - vmin + 1e-05)
        else:
            raise NotImplementedError()

    def reset_parameters(self, name=None):
        """Re-estimate distribution parameters from buffered data, then clear it.

        :raises AssertionError: if no data was buffered.
        """
        assert len(self.data_) > 0
        data = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = data.mean().item()
                self.param[1] = data.std().item()
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = data.min().item()
                self.param[1] = data.max().item()
        else:
            raise NotImplementedError()

    def recover_threshold(self, x):
        """Map a normalized threshold back to the original data scale."""
        if self.distribution is None:
            return x
        elif self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        elif self.distribution == 'uniform':
            return x * float(self.param[1] - self.param[0] + 1e-05) + float(
                self.param[0])
        else:
            raise NotImplementedError()

    def init_thresholds(self, x):
        """Initialize a threshold tensor to match the assumed distribution."""
        if self.distribution is None:
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'normal':
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """
    Sigmoid((x - y) / e^beta)
    """

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        # beta is a plain Python number here, hence math.exp.
        scale = math.exp(beta)
        return self.sigmoid((x - y) / scale)
class Inequality(nn.Module):
    """Soft comparison of inputs against `out_dim` learnable thresholds."""

    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        self.thresholds = nn.Parameter(torch.zeros(out_dim),
            requires_grad=True)
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        self.normalize.init_thresholds(self.thresholds)

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, ... ]
        """
        # Add a trailing singleton axis so thresholds broadcast over it.
        expanded = states.view(*(states.size() + (1,)))
        estimating = kwargs.get('estimate_parameters', False)
        expanded = self.normalize(expanded, keep_data=estimating)
        broadcast_shape = [1] * len(states.size()) + [self.out_dim]
        return self.cmp(expanded, self.thresholds.view(*broadcast_shape), beta)

    def reset_parameters(self, parameter_name, name=None):
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)

    def get_descriptions(self, name='Inequality'):
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descriptions = []
        for k in range(theta.size(0)):
            t = self.normalize.recover_threshold(theta[k])
            # Hard-coded unit rescaling for speed/acceleration primitives.
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descriptions.append('%s > %.2lf' % (name, t))
        return descriptions
class N_aryPrimitivesPredefined(nn.Module):
    """Base container for predefined n-ary primitive inequalities."""

    def __init__(self):
        super().__init__()
        self.out_dim = 0
        self.primitive_list = []
        self.ineqs = nn.ModuleDict({})

    def reset_parameters(self, parameter_name):
        for key in self.primitive_list:
            self.ineqs[key].reset_parameters(parameter_name, name=key)

    def get_descriptions(self):
        descriptions = []
        for key in self.primitive_list:
            descriptions.extend(self.ineqs[key].get_descriptions(name=key))
        return descriptions
class BinaryPrimitivesPredefined_v2(N_aryPrimitivesPredefined):
    """Pairwise agent-distance primitives, soft-thresholded."""

    def __init__(self, cmp_dim=10):
        super().__init__()
        self.distance = Distance()
        self.primitive_list = ['dist']
        self.ineqs.update({'dist': Inequality(out_dim=cmp_dim,
            distribution='normal')})
        self.out_dim = sum(self.ineqs[k].out_dim for k in self.primitive_list)

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, state_dim]
        return [batch, length, n_agents, n_agents, out_dim]
        """
        n_agents = states.size(2)
        # Broadcast states into all ordered agent pairs.
        p1 = states.unsqueeze(2).repeat(1, 1, n_agents, 1, 1)
        p2 = states.unsqueeze(3).repeat(1, 1, 1, n_agents, 1)
        pairwise = self.distance(p1, p2, dim_index=(0, 1)).squeeze(4)
        # Drop agent 0 (the ball) from both pair axes.
        ineqs_inputs = {'dist': pairwise[:, :, 1:, 1:]}
        parts = [self.ineqs[k](ineqs_inputs[k], beta, **kwargs) for k in
            self.primitive_list]
        return torch.cat(parts, dim=-1)
def get_inputs():
    """Sample forward-pass inputs for smoke-testing the module."""
    sample = torch.rand([4, 4, 4, 4])
    return [sample]
def get_init_inputs():
    """Constructor args/kwargs for building the module."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Fused kernel for BinaryPrimitivesPredefined_v2: computes the pairwise
    # 2-component (dims 0 and 1) Euclidean distance between agents 1..3,
    # then normalizes: (dist - mean) / std.
    # in_ptr0: states; in_ptr1: Normalize params [mean, std].
    # 144 = 4 (batch) * 4 (length) * 3 * 3 (agent pairs, ball excluded).
    xnumel = 144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 3
    x1 = xindex // 3 % 3
    x2 = xindex // 9 % 4
    x3 = xindex // 36
    x4 = xindex
    tmp16 = tl.load(in_ptr1 + 0)
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
    tmp19 = tl.load(in_ptr1 + 1)
    tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
    # tmp3/tmp9 select state components 0 and 1 via clamped int indices.
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = tl.full([1], 1, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.where(tmp2, tmp0, tmp1)
    tmp4 = tl.load(in_ptr0 + (tmp3 + 4 * ((5 + x0) % 4) + 16 * x2 + 16 * ((
        5 + x0 + 4 * x1) // 16) + 64 * x3 + 64 * ((5 + x0 + 4 * x1 + 16 *
        x2) // 64)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (tmp3 + 4 * x1 + 4 * ((5 + x0) // 4) + 16 *
        x2 + 16 * ((5 + x0 + 4 * x1) // 16) + 64 * x3 + 64 * ((5 + x0 + 4 *
        x1 + 16 * x2) // 64)), xmask, eviction_policy='evict_last')
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp1 < tmp1
    tmp9 = tl.where(tmp8, tmp0, tmp1)
    tmp10 = tl.load(in_ptr0 + (tmp9 + 4 * ((5 + x0) % 4) + 16 * x2 + 16 * (
        (5 + x0 + 4 * x1) // 16) + 64 * x3 + 64 * ((5 + x0 + 4 * x1 + 16 *
        x2) // 64)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (tmp9 + 4 * x1 + 4 * ((5 + x0) // 4) + 16 *
        x2 + 16 * ((5 + x0 + 4 * x1) // 16) + 64 * x3 + 64 * ((5 + x0 + 4 *
        x1 + 16 * x2) // 64)), xmask, eviction_policy='evict_last')
    tmp12 = tmp10 - tmp11
    tmp13 = tmp12 * tmp12
    tmp14 = tmp7 + tmp13
    tmp15 = libdevice.sqrt(tmp14)
    tmp18 = tmp15 - tmp17
    tmp21 = tmp18 / tmp20
    tl.store(out_ptr0 + x4, tmp21, xmask)
@triton.jit
def triton_poi_fused_div_sigmoid_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Soft-threshold stage: for each normalized distance (in_ptr0) and each
    # of the 10 thresholds (in_ptr1), compute sigmoid((d - theta) / 1.0).
    # 1440 = 144 pair-distances * 10 thresholds.
    xnumel = 1440
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 10
    x0 = xindex % 10
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    # beta = 0 at trace time, so the divisor exp(beta) folded to 1.0.
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tmp5 = tl.sigmoid(tmp4)
    tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
    """Run the two-stage BinaryPrimitivesPredefined_v2 pipeline on CUDA.

    args: [states (4,4,4,4), normalize params (2,), thresholds (10,)] --
    consumed (cleared). Returns (result, result) with result of shape
    (4, 4, 3, 3, 10).
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (2,), (1,))
    assert_size_stride(primals_3, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Stage 1: normalized pairwise distances.
        buf0 = empty_strided_cuda((4, 4, 3, 3, 1), (36, 9, 3, 1, 144),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_sub_0[grid(144)](primals_1, primals_2, buf0,
            144, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        # Stage 2: sigmoid comparison against the 10 thresholds.
        buf1 = empty_strided_cuda((4, 4, 3, 3, 10), (360, 90, 30, 10, 1),
            torch.float32)
        triton_poi_fused_div_sigmoid_sub_1[grid(1440)](buf0, primals_3,
            buf1, 1440, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del primals_3
    return buf1, buf1
def apply_last_dim(model, x):
    """Apply `model` over the last dimension of `x`.

    All leading dims are flattened, the model is run on the 2-D view, and
    the leading shape is restored (the last dim may change size).
    """
    shape = list(x.size())
    flat = x.contiguous().view(-1, shape[-1])
    out = model(flat)
    shape[-1] = out.size(-1)
    return out.view(torch.Size(shape))
def get_int_dim_index(name):
    """Map an axis name to its integer dim index.

    Integers pass through unchanged. Otherwise `name` must be one of
    'a', 'x', 'y', 'z', mapping to -1, 0, 1, 2 respectively (-1 is the
    all-components sentinel used by Length).

    :param name: int dim index or a single-character axis name.
    :return: integer dim index.
    :raises AssertionError: if `name` is not a recognized axis name.
    """
    if isinstance(name, int):
        return name
    name_list = 'axyz'
    assert name in name_list
    # str.index replaces the original hand-rolled comprehension scan.
    return name_list.index(name) - 1
class Length(nn.Module):
    """Euclidean norm over selected components of the last dimension."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index

    def forward(self, states, dim_index=None):
        if dim_index is None:
            dim_index = self.dim_index
        if isinstance(dim_index, int):
            dim_index = [dim_index]
        else:
            dim_index = [get_int_dim_index(x) for x in dim_index]
        if -1 in dim_index:
            # -1 means: norm over every component.
            def extractor(x):
                return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True))
        else:
            def extractor(x):
                selected = x[:, dim_index]
                return torch.sqrt(torch.sum(selected.pow(2), dim=1,
                    keepdim=True))
        return apply_last_dim(extractor, states)

    def show(self, name='Length', indent=0, log=print, **kwargs):
        message = "- %s(x) = |x's dim %s|" % (name, str(self.dim_index))
        log(' ' * indent + message)
class Distance(nn.Module):
    """Euclidean distance between two state tensors, delegated to Length."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index
        self.length = Length(dim_index)

    def forward(self, states1, states2, dim_index=None):
        # |x1 - x2| is just the length of the difference vector.
        delta = states1 - states2
        return self.length(delta, dim_index)

    def show(self, name='Distance', indent=0, log=print, **kwargs):
        pad = ' ' * indent
        log(pad + '- %s(x1, x2) = |x1 - x2|' % name)
class Normalize(nn.Module):
    """Normalize inputs under an assumed distribution.

    distribution=None      -> identity
    distribution='normal'  -> (x - mean) / std
    distribution='uniform' -> (x - min) / (max - min + 1e-05)

    Parameters can be (re-)estimated from data buffered via
    forward(..., keep_data=True) followed by reset_parameters().
    Fix: removed the dead `if name is not None: None` placeholder branches
    (no-ops left over from stripped logging); `name` is kept for interface
    compatibility.
    """

    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        self.data_ = []  # buffered flattened samples for estimation
        if distribution is None:
            pass
        elif distribution == 'normal':
            mean = kwargs['mean'] if 'mean' in kwargs else 0
            std = kwargs['std'] if 'std' in kwargs else 1
            self.param = nn.Parameter(torch.Tensor([mean, std]), False)
        elif distribution == 'uniform':
            vmin = kwargs['minv'] if 'minv' in kwargs else 0
            vmax = kwargs['maxv'] if 'maxv' in kwargs else 1
            self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False)
        else:
            raise NotImplementedError()

    def forward(self, x, keep_data=False):
        """Normalize x; with keep_data=True just buffer x and return it unchanged."""
        if keep_data:
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        elif self.distribution == 'normal':
            mean = self.param[0]
            std = self.param[1]
            return (x - mean) / std
        elif self.distribution == 'uniform':
            vmin = self.param[0]
            vmax = self.param[1]
            return (x - vmin) / (vmax - vmin + 1e-05)
        else:
            raise NotImplementedError()

    def reset_parameters(self, name=None):
        """Re-estimate distribution parameters from buffered data, then clear it.

        :raises AssertionError: if no data was buffered.
        """
        assert len(self.data_) > 0
        data = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = data.mean().item()
                self.param[1] = data.std().item()
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = data.min().item()
                self.param[1] = data.max().item()
        else:
            raise NotImplementedError()

    def recover_threshold(self, x):
        """Map a normalized threshold back to the original data scale."""
        if self.distribution is None:
            return x
        elif self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        elif self.distribution == 'uniform':
            return x * float(self.param[1] - self.param[0] + 1e-05) + float(
                self.param[0])
        else:
            raise NotImplementedError()

    def init_thresholds(self, x):
        """Initialize a threshold tensor to match the assumed distribution."""
        if self.distribution is None:
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'normal':
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """Soft comparison x > y computed as Sigmoid((x - y) / e^beta).

    beta sets the temperature: smaller beta gives a sharper threshold.
    """

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        temperature = math.exp(beta)
        return self.sigmoid((x - y) / temperature)
class Inequality(nn.Module):
    """Bank of out_dim learnable soft thresholds over a scalar feature.

    Appends an out_dim axis to the input and softly compares every scalar
    against each learned threshold via SoftCmp, after normalizing with the
    configured distribution.
    """

    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True
            )
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        self.normalize.init_thresholds(self.thresholds)

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, ... ]
        """
        expanded = states.view(*(states.size() + (1,)))
        estimate = kwargs.get('estimate_parameters', False)
        expanded = self.normalize(expanded, keep_data=estimate)
        broadcast_shape = [1] * len(states.size()) + [self.out_dim]
        return self.cmp(expanded, self.thresholds.view(*broadcast_shape), beta)

    def reset_parameters(self, parameter_name, name=None):
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)

    def get_descriptions(self, name='Inequality'):
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descriptions = []
        for k in range(theta.size(0)):
            t = self.normalize.recover_threshold(theta[k])
            # Speed/acceleration features were scaled down at data-prep time;
            # undo that scaling for human-readable output.
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descriptions.append('%s > %.2lf' % (name, t))
        return descriptions
class N_aryPrimitivesPredefined(nn.Module):
    """Base container holding a named dict of Inequality primitives."""

    def __init__(self):
        super().__init__()
        self.out_dim = 0
        self.primitive_list = []
        self.ineqs = nn.ModuleDict({})

    def reset_parameters(self, parameter_name):
        for key in self.primitive_list:
            self.ineqs[key].reset_parameters(parameter_name, name=key)

    def get_descriptions(self):
        descriptions = []
        for key in self.primitive_list:
            descriptions.extend(self.ineqs[key].get_descriptions(name=key))
        return descriptions
class BinaryPrimitivesPredefined_v2New(N_aryPrimitivesPredefined):
    """Pairwise-distance primitive bank; forward runs the compiled kernel."""

    def __init__(self, cmp_dim=10):
        super().__init__()
        self.distance = Distance()
        self.primitive_list = ['dist']
        self.ineqs.update({'dist': Inequality(out_dim=cmp_dim, distribution
            ='normal')})
        self.out_dim = sum(self.ineqs[k].out_dim for k in self.primitive_list)

    def forward(self, input_0):
        # Pack arguments in the order the inductor-generated `call` expects.
        primals_3 = self.ineqs.dist.thresholds
        primals_2 = self.ineqs.dist.normalize.param
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
BinaryPrimitivesPredefined_v2
| false
| 17,147
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
BinaryPrimitivesPredefined
|
import math
import torch
from torch import nn
def apply_last_dim(model, x):
    """Apply *model* along the last dimension of *x*.

    All leading dimensions are flattened into one batch axis, *model* is run
    on the resulting 2-D tensor, and the output is reshaped back to the
    original leading shape with the model's output width as the last dim.
    """
    shape = list(x.size())
    flat_out = model(x.contiguous().view(-1, shape[-1]))
    shape[-1] = flat_out.size(-1)
    return flat_out.view(torch.Size(shape))
def get_int_dim_index(name):
    """Map an axis name to its integer dim index.

    Integers pass through unchanged.  The letters 'x', 'y', 'z' map to
    0, 1, 2 respectively; 'a' maps to -1 (the "all dims" sentinel that
    Length.forward treats as "use every coordinate").

    Raises:
        AssertionError: if *name* is a string not found in 'axyz'.
    """
    if isinstance(name, int):
        return name
    name_list = 'axyz'
    assert name in name_list
    # str.index replaces the original O(n) list-comprehension scan that
    # built a full list only to take element [0].
    return name_list.index(name) - 1
class Length(nn.Module):
    """Euclidean norm over selected coordinate dims of the last axis.

    dim_index of -1 (or a name mapping to -1) means "use all coordinates";
    otherwise only the listed coordinate indices enter the norm.
    """

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index

    def forward(self, states, dim_index=None):
        if dim_index is None:
            dim_index = self.dim_index
        if isinstance(dim_index, int):
            indices = [dim_index]
        else:
            indices = [get_int_dim_index(n) for n in dim_index]
        if -1 in indices:
            # Sentinel: take the norm over every coordinate.
            def extractor(flat):
                return torch.sqrt(torch.sum(flat * flat, dim=1, keepdim=True))
        else:
            def extractor(flat):
                squared = flat[:, indices].pow(2)
                return torch.sqrt(torch.sum(squared, dim=1, keepdim=True))
        return apply_last_dim(extractor, states)

    def show(self, name='Length', indent=0, log=print, **kwargs):
        log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self.
            dim_index)))
class Distance(nn.Module):
    """Euclidean distance |x1 - x2| over selected coordinate dims."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index
        self.length = Length(dim_index)

    def forward(self, states1, states2, dim_index=None):
        delta = states1 - states2
        return self.length(delta, dim_index)

    def show(self, name='Distance', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name)
class Normalize(nn.Module):
    """Affine input normalization with re-estimable statistics.

    distribution='normal' stores (mean, std) and maps x -> (x - mean) / std;
    distribution='uniform' stores (min, max) and maps
    x -> (x - min) / (max - min + 1e-05); distribution=None is the identity.
    Calling forward(..., keep_data=True) records flattened CPU copies of the
    inputs so reset_parameters() can refit the statistics later.
    """

    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        self.data_ = []
        if distribution is None:
            pass
        elif distribution == 'normal':
            mean = kwargs.get('mean', 0)
            std = kwargs.get('std', 1)
            # Frozen parameter: (mean, std) — not trained by the optimizer.
            self.param = nn.Parameter(torch.Tensor([mean, std]), False)
        elif distribution == 'uniform':
            vmin = kwargs.get('minv', 0)
            vmax = kwargs.get('maxv', 1)
            # Frozen parameter: (min, max).
            self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False)
        else:
            raise NotImplementedError()

    def forward(self, x, keep_data=False):
        if keep_data:
            # Estimation pass: just record the data; do not normalize.
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            return (x - self.param[0]) / self.param[1]
        if self.distribution == 'uniform':
            span = self.param[1] - self.param[0] + 1e-05
            return (x - self.param[0]) / span
        raise NotImplementedError()

    def reset_parameters(self, name=None):
        """Refit the stored statistics from data collected via keep_data."""
        assert len(self.data_) > 0
        data = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = data.mean().item()
                self.param[1] = data.std().item()
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = data.min().item()
                self.param[1] = data.max().item()
        else:
            raise NotImplementedError()

    def recover_threshold(self, x):
        """Map a normalized threshold back to the original data scale."""
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        if self.distribution == 'uniform':
            span = float(self.param[1] - self.param[0] + 1e-05)
            return x * span + float(self.param[0])
        raise NotImplementedError()

    def init_thresholds(self, x):
        """Initialize a threshold tensor to match the normalized scale."""
        if self.distribution in (None, 'normal'):
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """Soft comparison x > y computed as Sigmoid((x - y) / e^beta).

    beta sets the temperature: smaller beta gives a sharper threshold.
    """

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        temperature = math.exp(beta)
        return self.sigmoid((x - y) / temperature)
class Inequality(nn.Module):
    """Bank of out_dim learnable soft thresholds over a scalar feature.

    Appends an out_dim axis to the input and softly compares every scalar
    against each learned threshold via SoftCmp, after normalizing with the
    configured distribution.
    """

    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True
            )
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        self.normalize.init_thresholds(self.thresholds)

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, ... ]
        """
        expanded = states.view(*(states.size() + (1,)))
        estimate = kwargs.get('estimate_parameters', False)
        expanded = self.normalize(expanded, keep_data=estimate)
        broadcast_shape = [1] * len(states.size()) + [self.out_dim]
        return self.cmp(expanded, self.thresholds.view(*broadcast_shape), beta)

    def reset_parameters(self, parameter_name, name=None):
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)

    def get_descriptions(self, name='Inequality'):
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descriptions = []
        for k in range(theta.size(0)):
            t = self.normalize.recover_threshold(theta[k])
            # Speed/acceleration features were scaled down at data-prep time;
            # undo that scaling for human-readable output.
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descriptions.append('%s > %.2lf' % (name, t))
        return descriptions
class N_aryPrimitivesPredefined(nn.Module):
    """Base container holding a named dict of Inequality primitives."""

    def __init__(self):
        super().__init__()
        self.out_dim = 0
        self.primitive_list = []
        self.ineqs = nn.ModuleDict({})

    def reset_parameters(self, parameter_name):
        for key in self.primitive_list:
            self.ineqs[key].reset_parameters(parameter_name, name=key)

    def get_descriptions(self):
        descriptions = []
        for key in self.primitive_list:
            descriptions.extend(self.ineqs[key].get_descriptions(name=key))
        return descriptions
class BinaryPrimitivesPredefined(N_aryPrimitivesPredefined):
    """Soft pairwise (x, y)-distance predicates between all agent pairs."""

    def __init__(self, cmp_dim=10):
        super().__init__()
        self.distance = Distance()
        self.primitive_list = ['dist_xy']
        self.ineqs.update({'dist_xy': Inequality(out_dim=cmp_dim,
            distribution='normal')})
        self.out_dim = sum(self.ineqs[k].out_dim for k in self.primitive_list)

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, state_dim]
        return [batch, length, n_agents, n_agents, out_dim]
        """
        n_agents = states.size(2)
        # Broadcast to every ordered agent pair (i, j).
        p1 = states.unsqueeze(2).repeat(1, 1, n_agents, 1, 1)
        p2 = states.unsqueeze(3).repeat(1, 1, 1, n_agents, 1)
        ineqs_inputs = {'dist_xy': self.distance(p1, p2, dim_index=(0, 1)).
            squeeze(4)}
        outputs = [self.ineqs[k](ineqs_inputs[k], beta, **kwargs) for k in
            self.primitive_list]
        return torch.cat(outputs, dim=-1)
def get_inputs():
    """Sample forward-pass inputs for the generated benchmark harness."""
    return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    """Constructor (args, kwargs) for the generated benchmark harness."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Inductor-generated fused kernel: for each ordered agent pair it computes
    # the 2-D Euclidean distance over the first two coordinates of the state,
    # then applies 'normal' normalization (dist - mean) / std.
    # in_ptr0: states buffer (4,4,4,4); in_ptr1: (mean, std) parameter pair.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # Decompose the flat index into the (pair_j, pair_i, agent, batch) axes.
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    x4 = xindex
    # mean (index 0) and std (index 1) from the normalization parameter.
    tmp16 = tl.load(in_ptr1 + 0)
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
    tmp19 = tl.load(in_ptr1 + 1)
    tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = tl.full([1], 1, tl.int64)
    tmp2 = tmp0 < tmp1
    # tmp3 selects coordinate 0, tmp9 below selects coordinate 1.
    tmp3 = tl.where(tmp2, tmp0, tmp1)
    tmp4 = tl.load(in_ptr0 + (tmp3 + 4 * x0 + 16 * x2 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (tmp3 + 4 * x1 + 16 * x2 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask,
        eviction_policy='evict_last')
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp1 < tmp1
    tmp9 = tl.where(tmp8, tmp0, tmp1)
    tmp10 = tl.load(in_ptr0 + (tmp9 + 4 * x0 + 16 * x2 + 16 * ((x0 + 4 * x1
        ) // 16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask,
        eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (tmp9 + 4 * x1 + 16 * x2 + 16 * ((x0 + 4 * x1
        ) // 16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask,
        eviction_policy='evict_last')
    tmp12 = tmp10 - tmp11
    tmp13 = tmp12 * tmp12
    tmp14 = tmp7 + tmp13
    # sqrt(dx^2 + dy^2), then (dist - mean) / std.
    tmp15 = libdevice.sqrt(tmp14)
    tmp18 = tmp15 - tmp17
    tmp21 = tmp18 / tmp20
    tl.store(out_ptr0 + x4, tmp21, xmask)
@triton.jit
def triton_poi_fused_div_sigmoid_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Inductor-generated fused kernel: sigmoid((value - threshold) / 1.0),
    # broadcasting each of the 10 thresholds (in_ptr1) over every normalized
    # distance (in_ptr0).  The divisor 1.0 is e^beta with beta == 0 baked in.
    xnumel = 2560
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x1: distance element index; x0: threshold index (last axis of size 10).
    x1 = xindex // 10
    x0 = xindex % 10
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tmp5 = tl.sigmoid(tmp4)
    tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
    # Inductor-generated entry point for BinaryPrimitivesPredefined_v2:
    # primals_1 = states, primals_2 = (mean, std), primals_3 = thresholds.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (2,), (1,))
    assert_size_stride(primals_3, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0: normalized pairwise distances, one scalar per agent pair.
        buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_sub_0[grid(256)](primals_1, primals_2, buf0,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        # buf1: soft comparisons against the 10 thresholds (final output).
        buf1 = empty_strided_cuda((4, 4, 4, 4, 10), (640, 160, 40, 10, 1),
            torch.float32)
        triton_poi_fused_div_sigmoid_sub_1[grid(2560)](buf0, primals_3,
            buf1, 2560, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del primals_3
    # Returned twice: once as the output, once saved for backward.
    return buf1, buf1
def apply_last_dim(model, x):
    """Apply *model* along the last dimension of *x*.

    All leading dimensions are flattened into one batch axis, *model* is run
    on the resulting 2-D tensor, and the output is reshaped back to the
    original leading shape with the model's output width as the last dim.
    """
    shape = list(x.size())
    flat_out = model(x.contiguous().view(-1, shape[-1]))
    shape[-1] = flat_out.size(-1)
    return flat_out.view(torch.Size(shape))
def get_int_dim_index(name):
    """Map an axis name to its integer dim index.

    Integers pass through unchanged.  The letters 'x', 'y', 'z' map to
    0, 1, 2 respectively; 'a' maps to -1 (the "all dims" sentinel that
    Length.forward treats as "use every coordinate").

    Raises:
        AssertionError: if *name* is a string not found in 'axyz'.
    """
    if isinstance(name, int):
        return name
    name_list = 'axyz'
    assert name in name_list
    # str.index replaces the original O(n) list-comprehension scan that
    # built a full list only to take element [0].
    return name_list.index(name) - 1
class Length(nn.Module):
    """Euclidean norm over selected coordinate dims of the last axis.

    dim_index of -1 (or a name mapping to -1) means "use all coordinates";
    otherwise only the listed coordinate indices enter the norm.
    """

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index

    def forward(self, states, dim_index=None):
        if dim_index is None:
            dim_index = self.dim_index
        if isinstance(dim_index, int):
            indices = [dim_index]
        else:
            indices = [get_int_dim_index(n) for n in dim_index]
        if -1 in indices:
            # Sentinel: take the norm over every coordinate.
            def extractor(flat):
                return torch.sqrt(torch.sum(flat * flat, dim=1, keepdim=True))
        else:
            def extractor(flat):
                squared = flat[:, indices].pow(2)
                return torch.sqrt(torch.sum(squared, dim=1, keepdim=True))
        return apply_last_dim(extractor, states)

    def show(self, name='Length', indent=0, log=print, **kwargs):
        log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self.
            dim_index)))
class Distance(nn.Module):
    """Euclidean distance |x1 - x2| over selected coordinate dims."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index
        self.length = Length(dim_index)

    def forward(self, states1, states2, dim_index=None):
        delta = states1 - states2
        return self.length(delta, dim_index)

    def show(self, name='Distance', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name)
class Normalize(nn.Module):
    """Affine input normalization with re-estimable statistics.

    distribution='normal' stores (mean, std) and maps x -> (x - mean) / std;
    distribution='uniform' stores (min, max) and maps
    x -> (x - min) / (max - min + 1e-05); distribution=None is the identity.
    Calling forward(..., keep_data=True) records flattened CPU copies of the
    inputs so reset_parameters() can refit the statistics later.
    """

    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        self.data_ = []
        if distribution is None:
            pass
        elif distribution == 'normal':
            mean = kwargs.get('mean', 0)
            std = kwargs.get('std', 1)
            # Frozen parameter: (mean, std) — not trained by the optimizer.
            self.param = nn.Parameter(torch.Tensor([mean, std]), False)
        elif distribution == 'uniform':
            vmin = kwargs.get('minv', 0)
            vmax = kwargs.get('maxv', 1)
            # Frozen parameter: (min, max).
            self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False)
        else:
            raise NotImplementedError()

    def forward(self, x, keep_data=False):
        if keep_data:
            # Estimation pass: just record the data; do not normalize.
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            return (x - self.param[0]) / self.param[1]
        if self.distribution == 'uniform':
            span = self.param[1] - self.param[0] + 1e-05
            return (x - self.param[0]) / span
        raise NotImplementedError()

    def reset_parameters(self, name=None):
        """Refit the stored statistics from data collected via keep_data."""
        assert len(self.data_) > 0
        data = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = data.mean().item()
                self.param[1] = data.std().item()
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = data.min().item()
                self.param[1] = data.max().item()
        else:
            raise NotImplementedError()

    def recover_threshold(self, x):
        """Map a normalized threshold back to the original data scale."""
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        if self.distribution == 'uniform':
            span = float(self.param[1] - self.param[0] + 1e-05)
            return x * span + float(self.param[0])
        raise NotImplementedError()

    def init_thresholds(self, x):
        """Initialize a threshold tensor to match the normalized scale."""
        if self.distribution in (None, 'normal'):
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """Soft comparison x > y computed as Sigmoid((x - y) / e^beta).

    beta sets the temperature: smaller beta gives a sharper threshold.
    """

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        temperature = math.exp(beta)
        return self.sigmoid((x - y) / temperature)
class Inequality(nn.Module):
    """Bank of out_dim learnable soft thresholds over a scalar feature.

    Appends an out_dim axis to the input and softly compares every scalar
    against each learned threshold via SoftCmp, after normalizing with the
    configured distribution.
    """

    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True
            )
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        self.normalize.init_thresholds(self.thresholds)

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, ... ]
        """
        expanded = states.view(*(states.size() + (1,)))
        estimate = kwargs.get('estimate_parameters', False)
        expanded = self.normalize(expanded, keep_data=estimate)
        broadcast_shape = [1] * len(states.size()) + [self.out_dim]
        return self.cmp(expanded, self.thresholds.view(*broadcast_shape), beta)

    def reset_parameters(self, parameter_name, name=None):
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)

    def get_descriptions(self, name='Inequality'):
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descriptions = []
        for k in range(theta.size(0)):
            t = self.normalize.recover_threshold(theta[k])
            # Speed/acceleration features were scaled down at data-prep time;
            # undo that scaling for human-readable output.
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descriptions.append('%s > %.2lf' % (name, t))
        return descriptions
class N_aryPrimitivesPredefined(nn.Module):
    """Base container holding a named dict of Inequality primitives."""

    def __init__(self):
        super().__init__()
        self.out_dim = 0
        self.primitive_list = []
        self.ineqs = nn.ModuleDict({})

    def reset_parameters(self, parameter_name):
        for key in self.primitive_list:
            self.ineqs[key].reset_parameters(parameter_name, name=key)

    def get_descriptions(self):
        descriptions = []
        for key in self.primitive_list:
            descriptions.extend(self.ineqs[key].get_descriptions(name=key))
        return descriptions
class BinaryPrimitivesPredefinedNew(N_aryPrimitivesPredefined):
    """Pairwise-distance primitive bank; forward runs the compiled kernel."""

    def __init__(self, cmp_dim=10):
        super().__init__()
        self.distance = Distance()
        self.primitive_list = ['dist_xy']
        self.ineqs.update({'dist_xy': Inequality(out_dim=cmp_dim,
            distribution='normal')})
        self.out_dim = sum(self.ineqs[k].out_dim for k in self.primitive_list)

    def forward(self, input_0):
        # Pack arguments in the order the inductor-generated `call` expects.
        primals_3 = self.ineqs.dist_xy.thresholds
        primals_2 = self.ineqs.dist_xy.normalize.param
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
BinaryPrimitivesPredefined
| false
| 17,148
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
Conv3BN
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.backends.cudnn
def conv3x3(in_, out):
    """3x3 convolution with padding 1, preserving spatial size."""
    return nn.Conv2d(in_, out, 3, padding=1)
class Conv3BN(nn.Module):
    """3x3 conv -> optional BatchNorm -> in-place ReLU."""

    def __init__(self, in_: 'int', out: 'int', bn=False):
        super().__init__()
        self.conv = conv3x3(in_, out)
        self.bn = nn.BatchNorm2d(out) if bn else None
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        return self.activation(out)
def get_inputs():
    """Sample forward-pass inputs for the generated benchmark harness."""
    return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    """Constructor (args, kwargs): Conv3BN(in_=4, out=4)."""
    return [[], {'in_': 4, 'out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
    in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Inductor-generated fused epilogue: add conv bias, apply ReLU in place,
    # and emit the (activation <= 0) mask used by the ReLU backward pass.
    # in_out_ptr0: conv output (4,4,4,4), updated in place; in_ptr0: bias (4,).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    # x1: output-channel index for the per-channel bias.
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
    # Inductor-generated entry point for Conv3BN (bn=False):
    # primals_1 = conv weight, primals_2 = conv bias, primals_3 = input.
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Bias-less convolution via cuDNN; bias is fused into the Triton
        # epilogue below.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        # buf2: boolean (activation <= 0) mask saved for ReLU backward.
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return buf1, primals_1, primals_3, buf2
def conv3x3(in_, out):
    """3x3 convolution with padding 1, preserving spatial size."""
    return nn.Conv2d(in_, out, 3, padding=1)
class Conv3BNNew(nn.Module):
    """Conv3BN variant whose forward dispatches to the compiled kernel."""

    def __init__(self, in_: 'int', out: 'int', bn=False):
        super().__init__()
        self.conv = conv3x3(in_, out)
        self.bn = nn.BatchNorm2d(out) if bn else None
        self.activation = nn.ReLU(inplace=True)

    def forward(self, input_0):
        # Pack arguments in the order the inductor-generated `call` expects.
        primals_1 = self.conv.weight
        primals_2 = self.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
CalebEverett/fastai-dl2
|
Conv3BN
| false
| 17,149
|
[
"Apache-2.0"
] | 4
|
64d23592eddca6ca1f3647e73c319e97c8eb392b
|
https://github.com/CalebEverett/fastai-dl2/tree/64d23592eddca6ca1f3647e73c319e97c8eb392b
|
MatrixTree
|
import torch
import torch.nn as nn
import torch.cuda
import torch.distributed
class MatrixTree(nn.Module):
    """Implementation of the matrix-tree theorem for computing marginals
    of non-projective dependency parsing. This attention layer is used
    in the paper "Learning Structured Text Representations"
    :cite:`DBLP:journals/corr/LiuL17d`.
    """

    def __init__(self, eps=1e-05):
        # eps keeps the Laplacian away from singularity before inversion.
        self.eps = eps
        super(MatrixTree, self).__init__()

    def forward(self, input):
        # input: batch of square score matrices (log-potentials).
        # assumes input is [batch, n, n] — TODO confirm with callers.
        laplacian = input.exp() + self.eps
        output = input.clone()
        for b in range(input.size(0)):
            # Zero the diagonal of the edge-weight matrix.
            lap = laplacian[b].masked_fill(torch.eye(input.size(1), device=
                input.device).ne(0), 0)
            # Graph Laplacian: D - A (negated weights plus column-sum diag).
            lap = -lap + torch.diag(lap.sum(0))
            # Row 0 is replaced by the root scores (exp of the diagonal).
            lap[0] = input[b].diag().exp()
            inv_laplacian = lap.inverse()
            factor = inv_laplacian.diag().unsqueeze(1).expand_as(input[b]
                ).transpose(0, 1)
            # Marginals: w_ij * (B_jj - B_ji) with first row/col masked.
            term1 = input[b].exp().mul(factor).clone()
            term2 = input[b].exp().mul(inv_laplacian.transpose(0, 1)).clone()
            term1[:, 0] = 0
            term2[0] = 0
            output[b] = term1 - term2
            # Root-attachment marginals go on the output diagonal.
            roots_output = input[b].diag().exp().mul(inv_laplacian.
                transpose(0, 1)[0])
            output[b] = output[b] + torch.diag(roots_output)
        return output
def get_inputs():
    """Sample forward-pass inputs for the generated benchmark harness."""
    return [torch.rand([4, 4, 4])]
def get_init_inputs():
    """Constructor (args, kwargs) for the generated benchmark harness."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.cuda
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Inductor-generated kernel for MatrixTree, batch element 0:
    # column sums of (exp(input) + 1e-05) with the diagonal masked to zero
    # (the degree vector for the Laplacian).  The four rows of the 4x4
    # matrix are fully unrolled below.
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    # One load per row of column x0 (matrix 0 starts at offset 0).
    tmp7 = tl.load(in_ptr0 + x0, xmask)
    tmp16 = tl.load(in_ptr0 + (4 + x0), xmask)
    tmp25 = tl.load(in_ptr0 + (8 + x0), xmask)
    tmp34 = tl.load(in_ptr0 + (12 + x0), xmask)
    # Row 0 contribution: zeroed when (0, x0) is on the diagonal.
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = x0
    tmp2 = tmp0 == tmp1
    tmp3 = 1.0
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = tmp5 != tmp4
    tmp8 = tl_math.exp(tmp7)
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = tl.where(tmp6, tmp4, tmp10)
    tmp12 = tl.full([1], 1, tl.int64)
    tmp13 = tmp12 == tmp1
    tmp14 = tl.where(tmp13, tmp3, tmp4)
    tmp15 = tmp14 != tmp4
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp17 + tmp9
    tmp19 = tl.where(tmp15, tmp4, tmp18)
    tmp20 = tmp11 + tmp19
    tmp21 = tl.full([1], 2, tl.int64)
    tmp22 = tmp21 == tmp1
    tmp23 = tl.where(tmp22, tmp3, tmp4)
    tmp24 = tmp23 != tmp4
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp26 + tmp9
    tmp28 = tl.where(tmp24, tmp4, tmp27)
    tmp29 = tmp20 + tmp28
    tmp30 = tl.full([1], 3, tl.int64)
    tmp31 = tmp30 == tmp1
    tmp32 = tl.where(tmp31, tmp3, tmp4)
    tmp33 = tmp32 != tmp4
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp35 + tmp9
    tmp37 = tl.where(tmp33, tmp4, tmp36)
    tmp38 = tmp29 + tmp37
    tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1(
    in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Inductor-generated kernel for MatrixTree, batch element 0: builds the
    # Laplacian  -A + diag(colsum)  and overwrites row 0 with exp(diag(input))
    # (the root scores).  in_ptr1 holds the column sums from kernel 0.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x1: row, x0: column of the 4x4 matrix.
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    # Diagonal entry input[x0, x0] (stride 5 walks the diagonal).
    tmp3 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + x2, xmask)
    tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = tl_math.exp(tmp3)
    tmp5 = x0
    tmp6 = tmp0 == tmp5
    tmp7 = 1.0
    tmp8 = 0.0
    tmp9 = tl.where(tmp6, tmp7, tmp8)
    tmp10 = tmp9 != tmp8
    tmp12 = tl_math.exp(tmp11)
    tmp13 = 1e-05
    tmp14 = tmp12 + tmp13
    # Off-diagonal: -(exp(w) + eps); diagonal: +column sum.
    tmp15 = tl.where(tmp10, tmp8, tmp14)
    tmp16 = -tmp15
    tmp17 = tmp5 == tmp0
    tmp19 = tl.where(tmp17, tmp18, tmp8)
    tmp20 = tmp16 + tmp19
    # Row 0 takes the root scores instead.
    tmp21 = tl.where(tmp2, tmp4, tmp20)
    tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Same column-sum kernel as ..._sum_0 but for batch element 1
    # (matrix starts at flat offset 16 in the [4,4,4] input).
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp7 = tl.load(in_ptr0 + (16 + x0), xmask)
    tmp16 = tl.load(in_ptr0 + (20 + x0), xmask)
    tmp25 = tl.load(in_ptr0 + (24 + x0), xmask)
    tmp34 = tl.load(in_ptr0 + (28 + x0), xmask)
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = x0
    tmp2 = tmp0 == tmp1
    tmp3 = 1.0
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = tmp5 != tmp4
    tmp8 = tl_math.exp(tmp7)
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = tl.where(tmp6, tmp4, tmp10)
    tmp12 = tl.full([1], 1, tl.int64)
    tmp13 = tmp12 == tmp1
    tmp14 = tl.where(tmp13, tmp3, tmp4)
    tmp15 = tmp14 != tmp4
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp17 + tmp9
    tmp19 = tl.where(tmp15, tmp4, tmp18)
    tmp20 = tmp11 + tmp19
    tmp21 = tl.full([1], 2, tl.int64)
    tmp22 = tmp21 == tmp1
    tmp23 = tl.where(tmp22, tmp3, tmp4)
    tmp24 = tmp23 != tmp4
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp26 + tmp9
    tmp28 = tl.where(tmp24, tmp4, tmp27)
    tmp29 = tmp20 + tmp28
    tmp30 = tl.full([1], 3, tl.int64)
    tmp31 = tmp30 == tmp1
    tmp32 = tl.where(tmp31, tmp3, tmp4)
    tmp33 = tmp32 != tmp4
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp35 + tmp9
    tmp37 = tl.where(tmp33, tmp4, tmp36)
    tmp38 = tmp29 + tmp37
    tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3(
    in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Same Laplacian-build kernel as ..._neg_1 but for batch element 1
    # (matrix starts at flat offset 16).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + (16 + 5 * x0), xmask, eviction_policy='evict_last'
        )
    tmp11 = tl.load(in_ptr0 + (16 + x2), xmask)
    tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = tl_math.exp(tmp3)
    tmp5 = x0
    tmp6 = tmp0 == tmp5
    tmp7 = 1.0
    tmp8 = 0.0
    tmp9 = tl.where(tmp6, tmp7, tmp8)
    tmp10 = tmp9 != tmp8
    tmp12 = tl_math.exp(tmp11)
    tmp13 = 1e-05
    tmp14 = tmp12 + tmp13
    tmp15 = tl.where(tmp10, tmp8, tmp14)
    tmp16 = -tmp15
    tmp17 = tmp5 == tmp0
    tmp19 = tl.where(tmp17, tmp18, tmp8)
    tmp20 = tmp16 + tmp19
    tmp21 = tl.where(tmp2, tmp4, tmp20)
    tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_4(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Inductor-generated kernel for MatrixTree, batch element 0: combines the
    # marginal terms  exp(w) * B_jj  -  exp(w) * B_ji  (first column / first
    # row zeroed) and adds the root marginals on the diagonal.
    # in_ptr0: input scores; in_ptr1: inverse Laplacian (transposed reads).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp4 = tl.load(in_ptr0 + x2, xmask)
    tmp6 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + x2, xmask)
    tmp18 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp0 = tl.full([1], 0, tl.int32)
    tmp1 = tmp0 == tmp0
    tmp2 = x0
    tmp3 = tmp2 == tmp0
    tmp5 = tl_math.exp(tmp4)
    tmp7 = tmp5 * tmp6
    tmp8 = 0.0
    # term1 with column 0 zeroed; term2 with row 0 zeroed.
    tmp9 = tl.where(tmp3, tmp8, tmp7)
    tmp10 = x1
    tmp11 = tmp10 == tmp0
    tmp13 = tmp5 * tmp12
    tmp14 = tl.where(tmp11, tmp8, tmp13)
    tmp15 = tmp9 - tmp14
    tmp16 = tl.where(tmp1, tmp15, tmp4)
    # Root marginals exp(diag) * invLap[:, 0] added on the diagonal.
    tmp17 = tmp2 == tmp10
    tmp19 = tl_math.exp(tmp18)
    tmp21 = tmp19 * tmp20
    tmp22 = tl.where(tmp17, tmp21, tmp8)
    tmp23 = tmp16 + tmp22
    tl.store(out_ptr0 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Inductor-generated scatter for MatrixTree: writes the finished marginals
    # for batch element 0 (in_ptr0) into the full [4,4,4] output, recomputing
    # the term1 - term2 combination for that slice and copying the untouched
    # input (in_ptr1) for every other batch element.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x2: batch index; x3: position within a 4x4 matrix.
    x2 = xindex // 16
    x3 = xindex % 16
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x5 = xindex
    tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr1 + x5, xmask)
    tmp0 = x2
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = x0
    tmp5 = tmp4 == tmp1
    tmp7 = tl_math.exp(tmp6)
    tmp9 = tmp7 * tmp8
    tmp10 = 0.0
    tmp11 = tl.where(tmp5, tmp10, tmp9)
    tmp12 = x1
    tmp13 = tmp12 == tmp1
    tmp15 = tmp7 * tmp14
    tmp16 = tl.where(tmp13, tmp10, tmp15)
    tmp17 = tmp11 - tmp16
    # Batch 0 gets the computed marginals, other batches keep the input.
    tmp19 = tl.where(tmp2, tmp17, tmp18)
    tmp20 = tl.where(tmp2, tmp3, tmp19)
    tl.store(out_ptr0 + x5, tmp20, xmask)
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_6(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Same column-sum kernel as ..._sum_0 but for batch element 2
    # (matrix starts at flat offset 32 in the [4,4,4] input).
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp7 = tl.load(in_ptr0 + (32 + x0), xmask)
    tmp16 = tl.load(in_ptr0 + (36 + x0), xmask)
    tmp25 = tl.load(in_ptr0 + (40 + x0), xmask)
    tmp34 = tl.load(in_ptr0 + (44 + x0), xmask)
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = x0
    tmp2 = tmp0 == tmp1
    tmp3 = 1.0
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = tmp5 != tmp4
    tmp8 = tl_math.exp(tmp7)
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = tl.where(tmp6, tmp4, tmp10)
    tmp12 = tl.full([1], 1, tl.int64)
    tmp13 = tmp12 == tmp1
    tmp14 = tl.where(tmp13, tmp3, tmp4)
    tmp15 = tmp14 != tmp4
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp17 + tmp9
    tmp19 = tl.where(tmp15, tmp4, tmp18)
    tmp20 = tmp11 + tmp19
    tmp21 = tl.full([1], 2, tl.int64)
    tmp22 = tmp21 == tmp1
    tmp23 = tl.where(tmp22, tmp3, tmp4)
    tmp24 = tmp23 != tmp4
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp26 + tmp9
    tmp28 = tl.where(tmp24, tmp4, tmp27)
    tmp29 = tmp20 + tmp28
    tmp30 = tl.full([1], 3, tl.int64)
    tmp31 = tmp30 == tmp1
    tmp32 = tl.where(tmp31, tmp3, tmp4)
    tmp33 = tmp32 != tmp4
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp35 + tmp9
    tmp37 = tl.where(tmp33, tmp4, tmp36)
    tmp38 = tmp29 + tmp37
    tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7(
    in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Assemble a 4x4 system matrix for the slice at offset 32 of in_ptr0.

    Row 0 (x1 == 0) holds exp of the slice's diagonal (loads at
    32 + 5 * x0); every other entry is -(exp(v) + 1e-05) with identity
    positions zeroed, plus the per-column sums from in_ptr1 embedded on
    the diagonal.  NOTE(review): this looks like the root-augmented
    Laplacian used by the matrix-tree theorem (see MatrixTreeNew) --
    confirm against the eager implementation.
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + (32 + 5 * x0), xmask, eviction_policy='evict_last'
        )
    tmp11 = tl.load(in_ptr0 + (32 + x2), xmask)
    tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = tl_math.exp(tmp3)
    tmp5 = x0
    tmp6 = tmp0 == tmp5
    tmp7 = 1.0
    tmp8 = 0.0
    tmp9 = tl.where(tmp6, tmp7, tmp8)
    tmp10 = tmp9 != tmp8
    tmp12 = tl_math.exp(tmp11)
    tmp13 = 1e-05
    tmp14 = tmp12 + tmp13
    tmp15 = tl.where(tmp10, tmp8, tmp14)
    tmp16 = -tmp15
    tmp17 = tmp5 == tmp0
    tmp19 = tl.where(tmp17, tmp18, tmp8)
    tmp20 = tmp16 + tmp19
    tmp21 = tl.where(tmp2, tmp4, tmp20)
    tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    """Build the 4x4 output tile for batch slice 1 (offset 16 of in_ptr0).

    in_ptr1 is the inverse of the slice's system matrix: exp(score)
    times its diagonal (stride 5 loads), masked at column 0, minus
    exp(score) times the full inverse, masked at row 0, plus an
    exp(diag) * inv diagonal term.  in_ptr2 carries the running output
    from the previous stage; tmp1 is a constant-true compare (1 == 1)
    left over from fusion, so the fresh tile always wins here.
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp5 = tl.load(in_ptr0 + (16 + x2), xmask)
    tmp7 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + x2, xmask)
    tmp17 = tl.load(in_ptr2 + (16 + x2), xmask)
    tmp20 = tl.load(in_ptr0 + (16 + 5 * x0), xmask, eviction_policy=
        'evict_last')
    tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp0 = tl.full([1], 1, tl.int32)
    tmp1 = tmp0 == tmp0
    tmp2 = x0
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = tmp2 == tmp3
    tmp6 = tl_math.exp(tmp5)
    tmp8 = tmp6 * tmp7
    tmp9 = 0.0
    tmp10 = tl.where(tmp4, tmp9, tmp8)
    tmp11 = x1
    tmp12 = tmp11 == tmp3
    tmp14 = tmp6 * tmp13
    tmp15 = tl.where(tmp12, tmp9, tmp14)
    tmp16 = tmp10 - tmp15
    tmp18 = tl.where(tmp1, tmp16, tmp17)
    tmp19 = tmp2 == tmp11
    tmp21 = tl_math.exp(tmp20)
    tmp23 = tmp21 * tmp22
    tmp24 = tl.where(tmp19, tmp23, tmp9)
    tmp25 = tmp18 + tmp24
    tl.store(out_ptr0 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
    """In-place scatter of a finished 4x4 tile into slice x2 == 1.

    Elements outside the target slice keep their current value
    (tmp19 read-back from in_out_ptr0); the fused exp/mul/sub
    recomputation (tmp8..tmp18) is overridden by the final select,
    which writes the precomputed tile from in_ptr0.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 16
    x3 = xindex % 16
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x5 = xindex
    tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (16 + x3), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_out_ptr0 + x5, xmask)
    tmp0 = x2
    tmp1 = tl.full([1], 1, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = x0
    tmp5 = tl.full([1], 0, tl.int32)
    tmp6 = tmp4 == tmp5
    tmp8 = tl_math.exp(tmp7)
    tmp10 = tmp8 * tmp9
    tmp11 = 0.0
    tmp12 = tl.where(tmp6, tmp11, tmp10)
    tmp13 = x1
    tmp14 = tmp13 == tmp5
    tmp16 = tmp8 * tmp15
    tmp17 = tl.where(tmp14, tmp11, tmp16)
    tmp18 = tmp12 - tmp17
    tmp20 = tl.where(tmp2, tmp18, tmp19)
    tmp21 = tl.where(tmp2, tmp3, tmp20)
    tl.store(in_out_ptr0 + x5, tmp21, xmask)
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_10(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Column sums of exp(v) + 1e-05 with the diagonal masked out.

    Same computation as kernel *_sum_6 but for the fourth 4x4 slice of
    in_ptr0 (rows at flat offsets 48/52/56/60); writes a length-4
    per-column sum.
    """
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp7 = tl.load(in_ptr0 + (48 + x0), xmask)
    tmp16 = tl.load(in_ptr0 + (52 + x0), xmask)
    tmp25 = tl.load(in_ptr0 + (56 + x0), xmask)
    tmp34 = tl.load(in_ptr0 + (60 + x0), xmask)
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = x0
    tmp2 = tmp0 == tmp1
    tmp3 = 1.0
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = tmp5 != tmp4
    tmp8 = tl_math.exp(tmp7)
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = tl.where(tmp6, tmp4, tmp10)
    tmp12 = tl.full([1], 1, tl.int64)
    tmp13 = tmp12 == tmp1
    tmp14 = tl.where(tmp13, tmp3, tmp4)
    tmp15 = tmp14 != tmp4
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp17 + tmp9
    tmp19 = tl.where(tmp15, tmp4, tmp18)
    tmp20 = tmp11 + tmp19
    tmp21 = tl.full([1], 2, tl.int64)
    tmp22 = tmp21 == tmp1
    tmp23 = tl.where(tmp22, tmp3, tmp4)
    tmp24 = tmp23 != tmp4
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp26 + tmp9
    tmp28 = tl.where(tmp24, tmp4, tmp27)
    tmp29 = tmp20 + tmp28
    tmp30 = tl.full([1], 3, tl.int64)
    tmp31 = tmp30 == tmp1
    tmp32 = tl.where(tmp31, tmp3, tmp4)
    tmp33 = tmp32 != tmp4
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp35 + tmp9
    tmp37 = tl.where(tmp33, tmp4, tmp36)
    tmp38 = tmp29 + tmp37
    tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11(
    in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Assemble the 4x4 system matrix for the slice at offset 48 of in_ptr0.

    Same construction as kernel *_neg_7, shifted to the fourth batch
    slice: row 0 is exp(diagonal), other rows are -(exp(v) + 1e-05)
    with identity positions zeroed plus diag(in_ptr1 column sums).
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + (48 + 5 * x0), xmask, eviction_policy='evict_last'
        )
    tmp11 = tl.load(in_ptr0 + (48 + x2), xmask)
    tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = tl_math.exp(tmp3)
    tmp5 = x0
    tmp6 = tmp0 == tmp5
    tmp7 = 1.0
    tmp8 = 0.0
    tmp9 = tl.where(tmp6, tmp7, tmp8)
    tmp10 = tmp9 != tmp8
    tmp12 = tl_math.exp(tmp11)
    tmp13 = 1e-05
    tmp14 = tmp12 + tmp13
    tmp15 = tl.where(tmp10, tmp8, tmp14)
    tmp16 = -tmp15
    tmp17 = tmp5 == tmp0
    tmp19 = tl.where(tmp17, tmp18, tmp8)
    tmp20 = tmp16 + tmp19
    tmp21 = tl.where(tmp2, tmp4, tmp20)
    tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    """Build the 4x4 output tile for batch slice 2 (offset 32 of in_ptr0).

    Same structure as kernel *_8: exp(score) times the inverse system
    matrix in in_ptr1 with row-0/column-0 masking and a diagonal
    exp(diag) * inv term.  tmp1 is a constant-true compare (2 == 2),
    so the fresh tile always replaces in_ptr2 here.
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp5 = tl.load(in_ptr0 + (32 + x2), xmask)
    tmp7 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + x2, xmask)
    tmp17 = tl.load(in_ptr2 + (32 + x2), xmask)
    tmp20 = tl.load(in_ptr0 + (32 + 5 * x0), xmask, eviction_policy=
        'evict_last')
    tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp0 = tl.full([1], 2, tl.int32)
    tmp1 = tmp0 == tmp0
    tmp2 = x0
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = tmp2 == tmp3
    tmp6 = tl_math.exp(tmp5)
    tmp8 = tmp6 * tmp7
    tmp9 = 0.0
    tmp10 = tl.where(tmp4, tmp9, tmp8)
    tmp11 = x1
    tmp12 = tmp11 == tmp3
    tmp14 = tmp6 * tmp13
    tmp15 = tl.where(tmp12, tmp9, tmp14)
    tmp16 = tmp10 - tmp15
    tmp18 = tl.where(tmp1, tmp16, tmp17)
    tmp19 = tmp2 == tmp11
    tmp21 = tl_math.exp(tmp20)
    tmp23 = tmp21 * tmp22
    tmp24 = tl.where(tmp19, tmp23, tmp9)
    tmp25 = tmp18 + tmp24
    tl.store(out_ptr0 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
    """In-place scatter of a finished 4x4 tile into slice x2 == 2.

    Same structure as kernel *_sub_9, targeting the third batch slice
    (in_ptr1 offset 32): non-target slices keep their current values,
    and the recomputed exp/mul/sub intermediate is overridden by the
    final select that writes in_ptr0's tile.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 16
    x3 = xindex % 16
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x5 = xindex
    tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (32 + x3), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_out_ptr0 + x5, xmask)
    tmp0 = x2
    tmp1 = tl.full([1], 2, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = x0
    tmp5 = tl.full([1], 0, tl.int32)
    tmp6 = tmp4 == tmp5
    tmp8 = tl_math.exp(tmp7)
    tmp10 = tmp8 * tmp9
    tmp11 = 0.0
    tmp12 = tl.where(tmp6, tmp11, tmp10)
    tmp13 = x1
    tmp14 = tmp13 == tmp5
    tmp16 = tmp8 * tmp15
    tmp17 = tl.where(tmp14, tmp11, tmp16)
    tmp18 = tmp12 - tmp17
    tmp20 = tl.where(tmp2, tmp18, tmp19)
    tmp21 = tl.where(tmp2, tmp3, tmp20)
    tl.store(in_out_ptr0 + x5, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    """Build the 4x4 output tile for batch slice 3 (offset 48 of in_ptr0).

    Same structure as kernels *_8 and *_12; tmp1 is a constant-true
    compare (3 == 3) left over from fusion.
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp5 = tl.load(in_ptr0 + (48 + x2), xmask)
    tmp7 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + x2, xmask)
    tmp17 = tl.load(in_ptr2 + (48 + x2), xmask)
    tmp20 = tl.load(in_ptr0 + (48 + 5 * x0), xmask, eviction_policy=
        'evict_last')
    tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp0 = tl.full([1], 3, tl.int32)
    tmp1 = tmp0 == tmp0
    tmp2 = x0
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = tmp2 == tmp3
    tmp6 = tl_math.exp(tmp5)
    tmp8 = tmp6 * tmp7
    tmp9 = 0.0
    tmp10 = tl.where(tmp4, tmp9, tmp8)
    tmp11 = x1
    tmp12 = tmp11 == tmp3
    tmp14 = tmp6 * tmp13
    tmp15 = tl.where(tmp12, tmp9, tmp14)
    tmp16 = tmp10 - tmp15
    tmp18 = tl.where(tmp1, tmp16, tmp17)
    tmp19 = tmp2 == tmp11
    tmp21 = tl_math.exp(tmp20)
    tmp23 = tmp21 * tmp22
    tmp24 = tl.where(tmp19, tmp23, tmp9)
    tmp25 = tmp18 + tmp24
    tl.store(out_ptr0 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
    """In-place scatter of the final 4x4 tile into slice x2 == 3.

    Same structure as kernels *_sub_9 and *_sub_13, targeting the last
    batch slice (in_ptr1 offset 48); completes the (4, 4, 4) output.
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 16
    x3 = xindex % 16
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x5 = xindex
    tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (48 + x3), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_out_ptr0 + x5, xmask)
    tmp0 = x2
    tmp1 = tl.full([1], 3, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = x0
    tmp5 = tl.full([1], 0, tl.int32)
    tmp6 = tmp4 == tmp5
    tmp8 = tl_math.exp(tmp7)
    tmp10 = tmp8 * tmp9
    tmp11 = 0.0
    tmp12 = tl.where(tmp6, tmp11, tmp10)
    tmp13 = x1
    tmp14 = tmp13 == tmp5
    tmp16 = tmp8 * tmp15
    tmp17 = tl.where(tmp14, tmp11, tmp16)
    tmp18 = tmp12 - tmp17
    tmp20 = tl.where(tmp2, tmp18, tmp19)
    tmp21 = tl.where(tmp2, tmp3, tmp20)
    tl.store(in_out_ptr0 + x5, tmp21, xmask)
def call(args):
    """Compiled matrix-tree forward over a (4, 4, 4) batch of score matrices.

    For each of the 4 slices: build column sums and the masked system
    matrix (eye_masked_fill / *_neg kernels), invert it with
    linalg_inv_ex, then scatter the exp(score)-times-inverse tile into
    the output (add_diag_embed / *_sub kernels).  The compiler unrolled
    the batch loop and recycles buffers aggressively in-place.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # --- slice 0: column sums, system matrix, inverse (buf3) ---
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_eye_masked_fill_ne_sum_0[grid(4)](arg0_1, buf0, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1[
            grid(16)](arg0_1, buf0, buf1, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf2 = torch.ops.aten.linalg_inv_ex.default(buf1)
        buf3 = buf2[0]
        del buf2
        # --- slice 1: same pipeline, reusing buf0/buf1 storage (buf8) ---
        buf5 = buf0
        del buf0
        triton_poi_fused_eye_masked_fill_ne_sum_2[grid(4)](arg0_1, buf5, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        buf6 = buf1
        del buf1
        triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3[
            grid(16)](arg0_1, buf5, buf6, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf7 = torch.ops.aten.linalg_inv_ex.default(buf6)
        buf8 = buf7[0]
        del buf7
        # --- scatter slice 0's marginal tile into the (4, 4, 4) output ---
        buf10 = buf6
        del buf6
        triton_poi_fused_add_diag_embed_4[grid(16)](arg0_1, buf3, buf10, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5[grid(64)
            ](buf10, arg0_1, buf3, buf11, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del buf10
        # --- slice 2 system matrix + inverse (buf15) ---
        buf12 = buf5
        del buf5
        triton_poi_fused_eye_masked_fill_ne_sum_6[grid(4)](arg0_1, buf12, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        buf13 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0)
        del buf3
        triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7[
            grid(16)](arg0_1, buf12, buf13, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf14 = torch.ops.aten.linalg_inv_ex.default(buf13)
        buf15 = buf14[0]
        del buf14
        # --- scatter slice 1 ---
        buf17 = buf13
        del buf13
        triton_poi_fused_add_diag_embed_8[grid(16)](arg0_1, buf8, buf11,
            buf17, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf18 = buf11
        del buf11
        triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9[grid(64)
            ](buf18, buf17, arg0_1, buf8, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del buf17
        # --- slice 3 system matrix + inverse (buf22) ---
        buf19 = buf12
        del buf12
        triton_poi_fused_eye_masked_fill_ne_sum_10[grid(4)](arg0_1, buf19,
            4, XBLOCK=4, num_warps=1, num_stages=1)
        buf20 = reinterpret_tensor(buf8, (4, 4), (4, 1), 0)
        del buf8
        triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11[
            grid(16)](arg0_1, buf19, buf20, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        del buf19
        buf21 = torch.ops.aten.linalg_inv_ex.default(buf20)
        buf22 = buf21[0]
        del buf21
        # --- scatter slices 2 and 3, completing the output in buf27 ---
        buf24 = buf20
        del buf20
        triton_poi_fused_add_diag_embed_12[grid(16)](arg0_1, buf15, buf18,
            buf24, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf25 = buf18
        del buf18
        triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13[grid(64)
            ](buf25, buf24, arg0_1, buf15, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del buf15
        buf26 = buf24
        del buf24
        triton_poi_fused_add_diag_embed_14[grid(16)](arg0_1, buf22, buf25,
            buf26, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf27 = buf25
        del buf25
        triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15[grid(64)
            ](buf27, buf26, arg0_1, buf22, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del arg0_1
        del buf22
        del buf26
    return buf27,
class MatrixTreeNew(nn.Module):
    """Implementation of the matrix-tree theorem for computing marginals
    of non-projective dependency parsing. This attention layer is used
    in the paper "Learning Structured Text Representations"
    :cite:`DBLP:journals/corr/LiuL17d`.

    Wraps the Inductor-compiled ``call`` pipeline defined above; it
    expects a (4, 4, 4) CUDA tensor of scores (enforced by
    ``assert_size_stride`` inside ``call``) and returns the (4, 4, 4)
    marginal tensor.
    """

    def __init__(self, eps=1e-05):
        # Initialize the nn.Module machinery first, per PyTorch
        # convention, before assigning attributes on self.
        super(MatrixTreeNew, self).__init__()
        # NOTE(review): the compiled kernels hard-code 1e-05, so a
        # non-default eps does not affect the fused computation.
        self.eps = eps

    def forward(self, input_0):
        """Run the compiled kernel pipeline on a single score tensor."""
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
ChenRocks/Distill-BERT-Textgen-ONMT
|
MatrixTree
| false
| 17,150
|
[
"MIT"
] | 7
|
d83dd1a95af7513cbfae4a2768f6effc2f3a589f
|
https://github.com/ChenRocks/Distill-BERT-Textgen-ONMT/tree/d83dd1a95af7513cbfae4a2768f6effc2f3a589f
|
NullaryPrimitivesPredefined_v2
|
import math
import torch
from torch import nn
class Normalize(nn.Module):
    """Normalize inputs according to a fitted distribution.

    distribution=None      -- identity (no-op)
    distribution='normal'  -- (x - mean) / std
    distribution='uniform' -- (x - vmin) / (vmax - vmin + 1e-05)

    ``param`` is a frozen (requires_grad=False) length-2 parameter
    holding (mean, std) or (vmin, vmax).  ``data_`` buffers observations
    collected via ``forward(keep_data=True)`` until
    ``reset_parameters()`` refits ``param`` from them.
    """

    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        self.data_ = []
        if distribution is None:
            pass
        elif distribution == 'normal':
            stats = [kwargs.get('mean', 0), kwargs.get('std', 1)]
            self.param = nn.Parameter(torch.Tensor(stats), False)
        elif distribution == 'uniform':
            bounds = [kwargs.get('minv', 0), kwargs.get('maxv', 1)]
            self.param = nn.Parameter(torch.Tensor(bounds), False)
        else:
            raise NotImplementedError()

    def forward(self, x, keep_data=False):
        # Estimation mode: stash a flattened CPU copy and pass x through.
        if keep_data:
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            mean, std = self.param[0], self.param[1]
            return (x - mean) / std
        if self.distribution == 'uniform':
            vmin, vmax = self.param[0], self.param[1]
            return (x - vmin) / (vmax - vmin + 1e-05)
        raise NotImplementedError()

    def reset_parameters(self, name=None):
        """Refit ``param`` from the buffered observations, then clear them."""
        assert len(self.data_) > 0
        samples = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = samples.mean().item()
                self.param[1] = samples.std().item()
            if name is not None:
                None  # no-op: logging stripped by the export tool
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = samples.min().item()
                self.param[1] = samples.max().item()
            if name is not None:
                None  # no-op: logging stripped by the export tool
        else:
            raise NotImplementedError()

    def recover_threshold(self, x):
        """Map a normalized threshold back to the raw input scale."""
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        if self.distribution == 'uniform':
            return x * float(self.param[1] - self.param[0] + 1e-05) + float(
                self.param[0])
        raise NotImplementedError()

    def init_thresholds(self, x):
        """Initialize a threshold tensor in the normalized space."""
        if self.distribution is None:
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'normal':
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """Differentiable comparison: Sigmoid((x - y) / e^beta).

    Larger beta divides the difference by a larger temperature, giving a
    smoother transition; smaller beta sharpens it.
    """

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        temperature = math.exp(beta)
        return self.sigmoid((x - y) / temperature)
class Inequality(nn.Module):
    """Learnable soft thresholds: sigmoid((normalize(x) - theta) / e^beta).

    Each scalar input is broadcast against ``out_dim`` thresholds, so the
    output gains a trailing dimension of size ``out_dim``.
    """

    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        self.thresholds = nn.Parameter(torch.zeros(out_dim),
            requires_grad=True)
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        self.normalize.init_thresholds(self.thresholds)

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, ... ]
        """
        expanded = states.view(*(states.size() + (1,)))
        collecting = kwargs.get('estimate_parameters', False)
        expanded = self.normalize(expanded, keep_data=collecting)
        broadcast_shape = [1] * len(states.size()) + [self.out_dim]
        return self.cmp(expanded, self.thresholds.view(*broadcast_shape), beta)

    def reset_parameters(self, parameter_name, name=None):
        # Refit the normalizer from collected data, then re-draw the
        # thresholds in the new normalized space.
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)

    def get_descriptions(self, name='Inequality'):
        """Human-readable '<name> > <threshold>' strings in raw units."""
        raw = self.thresholds.detach().cpu().view(self.out_dim)
        lines = []
        for idx in range(raw.size(0)):
            value = self.normalize.recover_threshold(raw[idx])
            # Unit rescaling for speed/acceleration primitives.
            if 'speed' in name:
                value = value * 8
            if 'acc' in name:
                value = value * 64
            lines.append('%s > %.2lf' % (name, value))
        return lines
class N_aryPrimitivesPredefined(nn.Module):
    """Base container for predefined n-ary primitives.

    Subclasses populate ``primitive_list`` with names, ``ineqs`` with a
    per-name Inequality module, and set ``out_dim`` accordingly.
    """

    def __init__(self):
        super().__init__()
        self.out_dim = 0
        self.primitive_list = []
        self.ineqs = nn.ModuleDict({})

    def reset_parameters(self, parameter_name):
        """Forward the reset request to every registered primitive."""
        for key in self.primitive_list:
            self.ineqs[key].reset_parameters(parameter_name, name=key)

    def get_descriptions(self):
        """Collect description strings from all primitives, in order."""
        return [desc for key in self.primitive_list for desc in self.ineqs
            [key].get_descriptions(name=key)]
class AlignDifferential(nn.Module):
    """Length-preserving central difference along dim 1."""

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        # The reflect-extrapolated padding keeps the sequence length.
        return length

    def forward(self, states):
        """
        :param states: [batch, length, *]
        """
        first, second = states[:, 0:1], states[:, 1:2]
        last, second_last = states[:, -1:], states[:, -2:-1]
        # Extrapolate one step past each end so the central difference
        # below keeps the original length.
        padded = torch.cat([first * 2 - second, states, last * 2 -
            second_last], dim=1)
        return (padded[:, 2:] - padded[:, :-2]) / 2

    def show(self, name='AlignDifferential', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,))
class NullaryPrimitivesPredefined_v2(N_aryPrimitivesPredefined):
    """Agent-0 ("ball") nullary primitives: acceleration magnitude,
    z-position, and speed, each soft-compared against ``cmp_dim``
    learned thresholds.
    """

    def __init__(self, cmp_dim=10):
        super().__init__()
        self.differential = AlignDifferential()
        self.primitive_list = ['ball_acc', 'ball_pos_z', 'ball_speed']
        # Per-primitive normalizer choice, mirroring the original literal.
        distributions = {'ball_acc': 'normal', 'ball_pos_z': 'uniform',
            'ball_speed': 'normal'}
        self.ineqs.update({key: Inequality(out_dim=cmp_dim, distribution=
            distributions[key]) for key in self.primitive_list})
        self.out_dim = sum(self.ineqs[key].out_dim for key in self.
            primitive_list)

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, state_dim]
        return [batch, length, out_dim]
        """
        velocity = self.differential(states)
        acceleration = self.differential(velocity)
        ineqs_inputs = {'ball_acc': torch.norm(acceleration[:, :, 0, :], p=
            2, dim=2), 'ball_pos_z': states[:, :, 0, 2], 'ball_speed':
            torch.norm(states[:, :, 0, :], p=2, dim=2)}
        pieces = [self.ineqs[key](ineqs_inputs[key], beta, **kwargs) for
            key in self.primitive_list]
        return torch.cat(pieces, dim=-1)
def get_inputs():
    """Return sample forward-pass inputs: one random [4, 4, 4, 4] tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Return constructor (args, kwargs) for the module: none required."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Fused torch.cat producing the padded sequence (4, 6, 4, 4).

    x1 indexes the padded length-6 axis (16 = length stride, 64 = batch
    stride of the raw input): row 0 is 2*s[0] - s[1], rows 1..4 copy
    the input, row 5 is 2*s[3] - s[2] -- the reflect-extrapolation used
    by AlignDifferential.
    """
    xnumel = 384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 6
    x0 = xindex % 16
    x2 = xindex // 96
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    # Left boundary: 2 * s[0] - s[1].
    tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = 2.0
    tmp7 = tmp5 * tmp6
    tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp9 = tmp7 - tmp8
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp0 >= tmp3
    tmp13 = tl.full([1], 5, tl.int64)
    tmp14 = tmp0 < tmp13
    tmp15 = tmp12 & tmp14
    # Interior rows 1..4: straight copy shifted by one.
    tmp16 = tl.load(in_ptr0 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp15 &
        xmask, other=0.0)
    tmp17 = tmp0 >= tmp13
    tl.full([1], 6, tl.int64)
    # Right boundary: 2 * s[3] - s[2].
    tmp20 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp17 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp21 = tmp20 * tmp6
    tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp17 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp23 = tmp21 - tmp22
    tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
    tmp25 = tl.where(tmp17, tmp23, tmp24)
    tmp26 = tl.where(tmp15, tmp16, tmp25)
    tmp27 = tl.where(tmp4, tmp11, tmp26)
    tl.store(out_ptr0 + x3, tmp27, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Second padding pass fused with the first central difference.

    Reads the (4, 6, 4, 4) padded buffer from cat_0 (96 = batch stride).
    Interior rows hold v[i] = (p[i+2] - p[i]) / 2; boundary rows combine
    two such differences (the 2*v - v' extrapolation), yielding the
    velocity's padded sequence for the second AlignDifferential pass.
    """
    xnumel = 384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 6
    x0 = xindex % 16
    x2 = xindex // 96
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (32 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask,
        other=0.0)
    tmp6 = tl.load(in_ptr0 + (x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0)
    tmp7 = tmp5 - tmp6
    tmp8 = 0.5
    tmp9 = tmp7 * tmp8
    tmp10 = 2.0
    tmp11 = tmp9 * tmp10
    tmp12 = tl.load(in_ptr0 + (48 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask,
        other=0.0)
    tmp13 = tl.load(in_ptr0 + (16 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask,
        other=0.0)
    tmp14 = tmp12 - tmp13
    tmp15 = tmp14 * tmp8
    tmp16 = tmp11 - tmp15
    tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
    tmp18 = tl.where(tmp4, tmp16, tmp17)
    tmp19 = tmp0 >= tmp3
    tmp20 = tl.full([1], 5, tl.int64)
    tmp21 = tmp0 < tmp20
    tmp22 = tmp19 & tmp21
    # Interior rows: single central difference of the padded buffer.
    tmp23 = tl.load(in_ptr0 + (32 + x0 + 16 * (-1 + x1) + 96 * x2), tmp22 &
        xmask, other=0.0)
    tmp24 = tl.load(in_ptr0 + (x0 + 16 * (-1 + x1) + 96 * x2), tmp22 &
        xmask, other=0.0)
    tmp25 = tmp23 - tmp24
    tmp26 = tmp25 * tmp8
    tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
    tmp28 = tl.where(tmp22, tmp26, tmp27)
    tmp29 = tmp0 >= tmp20
    tl.full([1], 6, tl.int64)
    tmp32 = tl.load(in_ptr0 + (80 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 &
        xmask, other=0.0)
    tmp33 = tl.load(in_ptr0 + (48 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 &
        xmask, other=0.0)
    tmp34 = tmp32 - tmp33
    tmp35 = tmp34 * tmp8
    tmp36 = tmp35 * tmp10
    tmp37 = tl.load(in_ptr0 + (64 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 &
        xmask, other=0.0)
    tmp38 = tl.load(in_ptr0 + (32 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 &
        xmask, other=0.0)
    tmp39 = tmp37 - tmp38
    tmp40 = tmp39 * tmp8
    tmp41 = tmp36 - tmp40
    tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
    tmp43 = tl.where(tmp29, tmp41, tmp42)
    tmp44 = tl.where(tmp22, tmp28, tmp43)
    tmp45 = tl.where(tmp4, tmp18, tmp44)
    tl.store(out_ptr0 + x3, tmp45, xmask)
@triton.jit
def triton_poi_fused_div_sub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
    XBLOCK: tl.constexpr):
    """Acceleration-magnitude primitive, 'normal'-normalized.

    For each (batch, time) position: central differences
    0.5 * (v[i+2] - v[i]) over the 4 channels of agent 0 of the padded
    velocity buffer (in_ptr0), squared, summed and square-rooted (L2
    norm), then (x - in_ptr1[0]) / in_ptr1[1] with the (mean, std)
    parameter pair in in_ptr1.
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (32 + 16 * x0 + 96 * x1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (16 * x0 + 96 * x1), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (33 + 16 * x0 + 96 * x1), xmask,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (1 + 16 * x0 + 96 * x1), xmask,
        eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (34 + 16 * x0 + 96 * x1), xmask,
        eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 16 * x0 + 96 * x1), xmask,
        eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (35 + 16 * x0 + 96 * x1), xmask,
        eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (3 + 16 * x0 + 96 * x1), xmask,
        eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + 0)
    tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
    tmp28 = tl.load(in_ptr1 + 1)
    tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
    tmp2 = tmp0 - tmp1
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tmp5 = tmp4 * tmp4
    tmp8 = tmp6 - tmp7
    tmp9 = tmp8 * tmp3
    tmp10 = tmp9 * tmp9
    tmp11 = tmp5 + tmp10
    tmp14 = tmp12 - tmp13
    tmp15 = tmp14 * tmp3
    tmp16 = tmp15 * tmp15
    tmp17 = tmp11 + tmp16
    tmp20 = tmp18 - tmp19
    tmp21 = tmp20 * tmp3
    tmp22 = tmp21 * tmp21
    tmp23 = tmp17 + tmp22
    tmp24 = libdevice.sqrt(tmp23)
    # Normal normalization: (norm - mean) / std.
    tmp27 = tmp24 - tmp26
    tmp30 = tmp27 / tmp29
    tl.store(in_out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_div_sub_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    """Fused z-position and speed normalizations from the raw states.

    out_ptr0 = (states[..., 0, 2] - vmin) / (vmax - vmin + 1e-05) with
    the 'uniform' (vmin, vmax) pair in in_ptr1; out_ptr1 is the L2 norm
    over agent 0's four channels, normalized as (norm - mean) / std
    with the 'normal' pair in in_ptr2.
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
        )
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr1 + 1)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp10 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp17 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
        'evict_last')
    tmp21 = tl.load(in_ptr2 + 0)
    tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
    tmp24 = tl.load(in_ptr2 + 1)
    tmp25 = tl.broadcast_to(tmp24, [XBLOCK])
    # Uniform normalization of the z channel.
    tmp3 = tmp0 - tmp2
    tmp6 = tmp5 - tmp2
    tmp7 = 1e-05
    tmp8 = tmp6 + tmp7
    tmp9 = tmp3 / tmp8
    # L2 norm of the 4-channel state vector.
    tmp11 = tmp10 * tmp10
    tmp13 = tmp12 * tmp12
    tmp14 = tmp11 + tmp13
    tmp15 = tmp0 * tmp0
    tmp16 = tmp14 + tmp15
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = libdevice.sqrt(tmp19)
    tmp23 = tmp20 - tmp22
    tmp26 = tmp23 / tmp25
    tl.store(out_ptr0 + x0, tmp9, xmask)
    tl.store(out_ptr1 + x0, tmp26, xmask)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Concatenate three soft comparisons into a (4, 4, 30) output.

    Each 10-wide segment along x0 is sigmoid((value - thresholds) * 1.0),
    where in_ptr0/2/4 hold the three normalized scalars and in_ptr1/3/5
    the corresponding 10-dim threshold vectors.  NOTE(review): the
    * 1.0 factor presumably is 1 / e^beta with beta = 0 (see SoftCmp)
    -- confirm against the eager module.
    """
    xnumel = 480
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 30
    x1 = xindex // 30
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 10, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp5 - tmp6
    tmp8 = 1.0
    tmp9 = tmp7 * tmp8
    tmp10 = tl.sigmoid(tmp9)
    tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
    tmp12 = tl.where(tmp4, tmp10, tmp11)
    tmp13 = tmp0 >= tmp3
    tmp14 = tl.full([1], 20, tl.int64)
    tmp15 = tmp0 < tmp14
    tmp16 = tmp13 & tmp15
    tmp17 = tl.load(in_ptr2 + x1, tmp16 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp18 = tl.load(in_ptr3 + (-10 + x0), tmp16 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp19 = tmp17 - tmp18
    tmp20 = tmp19 * tmp8
    tmp21 = tl.sigmoid(tmp20)
    tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
    tmp23 = tl.where(tmp16, tmp21, tmp22)
    tmp24 = tmp0 >= tmp14
    tl.full([1], 30, tl.int64)
    tmp27 = tl.load(in_ptr4 + x1, tmp24 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp28 = tl.load(in_ptr5 + (-20 + x0), tmp24 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp29 = tmp27 - tmp28
    tmp30 = tmp29 * tmp8
    tmp31 = tl.sigmoid(tmp30)
    tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
    tmp33 = tl.where(tmp24, tmp31, tmp32)
    tmp34 = tl.where(tmp16, tmp23, tmp33)
    tmp35 = tl.where(tmp4, tmp12, tmp34)
    tl.store(out_ptr0 + x2, tmp35, xmask)
def call(args):
    """Compiled NullaryPrimitivesPredefined_v2.forward.

    primals_1 is the (4, 4, 4, 4) states tensor; primals_2/4/6 are the
    length-2 Normalize parameter pairs and primals_3/5/7 the 10-dim
    threshold vectors for ball_acc / ball_pos_z / ball_speed.  Pipeline:
    pad (cat_0), pad + central difference (cat_1), norm + normalize
    (div_sub_2, add_div_sub_3), then concatenate the three sigmoid
    comparisons (cat_4) into a (4, 4, 30) output.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (2,), (1,))
    assert_size_stride(primals_3, (10,), (1,))
    assert_size_stride(primals_4, (2,), (1,))
    assert_size_stride(primals_5, (10,), (1,))
    assert_size_stride(primals_6, (2,), (1,))
    assert_size_stride(primals_7, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Padded states, then the padded velocity buffer.
        buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(384)](primals_1, buf0, 384, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf1 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
        triton_poi_fused_cat_1[grid(384)](buf0, buf1, 384, XBLOCK=128,
            num_warps=4, num_stages=1)
        del buf0
        # buf3: normalized acceleration magnitude (in-place over buf2).
        buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf3 = reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0)
        del buf2
        triton_poi_fused_div_sub_2[grid(16)](buf3, buf1, primals_2, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del buf1
        del primals_2
        # buf4/buf5: normalized z-position and speed.
        buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        triton_poi_fused_add_div_sub_3[grid(16)](primals_1, primals_4,
            primals_6, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_1
        del primals_4
        del primals_6
        # Final concat of the three sigmoid comparisons.
        buf6 = empty_strided_cuda((4, 4, 30), (120, 30, 1), torch.float32)
        triton_poi_fused_cat_4[grid(480)](buf3, primals_3, buf4, primals_5,
            buf5, primals_7, buf6, 480, XBLOCK=128, num_warps=4, num_stages=1)
    return buf6, primals_3, primals_5, primals_7, buf3, buf4, buf5
class Normalize(nn.Module):
    """Normalize inputs according to a fitted distribution.

    distribution=None      -- identity (no-op)
    distribution='normal'  -- (x - mean) / std
    distribution='uniform' -- (x - vmin) / (vmax - vmin + 1e-05)

    ``param`` is a frozen (requires_grad=False) length-2 parameter
    holding (mean, std) or (vmin, vmax).  ``data_`` buffers observations
    collected via ``forward(keep_data=True)`` until
    ``reset_parameters()`` refits ``param`` from them.
    """

    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        self.data_ = []
        if distribution is None:
            pass
        elif distribution == 'normal':
            stats = [kwargs.get('mean', 0), kwargs.get('std', 1)]
            self.param = nn.Parameter(torch.Tensor(stats), False)
        elif distribution == 'uniform':
            bounds = [kwargs.get('minv', 0), kwargs.get('maxv', 1)]
            self.param = nn.Parameter(torch.Tensor(bounds), False)
        else:
            raise NotImplementedError()

    def forward(self, x, keep_data=False):
        # Estimation mode: stash a flattened CPU copy and pass x through.
        if keep_data:
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            mean, std = self.param[0], self.param[1]
            return (x - mean) / std
        if self.distribution == 'uniform':
            vmin, vmax = self.param[0], self.param[1]
            return (x - vmin) / (vmax - vmin + 1e-05)
        raise NotImplementedError()

    def reset_parameters(self, name=None):
        """Refit ``param`` from the buffered observations, then clear them."""
        assert len(self.data_) > 0
        samples = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = samples.mean().item()
                self.param[1] = samples.std().item()
            if name is not None:
                None  # no-op: logging stripped by the export tool
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = samples.min().item()
                self.param[1] = samples.max().item()
            if name is not None:
                None  # no-op: logging stripped by the export tool
        else:
            raise NotImplementedError()

    def recover_threshold(self, x):
        """Map a normalized threshold back to the raw input scale."""
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        if self.distribution == 'uniform':
            return x * float(self.param[1] - self.param[0] + 1e-05) + float(
                self.param[0])
        raise NotImplementedError()

    def init_thresholds(self, x):
        """Initialize a threshold tensor in the normalized space."""
        if self.distribution is None:
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'normal':
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """Soft comparison x > y, computed as Sigmoid((x - y) / e^beta)."""

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        # The temperature e^beta controls how sharp the comparison is.
        temperature = math.exp(beta)
        difference = x - y
        return self.sigmoid(difference / temperature)
class Inequality(nn.Module):
    """Soft inequality primitive: sigmoid-compares inputs to learned thresholds,
    producing out_dim soft truth values per input scalar."""
    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        # One learnable comparison threshold per output channel.
        self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True
            )
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        self.normalize.init_thresholds(self.thresholds)
    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, ... ]
        """
        # Trailing singleton dim lets states broadcast against the thresholds.
        states_expand = states.view(*(states.size() + (1,)))
        estimate_parameters = 'estimate_parameters' in kwargs and kwargs[
            'estimate_parameters']
        states_expand = self.normalize(states_expand, keep_data=
            estimate_parameters)
        # Thresholds reshaped to [1, ..., 1, out_dim] for broadcasting.
        return self.cmp(states_expand, self.thresholds.view(*([1] * len(
            states.size()) + [self.out_dim])), beta)
    def reset_parameters(self, parameter_name, name=None):
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)
    def get_descriptions(self, name='Inequality'):
        """Return a human-readable '<name> > <threshold>' string per channel."""
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descroptions = []
        for k in range(theta.size(0)):
            t = self.normalize.recover_threshold(theta[k])
            # Scale factors undo the dataset normalization of speed/acc values.
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descroptions.append('%s > %.2lf' % (name, t))
        return descroptions
class N_aryPrimitivesPredefined(nn.Module):
    """Base container holding a dict of named inequality primitives."""

    def __init__(self):
        super().__init__()
        self.out_dim = 0
        self.primitive_list = []
        self.ineqs = nn.ModuleDict({})

    def reset_parameters(self, parameter_name):
        # Propagate the reset to every registered primitive under its own key.
        for key in self.primitive_list:
            self.ineqs[key].reset_parameters(parameter_name, name=key)

    def get_descriptions(self):
        """Concatenate per-primitive description strings, in list order."""
        return [desc for key in self.primitive_list
                for desc in self.ineqs[key].get_descriptions(name=key)]
class AlignDifferential(nn.Module):
    """Central finite difference along dim 1 that preserves sequence length."""

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        # Length is preserved thanks to the edge extrapolation in forward().
        return length

    def forward(self, states):
        """
        :param states: [batch, length, *]
        """
        # Linearly extrapolate one frame past each end, then take the
        # centered difference over the padded sequence.
        front = states[:, 0:1] * 2 - states[:, 1:2]
        back = states[:, -1:] * 2 - states[:, -2:-1]
        padded = torch.cat([front, states, back], dim=1)
        return (padded[:, 2:] - padded[:, :-2]) / 2

    def show(self, name='AlignDifferential', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,))
class NullaryPrimitivesPredefined_v2New(N_aryPrimitivesPredefined):
    """Nullary ball primitives (acc, height, speed) using the fused Triton path.

    forward delegates to the inductor-generated call() defined earlier in this
    module instead of composing the eager sub-modules.
    """
    def __init__(self, cmp_dim=10):
        super().__init__()
        self.differential = AlignDifferential()
        self.primitive_list = ['ball_acc', 'ball_pos_z', 'ball_speed']
        self.ineqs.update({'ball_acc': Inequality(out_dim=cmp_dim,
            distribution='normal'), 'ball_pos_z': Inequality(out_dim=
            cmp_dim, distribution='uniform'), 'ball_speed': Inequality(
            out_dim=cmp_dim, distribution='normal')})
        self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list]
            )
    def forward(self, input_0):
        # Gather each primitive's parameters and hand them, with the input,
        # to the fused kernel entry point; call() returns (output, *saved).
        primals_3 = self.ineqs.ball_acc.thresholds
        primals_2 = self.ineqs.ball_acc.normalize.param
        primals_5 = self.ineqs.ball_pos_z.thresholds
        primals_4 = self.ineqs.ball_pos_z.normalize.param
        primals_7 = self.ineqs.ball_speed.thresholds
        primals_6 = self.ineqs.ball_speed.normalize.param
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
NullaryPrimitivesPredefined_v2
| false
| 17,151
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
MaxPoolPad
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.backends.cudnn
class MaxPoolPad(nn.Module):
    """3x3 stride-2 max pool preceded by an extra top-left zero pad,
    with the pad-induced first row/column cropped afterwards."""

    def __init__(self):
        super(MaxPoolPad, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.MaxPool2d(3, stride=2, padding=1)

    def forward(self, x):
        padded = self.pad(x)
        pooled = self.pool(padded)
        # Drop the first output row and column introduced by the asymmetric pad.
        return pooled[:, :, 1:, 1:]
def get_inputs():
    """Sample forward() inputs for benchmarking."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) pair for benchmarking."""
    init_args = []
    init_kwargs = {}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused ZeroPad2d((1, 0, 1, 0)) + MaxPool2d(3, stride=2, padding=1) over a
    # (4, 4, 4, 4) float32 input with strides (64, 16, 4, 1), writing the
    # (4, 4, 3, 3) pooled output (144 elements). Each of the nine masked
    # loads below is one tap of the 3x3 pooling window: taps inside the
    # zero-pad region contribute 0.0, taps outside the padded plane are
    # replaced with -inf so they never win the running maximum.
    xnumel = 144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x0/x1: output column/row (0..2); x2: flattened (batch, channel).
    x1 = xindex // 3 % 3
    x0 = xindex % 3
    x2 = xindex // 9
    x4 = xindex
    # Window coordinates are expressed in the 5x5 zero-padded plane (0..4).
    tmp0 = -1 + 2 * x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 5, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + 2 * x0
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    # Additional bounds against the original (unpadded) 4x4 plane.
    tmp11 = -2 + 2 * x1
    tmp12 = tmp11 >= tmp1
    tmp13 = -2 + 2 * x0
    tmp14 = tmp13 >= tmp1
    tmp15 = tmp12 & tmp14
    tmp16 = tmp15 & tmp10
    tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp18 = tl.full(tmp17.shape, float('-inf'), tmp17.dtype)
    tmp19 = tl.where(tmp10, tmp17, tmp18)
    tmp20 = 2 * x0
    tmp21 = tmp20 >= tmp1
    tmp22 = tmp20 < tmp3
    tmp23 = tmp21 & tmp22
    tmp24 = tmp5 & tmp23
    tmp25 = tmp12 & tmp7
    tmp26 = tmp25 & tmp24
    tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp28 = tl.full(tmp27.shape, float('-inf'), tmp27.dtype)
    tmp29 = tl.where(tmp24, tmp27, tmp28)
    tmp30 = triton_helpers.maximum(tmp29, tmp19)
    tmp31 = 1 + 2 * x0
    tmp32 = tmp31 >= tmp1
    tmp33 = tmp31 < tmp3
    tmp34 = tmp32 & tmp33
    tmp35 = tmp5 & tmp34
    tmp36 = tmp12 & tmp21
    tmp37 = tmp36 & tmp35
    tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp39 = tl.full(tmp38.shape, float('-inf'), tmp38.dtype)
    tmp40 = tl.where(tmp35, tmp38, tmp39)
    tmp41 = triton_helpers.maximum(tmp40, tmp30)
    # Middle row of the window.
    tmp42 = 2 * x1
    tmp43 = tmp42 >= tmp1
    tmp44 = tmp42 < tmp3
    tmp45 = tmp43 & tmp44
    tmp46 = tmp45 & tmp9
    tmp47 = tmp2 & tmp14
    tmp48 = tmp47 & tmp46
    tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp50 = tl.full(tmp49.shape, float('-inf'), tmp49.dtype)
    tmp51 = tl.where(tmp46, tmp49, tmp50)
    tmp52 = triton_helpers.maximum(tmp51, tmp41)
    tmp53 = tmp45 & tmp23
    tmp54 = tmp2 & tmp7
    tmp55 = tmp54 & tmp53
    tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp57 = tl.full(tmp56.shape, float('-inf'), tmp56.dtype)
    tmp58 = tl.where(tmp53, tmp56, tmp57)
    tmp59 = triton_helpers.maximum(tmp58, tmp52)
    tmp60 = tmp45 & tmp34
    tmp61 = tmp2 & tmp21
    tmp62 = tmp61 & tmp60
    tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp64 = tl.full(tmp63.shape, float('-inf'), tmp63.dtype)
    tmp65 = tl.where(tmp60, tmp63, tmp64)
    tmp66 = triton_helpers.maximum(tmp65, tmp59)
    # Bottom row of the window.
    tmp67 = 1 + 2 * x1
    tmp68 = tmp67 >= tmp1
    tmp69 = tmp67 < tmp3
    tmp70 = tmp68 & tmp69
    tmp71 = tmp70 & tmp9
    tmp72 = tmp43 & tmp14
    tmp73 = tmp72 & tmp71
    tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp75 = tl.full(tmp74.shape, float('-inf'), tmp74.dtype)
    tmp76 = tl.where(tmp71, tmp74, tmp75)
    tmp77 = triton_helpers.maximum(tmp76, tmp66)
    tmp78 = tmp70 & tmp23
    tmp79 = tmp43 & tmp7
    tmp80 = tmp79 & tmp78
    tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp82 = tl.full(tmp81.shape, float('-inf'), tmp81.dtype)
    tmp83 = tl.where(tmp78, tmp81, tmp82)
    tmp84 = triton_helpers.maximum(tmp83, tmp77)
    tmp85 = tmp70 & tmp34
    tmp86 = tmp43 & tmp21
    tmp87 = tmp86 & tmp85
    tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp89 = tl.full(tmp88.shape, float('-inf'), tmp88.dtype)
    tmp90 = tl.where(tmp85, tmp88, tmp89)
    tmp91 = triton_helpers.maximum(tmp90, tmp84)
    # tmp91 now holds the max over all nine taps.
    tl.store(out_ptr0 + x4, tmp91, xmask)
def call(args):
    """Run the fused pad+maxpool Triton kernel on a (4, 4, 4, 4) CUDA tensor.

    Returns a (4, 4, 2, 2) view of the 3x3 pooled buffer offset by one row
    and one column (offset 4 = stride 3 + 1), i.e. the x[:, :, 1:, 1:] crop
    of the eager MaxPoolPad.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0[grid(144)](
            arg0_1, buf0, 144, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4),
class MaxPoolPadNew(nn.Module):
    """MaxPoolPad variant whose forward runs the fused Triton call()."""
    def __init__(self):
        super(MaxPoolPadNew, self).__init__()
        # Kept for interface parity with MaxPoolPad; forward only invokes the
        # fused call(), which already performs the pad, pool and crop.
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.MaxPool2d(3, stride=2, padding=1)
    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
CalebEverett/fastai-dl2
|
MaxPoolPad
| false
| 17,152
|
[
"Apache-2.0"
] | 4
|
64d23592eddca6ca1f3647e73c319e97c8eb392b
|
https://github.com/CalebEverett/fastai-dl2/tree/64d23592eddca6ca1f3647e73c319e97c8eb392b
|
MaxPool
|
import torch
import torch.nn as nn
import torch.utils.data
class MaxPool(nn.Module):
    """Max pooling with an optional asymmetric top-left zero pad and crop."""

    def __init__(self, kernel_size, stride=1, padding=1, zero_pad=False):
        super(MaxPool, self).__init__()
        # The extra pad (and the matching crop in forward) is applied only
        # when zero_pad is requested.
        self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None
        self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding)

    def forward(self, x):
        use_pad = self.zero_pad is not None
        if use_pad:
            x = self.zero_pad(x)
        pooled = self.pool(x)
        if use_pad:
            # Remove the first row/column introduced by the asymmetric pad.
            pooled = pooled[:, :, 1:, 1:]
        return pooled
def get_inputs():
    """Sample forward() inputs for benchmarking."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs): builds MaxPool(kernel_size=4)."""
    init_kwargs = {'kernel_size': 4}
    return [[], init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Fused MaxPool2d(kernel_size=4, stride=1, padding=1) over a (4, 4, 4, 4)
    # float32 input with strides (64, 16, 4, 1), producing (4, 4, 3, 3)
    # (144 elements). The sixteen masked loads below are the taps of the 4x4
    # pooling window; out-of-bounds taps load -inf via `other` so they never
    # win the running maximum.
    xnumel = 144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x0/x1: output column/row (0..2); x2: flattened (batch, channel).
    x1 = xindex // 3 % 3
    x0 = xindex % 3
    x2 = xindex // 9
    x4 = xindex
    # Row -1 + x1 of the window; per-column bounds tests against 0..3.
    tmp0 = -1 + x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + x0
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
        other=float('-inf'))
    tmp12 = x0
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp16 & xmask,
        other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + x0
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp5 & tmp22
    tmp24 = tl.load(in_ptr0 + (-3 + x0 + 4 * x1 + 16 * x2), tmp23 & xmask,
        other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = 2 + x0
    tmp27 = tmp26 >= tmp1
    tmp28 = tmp26 < tmp3
    tmp29 = tmp27 & tmp28
    tmp30 = tmp5 & tmp29
    tmp31 = tl.load(in_ptr0 + (-2 + x0 + 4 * x1 + 16 * x2), tmp30 & xmask,
        other=float('-inf'))
    tmp32 = triton_helpers.maximum(tmp31, tmp25)
    # Row x1 of the window.
    tmp33 = x1
    tmp34 = tmp33 >= tmp1
    tmp35 = tmp33 < tmp3
    tmp36 = tmp34 & tmp35
    tmp37 = tmp36 & tmp9
    tmp38 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp37 & xmask,
        other=float('-inf'))
    tmp39 = triton_helpers.maximum(tmp38, tmp32)
    tmp40 = tmp36 & tmp15
    tmp41 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp40 & xmask, other
        =float('-inf'))
    tmp42 = triton_helpers.maximum(tmp41, tmp39)
    tmp43 = tmp36 & tmp22
    tmp44 = tl.load(in_ptr0 + (1 + x0 + 4 * x1 + 16 * x2), tmp43 & xmask,
        other=float('-inf'))
    tmp45 = triton_helpers.maximum(tmp44, tmp42)
    tmp46 = tmp36 & tmp29
    tmp47 = tl.load(in_ptr0 + (2 + x0 + 4 * x1 + 16 * x2), tmp46 & xmask,
        other=float('-inf'))
    tmp48 = triton_helpers.maximum(tmp47, tmp45)
    # Row 1 + x1 of the window.
    tmp49 = 1 + x1
    tmp50 = tmp49 >= tmp1
    tmp51 = tmp49 < tmp3
    tmp52 = tmp50 & tmp51
    tmp53 = tmp52 & tmp9
    tmp54 = tl.load(in_ptr0 + (3 + x0 + 4 * x1 + 16 * x2), tmp53 & xmask,
        other=float('-inf'))
    tmp55 = triton_helpers.maximum(tmp54, tmp48)
    tmp56 = tmp52 & tmp15
    tmp57 = tl.load(in_ptr0 + (4 + x0 + 4 * x1 + 16 * x2), tmp56 & xmask,
        other=float('-inf'))
    tmp58 = triton_helpers.maximum(tmp57, tmp55)
    tmp59 = tmp52 & tmp22
    tmp60 = tl.load(in_ptr0 + (5 + x0 + 4 * x1 + 16 * x2), tmp59 & xmask,
        other=float('-inf'))
    tmp61 = triton_helpers.maximum(tmp60, tmp58)
    tmp62 = tmp52 & tmp29
    tmp63 = tl.load(in_ptr0 + (6 + x0 + 4 * x1 + 16 * x2), tmp62 & xmask,
        other=float('-inf'))
    tmp64 = triton_helpers.maximum(tmp63, tmp61)
    # Row 2 + x1 of the window.
    tmp65 = 2 + x1
    tmp66 = tmp65 >= tmp1
    tmp67 = tmp65 < tmp3
    tmp68 = tmp66 & tmp67
    tmp69 = tmp68 & tmp9
    tmp70 = tl.load(in_ptr0 + (7 + x0 + 4 * x1 + 16 * x2), tmp69 & xmask,
        other=float('-inf'))
    tmp71 = triton_helpers.maximum(tmp70, tmp64)
    tmp72 = tmp68 & tmp15
    tmp73 = tl.load(in_ptr0 + (8 + x0 + 4 * x1 + 16 * x2), tmp72 & xmask,
        other=float('-inf'))
    tmp74 = triton_helpers.maximum(tmp73, tmp71)
    tmp75 = tmp68 & tmp22
    tmp76 = tl.load(in_ptr0 + (9 + x0 + 4 * x1 + 16 * x2), tmp75 & xmask,
        other=float('-inf'))
    tmp77 = triton_helpers.maximum(tmp76, tmp74)
    tmp78 = tmp68 & tmp29
    tmp79 = tl.load(in_ptr0 + (10 + x0 + 4 * x1 + 16 * x2), tmp78 & xmask,
        other=float('-inf'))
    tmp80 = triton_helpers.maximum(tmp79, tmp77)
    # tmp80 holds the max over all sixteen taps.
    tl.store(out_ptr0 + x4, tmp80, xmask)
def call(args):
    """Run the fused 4x4/stride-1/pad-1 max-pool kernel on a (4, 4, 4, 4)
    CUDA tensor and return the (4, 4, 3, 3) pooled buffer."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_pool2d_with_indices_0[grid(144)](arg0_1, buf0,
            144, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class MaxPoolNew(nn.Module):
    """MaxPool variant whose forward runs the fused Triton call().

    NOTE(review): the fused path hard-codes the kernel_size=4, stride=1,
    padding=1 configuration on (4, 4, 4, 4) inputs; the constructor
    arguments are stored but not consulted by forward.
    """
    def __init__(self, kernel_size, stride=1, padding=1, zero_pad=False):
        super(MaxPoolNew, self).__init__()
        self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None
        self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding)
    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
|
BigFishMaster/tnt
|
MaxPool
| false
| 17,153
|
[
"BSD-3-Clause"
] | 3
|
8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
UnaryPrimitivesToyotaJoint
|
import math
import torch
from torch import nn
class Normalize(nn.Module):
    """Normalizes inputs under an assumed distribution.

    distribution=None is the identity; 'normal' applies (x - mean) / std;
    'uniform' rescales [vmin, vmax] to roughly [0, 1]. The parameters are
    stored in a non-trainable nn.Parameter so they follow the module
    (device moves, state_dict) without being touched by the optimizer.
    """
    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        # Flattened observations recorded by forward(keep_data=True);
        # consumed and cleared by reset_parameters().
        self.data_ = []
        if distribution is None:
            pass
        elif distribution == 'normal':
            mean = kwargs['mean'] if 'mean' in kwargs else 0
            std = kwargs['std'] if 'std' in kwargs else 1
            self.param = nn.Parameter(torch.Tensor([mean, std]), False)
        elif distribution == 'uniform':
            vmin = kwargs['minv'] if 'minv' in kwargs else 0
            vmax = kwargs['maxv'] if 'maxv' in kwargs else 1
            self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False)
        else:
            raise NotImplementedError()
    def forward(self, x, keep_data=False):
        """Normalize x; with keep_data=True just record x and return it unchanged."""
        if keep_data:
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        elif self.distribution == 'normal':
            mean = self.param[0]
            std = self.param[1]
            return (x - mean) / std
        elif self.distribution == 'uniform':
            vmin = self.param[0]
            vmax = self.param[1]
            # + 1e-05 guards against division by zero when vmax == vmin.
            return (x - vmin) / (vmax - vmin + 1e-05)
        else:
            raise NotImplementedError()
    def reset_parameters(self, name=None):
        """Re-estimate the distribution parameters from the recorded data."""
        assert len(self.data_) > 0
        data = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = data.mean().item()
                self.param[1] = data.std().item()
            # Bare `None` below looks like a stripped logging statement.
            if name is not None:
                None
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = data.min().item()
                self.param[1] = data.max().item()
            if name is not None:
                None
        else:
            raise NotImplementedError()
    def recover_threshold(self, x):
        """Map a normalized threshold back to the original data scale."""
        if self.distribution is None:
            return x
        elif self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        elif self.distribution == 'uniform':
            return x * float(self.param[1] - self.param[0] + 1e-05) + float(
                self.param[0])
        else:
            raise NotImplementedError()
    def init_thresholds(self, x):
        """Initialize the threshold tensor x in place to match the assumed scale."""
        if self.distribution is None:
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'normal':
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """Soft comparison x > y, computed as Sigmoid((x - y) / e^beta)."""

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        # The temperature e^beta controls how sharp the comparison is.
        temperature = math.exp(beta)
        difference = x - y
        return self.sigmoid(difference / temperature)
class Inequality(nn.Module):
    """Soft inequality primitive: sigmoid-compares inputs to learned thresholds,
    producing out_dim soft truth values per input scalar."""
    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        # One learnable comparison threshold per output channel.
        self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True
            )
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        self.normalize.init_thresholds(self.thresholds)
    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, ... ]
        """
        # Trailing singleton dim lets states broadcast against the thresholds.
        states_expand = states.view(*(states.size() + (1,)))
        estimate_parameters = 'estimate_parameters' in kwargs and kwargs[
            'estimate_parameters']
        states_expand = self.normalize(states_expand, keep_data=
            estimate_parameters)
        # Thresholds reshaped to [1, ..., 1, out_dim] for broadcasting.
        return self.cmp(states_expand, self.thresholds.view(*([1] * len(
            states.size()) + [self.out_dim])), beta)
    def reset_parameters(self, parameter_name, name=None):
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)
    def get_descriptions(self, name='Inequality'):
        """Return a human-readable '<name> > <threshold>' string per channel."""
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descroptions = []
        for k in range(theta.size(0)):
            t = self.normalize.recover_threshold(theta[k])
            # Scale factors undo the dataset normalization of speed/acc values.
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descroptions.append('%s > %.2lf' % (name, t))
        return descroptions
class AlignDifferential(nn.Module):
    """Central finite difference along dim 1 that preserves sequence length."""

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        # Length is preserved thanks to the edge extrapolation in forward().
        return length

    def forward(self, states):
        """
        :param states: [batch, length, *]
        """
        # Linearly extrapolate one frame past each end, then take the
        # centered difference over the padded sequence.
        front = states[:, 0:1] * 2 - states[:, 1:2]
        back = states[:, -1:] * 2 - states[:, -2:-1]
        padded = torch.cat([front, states, back], dim=1)
        return (padded[:, 2:] - padded[:, :-2]) / 2

    def show(self, name='AlignDifferential', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,))
class N_aryPrimitivesToyotaJoint(nn.Module):
    """Base container holding a dict of named inequality primitives."""

    def __init__(self):
        super().__init__()
        self.out_dim = 0
        self.primitive_list = []
        self.ineqs = nn.ModuleDict({})

    def reset_parameters(self, parameter_name):
        # Propagate the reset to every registered primitive under its own key.
        for key in self.primitive_list:
            self.ineqs[key].reset_parameters(parameter_name, name=key)
class UnaryPrimitivesToyotaJoint(N_aryPrimitivesToyotaJoint):
    """Per-joint unary primitives over the first three state coordinates."""
    def __init__(self, cmp_dim=10):
        super().__init__()
        self.differential = AlignDifferential()
        self.primitive_list = ['pos_x', 'pos_y', 'pos_z']
        self.ineqs.update({'pos_x': Inequality(out_dim=cmp_dim,
            distribution='uniform'), 'pos_y': Inequality(out_dim=cmp_dim,
            distribution='uniform'), 'pos_z': Inequality(out_dim=cmp_dim,
            distribution='uniform')})
        self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list]
            )
    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_joints, state_dim]
        return [batch, length, n_joints, out_dim]
        """
        velocity = self.differential(states)
        acc = self.differential(velocity)
        # NOTE(review): 'speed' and 'acc' are computed here but are not in
        # primitive_list, so they are never consumed below.
        ineqs_inputs = {'pos_x': states[:, :, :, 0], 'pos_y': states[:, :,
            :, 1], 'pos_z': states[:, :, :, 2], 'speed': torch.norm(
            velocity, p=2, dim=3), 'acc': torch.norm(acc, p=2, dim=3)}
        # Concatenate each primitive's soft truth values along the last dim.
        output = torch.cat([self.ineqs[k](ineqs_inputs[k], beta, name=k, **
            kwargs) for k in self.primitive_list], dim=-1)
        return output
def get_inputs():
    """Sample forward() inputs for benchmarking."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) pair for benchmarking."""
    init_args = []
    init_kwargs = {}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused uniform-normalize + sigmoid-compare + concat for the three
    # positional primitives (pos_x / pos_y / pos_z, 10 thresholds each).
    # in_ptr0: states (4, 4, 4, 4); in_ptr1/3/5: (vmin, vmax) pairs;
    # in_ptr2/4/6: thresholds. Output is (4, 4, 4, 30) = 1920 elements;
    # x0 selects the output channel, x1 the flattened (batch, length, joint).
    xnumel = 1920
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 30
    x1 = xindex // 30
    x2 = xindex
    # Broadcast each primitive's (vmin, vmax) scalars across the block.
    tmp6 = tl.load(in_ptr1 + 0)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp9 = tl.load(in_ptr1 + 1)
    tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
    tmp27 = tl.load(in_ptr3 + 0)
    tmp28 = tl.broadcast_to(tmp27, [XBLOCK])
    tmp30 = tl.load(in_ptr3 + 1)
    tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
    tmp45 = tl.load(in_ptr5 + 0)
    tmp46 = tl.broadcast_to(tmp45, [XBLOCK])
    tmp48 = tl.load(in_ptr5 + 1)
    tmp49 = tl.broadcast_to(tmp48, [XBLOCK])
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    # Channels 0..9: pos_x -> sigmoid(((x - vmin) / (vmax - vmin + 1e-05)) - t).
    tmp3 = tl.full([1], 10, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp8 = tmp5 - tmp7
    tmp11 = tmp10 - tmp7
    tmp12 = 1e-05
    tmp13 = tmp11 + tmp12
    tmp14 = tmp8 / tmp13
    tmp15 = tl.load(in_ptr2 + x0, tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp16 = tmp14 - tmp15
    tmp17 = 1.0
    tmp18 = tmp16 * tmp17
    tmp19 = tl.sigmoid(tmp18)
    tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
    tmp21 = tl.where(tmp4, tmp19, tmp20)
    # Channels 10..19: pos_y (state coordinate 1).
    tmp22 = tmp0 >= tmp3
    tmp23 = tl.full([1], 20, tl.int64)
    tmp24 = tmp0 < tmp23
    tmp25 = tmp22 & tmp24
    tmp26 = tl.load(in_ptr0 + (1 + 4 * x1), tmp25 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp29 = tmp26 - tmp28
    tmp32 = tmp31 - tmp28
    tmp33 = tmp32 + tmp12
    tmp34 = tmp29 / tmp33
    tmp35 = tl.load(in_ptr4 + (-10 + x0), tmp25 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp36 = tmp34 - tmp35
    tmp37 = tmp36 * tmp17
    tmp38 = tl.sigmoid(tmp37)
    tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
    tmp40 = tl.where(tmp25, tmp38, tmp39)
    # Channels 20..29: pos_z (state coordinate 2).
    tmp41 = tmp0 >= tmp23
    tl.full([1], 30, tl.int64)
    tmp44 = tl.load(in_ptr0 + (2 + 4 * x1), tmp41 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp47 = tmp44 - tmp46
    tmp50 = tmp49 - tmp46
    tmp51 = tmp50 + tmp12
    tmp52 = tmp47 / tmp51
    tmp53 = tl.load(in_ptr6 + (-20 + x0), tmp41 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp54 = tmp52 - tmp53
    tmp55 = tmp54 * tmp17
    tmp56 = tl.sigmoid(tmp55)
    tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
    tmp58 = tl.where(tmp41, tmp56, tmp57)
    # Select the branch matching this output channel (the concat).
    tmp59 = tl.where(tmp25, tmp40, tmp58)
    tmp60 = tl.where(tmp4, tmp21, tmp59)
    tl.store(out_ptr0 + x2, tmp60, xmask)
def call(args):
    """Run the fused normalize+compare+concat kernel for the three positional
    primitives; returns the (4, 4, 4, 30) output followed by the saved inputs."""
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    # primals_1: states; primals_{2,4,6}: Normalize params (vmin, vmax);
    # primals_{3,5,7}: thresholds for pos_x / pos_y / pos_z respectively.
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (2,), (1,))
    assert_size_stride(primals_3, (10,), (1,))
    assert_size_stride(primals_4, (2,), (1,))
    assert_size_stride(primals_5, (10,), (1,))
    assert_size_stride(primals_6, (2,), (1,))
    assert_size_stride(primals_7, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.
            float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(1920)](primals_1, primals_2, primals_3,
            primals_4, primals_5, primals_6, primals_7, buf0, 1920, XBLOCK=
            256, num_warps=4, num_stages=1)
    return (buf0, primals_1, primals_2, primals_3, primals_4, primals_5,
        primals_6, primals_7)
class Normalize(nn.Module):
    """Normalizes inputs under an assumed distribution.

    distribution=None is the identity; 'normal' applies (x - mean) / std;
    'uniform' rescales [vmin, vmax] to roughly [0, 1]. The parameters are
    stored in a non-trainable nn.Parameter so they follow the module
    (device moves, state_dict) without being touched by the optimizer.
    """
    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        # Flattened observations recorded by forward(keep_data=True);
        # consumed and cleared by reset_parameters().
        self.data_ = []
        if distribution is None:
            pass
        elif distribution == 'normal':
            mean = kwargs['mean'] if 'mean' in kwargs else 0
            std = kwargs['std'] if 'std' in kwargs else 1
            self.param = nn.Parameter(torch.Tensor([mean, std]), False)
        elif distribution == 'uniform':
            vmin = kwargs['minv'] if 'minv' in kwargs else 0
            vmax = kwargs['maxv'] if 'maxv' in kwargs else 1
            self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False)
        else:
            raise NotImplementedError()
    def forward(self, x, keep_data=False):
        """Normalize x; with keep_data=True just record x and return it unchanged."""
        if keep_data:
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        elif self.distribution == 'normal':
            mean = self.param[0]
            std = self.param[1]
            return (x - mean) / std
        elif self.distribution == 'uniform':
            vmin = self.param[0]
            vmax = self.param[1]
            # + 1e-05 guards against division by zero when vmax == vmin.
            return (x - vmin) / (vmax - vmin + 1e-05)
        else:
            raise NotImplementedError()
    def reset_parameters(self, name=None):
        """Re-estimate the distribution parameters from the recorded data."""
        assert len(self.data_) > 0
        data = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = data.mean().item()
                self.param[1] = data.std().item()
            # Bare `None` below looks like a stripped logging statement.
            if name is not None:
                None
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = data.min().item()
                self.param[1] = data.max().item()
            if name is not None:
                None
        else:
            raise NotImplementedError()
    def recover_threshold(self, x):
        """Map a normalized threshold back to the original data scale."""
        if self.distribution is None:
            return x
        elif self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        elif self.distribution == 'uniform':
            return x * float(self.param[1] - self.param[0] + 1e-05) + float(
                self.param[0])
        else:
            raise NotImplementedError()
    def init_thresholds(self, x):
        """Initialize the threshold tensor x in place to match the assumed scale."""
        if self.distribution is None:
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'normal':
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """Soft comparison x > y, computed as Sigmoid((x - y) / e^beta)."""

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        # The temperature e^beta controls how sharp the comparison is.
        temperature = math.exp(beta)
        difference = x - y
        return self.sigmoid(difference / temperature)
class Inequality(nn.Module):
    """Soft inequality primitive: sigmoid-compares inputs to learned thresholds,
    producing out_dim soft truth values per input scalar."""
    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        # One learnable comparison threshold per output channel.
        self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True
            )
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        self.normalize.init_thresholds(self.thresholds)
    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, ... ]
        """
        # Trailing singleton dim lets states broadcast against the thresholds.
        states_expand = states.view(*(states.size() + (1,)))
        estimate_parameters = 'estimate_parameters' in kwargs and kwargs[
            'estimate_parameters']
        states_expand = self.normalize(states_expand, keep_data=
            estimate_parameters)
        # Thresholds reshaped to [1, ..., 1, out_dim] for broadcasting.
        return self.cmp(states_expand, self.thresholds.view(*([1] * len(
            states.size()) + [self.out_dim])), beta)
    def reset_parameters(self, parameter_name, name=None):
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)
    def get_descriptions(self, name='Inequality'):
        """Return a human-readable '<name> > <threshold>' string per channel."""
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descroptions = []
        for k in range(theta.size(0)):
            t = self.normalize.recover_threshold(theta[k])
            # Scale factors undo the dataset normalization of speed/acc values.
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descroptions.append('%s > %.2lf' % (name, t))
        return descroptions
class AlignDifferential(nn.Module):
    """Central finite difference along dim 1 that preserves sequence length."""

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        # Length is preserved thanks to the edge extrapolation in forward().
        return length

    def forward(self, states):
        """
        :param states: [batch, length, *]
        """
        # Linearly extrapolate one frame past each end, then take the
        # centered difference over the padded sequence.
        front = states[:, 0:1] * 2 - states[:, 1:2]
        back = states[:, -1:] * 2 - states[:, -2:-1]
        padded = torch.cat([front, states, back], dim=1)
        return (padded[:, 2:] - padded[:, :-2]) / 2

    def show(self, name='AlignDifferential', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,))
class N_aryPrimitivesToyotaJoint(nn.Module):
    """Base container holding a dict of named inequality primitives."""

    def __init__(self):
        super().__init__()
        self.out_dim = 0
        self.primitive_list = []
        self.ineqs = nn.ModuleDict({})

    def reset_parameters(self, parameter_name):
        # Propagate the reset to every registered primitive under its own key.
        for key in self.primitive_list:
            self.ineqs[key].reset_parameters(parameter_name, name=key)
class UnaryPrimitivesToyotaJointNew(N_aryPrimitivesToyotaJoint):
    """UnaryPrimitivesToyotaJoint variant whose forward runs the fused
    Triton call() defined earlier in this module."""
    def __init__(self, cmp_dim=10):
        super().__init__()
        self.differential = AlignDifferential()
        self.primitive_list = ['pos_x', 'pos_y', 'pos_z']
        self.ineqs.update({'pos_x': Inequality(out_dim=cmp_dim,
            distribution='uniform'), 'pos_y': Inequality(out_dim=cmp_dim,
            distribution='uniform'), 'pos_z': Inequality(out_dim=cmp_dim,
            distribution='uniform')})
        self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list]
            )
    def forward(self, input_0):
        # Collect each primitive's parameters and run the fused kernel;
        # call() returns (output, *saved_tensors), only the output is used.
        primals_3 = self.ineqs.pos_x.thresholds
        primals_2 = self.ineqs.pos_x.normalize.param
        primals_5 = self.ineqs.pos_y.thresholds
        primals_4 = self.ineqs.pos_y.normalize.param
        primals_7 = self.ineqs.pos_z.thresholds
        primals_6 = self.ineqs.pos_z.normalize.param
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
UnaryPrimitivesToyotaJoint
| false
| 17,154
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
MaxPoolPad
|
import torch
import torch.nn as nn
import torch.utils.data
class MaxPoolPad(nn.Module):
    """3x3 stride-2 max pool with an extra top-left zero pad; the pad-induced
    first row/column is cropped and the result made contiguous."""

    def __init__(self):
        super(MaxPoolPad, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.MaxPool2d(3, stride=2, padding=1)

    def forward(self, x):
        padded = self.pad(x)
        pooled = self.pool(padded)
        # Drop the first row/column; contiguous() materializes the slice.
        return pooled[:, :, 1:, 1:].contiguous()
def get_inputs():
    """Sample forward() inputs for benchmarking."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor (args, kwargs) pair for benchmarking."""
    init_args = []
    init_kwargs = {}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused ZeroPad2d((1, 0, 1, 0)) + MaxPool2d(3, stride=2, padding=1) over a
    # (4, 4, 4, 4) input, producing the (4, 4, 3, 3) pooled planes. For each
    # output element the nine window taps are loaded individually; taps that
    # fall outside the 5x5 zero-padded plane are replaced by -inf so they can
    # never win the running maximum.
    xnumel = 144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # Decompose flat index: x0 = output col, x1 = output row, x2 = (n*c) plane.
    x1 = xindex // 3 % 3
    x0 = xindex % 3
    x2 = xindex // 9
    x4 = xindex
    # Window coordinates in the padded plane: rows/cols 2*x - 1 .. 2*x + 1,
    # each checked against the valid range [0, 5).
    tmp0 = -1 + 2 * x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 5, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + 2 * x0
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    # Tap (row-1, col-1): the extra "-2 + 2*x" tests additionally exclude the
    # zero row/column inserted by the ZeroPad2d (input coords shift by one).
    tmp11 = -2 + 2 * x1
    tmp12 = tmp11 >= tmp1
    tmp13 = -2 + 2 * x0
    tmp14 = tmp13 >= tmp1
    tmp15 = tmp12 & tmp14
    tmp16 = tmp15 & tmp10
    tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp18 = tl.full(tmp17.shape, float('-inf'), tmp17.dtype)
    tmp19 = tl.where(tmp10, tmp17, tmp18)
    # The remaining eight taps repeat the same mask/load/max pattern, walking
    # the 3x3 window left-to-right, top-to-bottom.
    tmp20 = 2 * x0
    tmp21 = tmp20 >= tmp1
    tmp22 = tmp20 < tmp3
    tmp23 = tmp21 & tmp22
    tmp24 = tmp5 & tmp23
    tmp25 = tmp12 & tmp7
    tmp26 = tmp25 & tmp24
    tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp28 = tl.full(tmp27.shape, float('-inf'), tmp27.dtype)
    tmp29 = tl.where(tmp24, tmp27, tmp28)
    tmp30 = triton_helpers.maximum(tmp29, tmp19)
    tmp31 = 1 + 2 * x0
    tmp32 = tmp31 >= tmp1
    tmp33 = tmp31 < tmp3
    tmp34 = tmp32 & tmp33
    tmp35 = tmp5 & tmp34
    tmp36 = tmp12 & tmp21
    tmp37 = tmp36 & tmp35
    tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp39 = tl.full(tmp38.shape, float('-inf'), tmp38.dtype)
    tmp40 = tl.where(tmp35, tmp38, tmp39)
    tmp41 = triton_helpers.maximum(tmp40, tmp30)
    tmp42 = 2 * x1
    tmp43 = tmp42 >= tmp1
    tmp44 = tmp42 < tmp3
    tmp45 = tmp43 & tmp44
    tmp46 = tmp45 & tmp9
    tmp47 = tmp2 & tmp14
    tmp48 = tmp47 & tmp46
    tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp50 = tl.full(tmp49.shape, float('-inf'), tmp49.dtype)
    tmp51 = tl.where(tmp46, tmp49, tmp50)
    tmp52 = triton_helpers.maximum(tmp51, tmp41)
    tmp53 = tmp45 & tmp23
    tmp54 = tmp2 & tmp7
    tmp55 = tmp54 & tmp53
    tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp57 = tl.full(tmp56.shape, float('-inf'), tmp56.dtype)
    tmp58 = tl.where(tmp53, tmp56, tmp57)
    tmp59 = triton_helpers.maximum(tmp58, tmp52)
    tmp60 = tmp45 & tmp34
    tmp61 = tmp2 & tmp21
    tmp62 = tmp61 & tmp60
    tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp64 = tl.full(tmp63.shape, float('-inf'), tmp63.dtype)
    tmp65 = tl.where(tmp60, tmp63, tmp64)
    tmp66 = triton_helpers.maximum(tmp65, tmp59)
    tmp67 = 1 + 2 * x1
    tmp68 = tmp67 >= tmp1
    tmp69 = tmp67 < tmp3
    tmp70 = tmp68 & tmp69
    tmp71 = tmp70 & tmp9
    tmp72 = tmp43 & tmp14
    tmp73 = tmp72 & tmp71
    tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp75 = tl.full(tmp74.shape, float('-inf'), tmp74.dtype)
    tmp76 = tl.where(tmp71, tmp74, tmp75)
    tmp77 = triton_helpers.maximum(tmp76, tmp66)
    tmp78 = tmp70 & tmp23
    tmp79 = tmp43 & tmp7
    tmp80 = tmp79 & tmp78
    tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp82 = tl.full(tmp81.shape, float('-inf'), tmp81.dtype)
    tmp83 = tl.where(tmp78, tmp81, tmp82)
    tmp84 = triton_helpers.maximum(tmp83, tmp77)
    tmp85 = tmp70 & tmp34
    tmp86 = tmp43 & tmp21
    tmp87 = tmp86 & tmp85
    tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp89 = tl.full(tmp88.shape, float('-inf'), tmp88.dtype)
    tmp90 = tl.where(tmp85, tmp88, tmp89)
    tmp91 = triton_helpers.maximum(tmp90, tmp84)
    # tmp91 holds the max over all in-bounds taps of this window.
    tl.store(out_ptr0 + x4, tmp91, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Contiguous copy of the x[:, :, 1:, 1:] crop: reads the interior 2x2 of
    # each 3x3 plane (base offset 4 = row 1, col 1) and writes it densely.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x0 = output col, x1 = output row, x2 = (n*c) plane.
    x0 = xindex % 2
    x1 = xindex // 2 % 2
    x2 = xindex // 4
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (4 + x0 + 3 * x1 + 9 * x2), xmask)
    tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
    # Runs the fused pad+maxpool pipeline on CUDA device 0.
    # Input: one (4, 4, 4, 4) float tensor. Output: the (4, 4, 2, 2) result of
    # ZeroPad2d((1,0,1,0)) -> MaxPool2d(3, stride=2, padding=1) -> [:, :, 1:, 1:].
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0: full 3x3 pooled planes, before the crop.
        buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0[grid(144)](
            arg0_1, buf0, 144, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        # buf1: contiguous 2x2 interior of each plane (the 1:, 1: slice).
        buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        triton_poi_fused_clone_1[grid(64)](buf0, buf1, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf0
    return buf1,
class MaxPoolPadNew(nn.Module):
    """Triton-backed MaxPoolPad; pad/pool submodules kept for state parity."""

    def __init__(self):
        super(MaxPoolPadNew, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.MaxPool2d(3, stride=2, padding=1)

    def forward(self, input_0):
        # Delegate to the generated fused kernel pipeline.
        return call([input_0])[0]
|
BigFishMaster/tnt
|
MaxPoolPad
| false
| 17,155
|
[
"BSD-3-Clause"
] | 3
|
8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
SpaceTimeRegionalConv
|
import torch
from torch import nn
class SpaceTimeRegionalConv(nn.Module):
    """
    Space Time Region Graph
    """

    def __init__(self, input_dim, output_dim, t_kernel_size=1, t_stride=1,
        t_padding=None, t_dilation=1, bias=True, residual=True):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        if t_padding is None:
            t_padding = (t_kernel_size - 1) // 2
        # Temporal conv: kernel spans time only (width-1 over agents).
        self.conv = nn.Conv2d(input_dim, output_dim,
            kernel_size=(t_kernel_size, 1), padding=(t_padding, 0),
            stride=(t_stride, 1), dilation=(t_dilation, 1), bias=bias)
        if not residual:
            self.residual = lambda x: 0
        elif input_dim == output_dim and t_stride == 1:
            self.residual = lambda x: x
        else:
            self.residual = nn.Sequential(nn.Conv2d(input_dim, output_dim,
                kernel_size=1, stride=(t_stride, 1)))
        self.relu = nn.ReLU()

    def _get_graph(self, length, n_agents, device):
        # Adjacency over (time, agent) nodes: 1 where the time steps match
        # or the agents match, 0 elsewhere.
        adj = torch.zeros(length, n_agents, length, n_agents, device=device)
        time_eye = torch.eye(length, length, device=device)
        agent_eye = torch.eye(n_agents, n_agents, device=device)
        adj = torch.max(adj, time_eye.view(length, 1, length, 1))
        adj = torch.max(adj, agent_eye.view(1, n_agents, 1, n_agents))
        return adj.view(length * n_agents, length * n_agents)

    def forward(self, x):
        """
        :param x: [batch, length, n_agents, input_dim]
        return output: [batch, new_length, n_agents, output_dim]
        """
        batch, length, n_agents, _ = x.size()
        feats = x.permute(0, 3, 1, 2)
        shortcut = self.residual(feats)
        feats = self.conv(feats)
        new_length = feats.size(2)
        graph = self._get_graph(new_length, n_agents, feats.device)
        flat = feats.view(batch, self.output_dim, new_length * n_agents)
        # Aggregate over graph neighbours, normalized by sqrt(degree-ish).
        norm = (length + n_agents - 2) ** 0.5
        mixed = torch.einsum('ncv,vw->ncw', (flat, graph)).view(batch,
            self.output_dim, new_length, n_agents) / norm
        mixed = mixed + shortcut
        return self.relu(mixed.permute(0, 2, 3, 1))
def get_inputs():
    """Sample forward inputs: [batch, length, n_agents, input_dim]."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) for SpaceTimeRegionalConv."""
    return [[], {'input_dim': 4, 'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Adds the per-channel conv bias (in_ptr1) to the channels-last conv
    # output (in_ptr0, stride pattern y0 + 4*x2 + 64*y1) and repacks it into
    # a dense (batch, channel, time*agents) buffer for the following bmm.
    ynumel = 16
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    # x2 = spatial position (time*agent), y0 = channel, y1 = batch.
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_maximum_zeros_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Materializes the (4,4,4,4) space-time graph from _get_graph:
    # entry = max(zeros, time-eye, agent-eye), i.e. 1.0 where the two time
    # indices match OR the two agent indices match, else 0.0.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # (x3, x2) = source (time, agent); (x1, x0) = target (time, agent).
    x3 = xindex // 64
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x0 = xindex % 4
    x4 = xindex
    tmp0 = x3
    tmp1 = x1
    tmp2 = tmp0 == tmp1
    tmp3 = 1.0
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp7 = x2
    tmp8 = x0
    tmp9 = tmp7 == tmp8
    tmp10 = tl.where(tmp9, tmp3, tmp4)
    tmp11 = triton_helpers.maximum(tmp6, tmp10)
    tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
    out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Epilogue of the graph aggregation: scales the bmm result by
    # 0.4082... == 6 ** -0.5 (the (length + n_agents - 2) ** 0.5 normalizer
    # for the 4x4 case), adds the residual (in_ptr0), applies ReLU in place,
    # and records the <= 0 mask (out_ptr0) for the backward pass.
    ynumel = 16
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 4
    y1 = yindex // 4
    tmp0 = tl.load(in_out_ptr0 + (x2 + 16 * y3), xmask & ymask)
    tmp3 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
    tmp1 = 0.4082482904638631
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.full([1, 1], 0, tl.int32)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = 0.0
    tmp8 = tmp6 <= tmp7
    tl.store(in_out_ptr0 + (x2 + 16 * y3), tmp6, xmask & ymask)
    tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask)
def call(args):
    # Inductor-generated forward for SpaceTimeRegionalConv (4x4x4x4 case):
    # conv -> bias-add/repack -> build graph -> bmm aggregate -> scaled
    # residual ReLU. args = (input, conv weight, conv bias).
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0: conv output in channels-last layout (input viewed as NHWC).
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
            4, 4, 4), (64, 1, 16, 4), 0), primals_2, stride=(1, 1), padding
            =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
            0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 1, 16, 4))
        # buf1: bias added, repacked dense for the bmm.
        buf1 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf1, 16,
            16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        # buf2: the 16x16 space-time graph (reuses buf0's storage).
        buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        triton_poi_fused_maximum_zeros_1[grid(256)](buf2, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        # buf3: features @ graph (the einsum 'ncv,vw->ncw').
        buf3 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (1, 16, 16), (0, 16, 1),
            0), reinterpret_tensor(buf2, (1, 16, 16), (0, 16, 1), 0), out=buf3)
        del buf1
        buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 4, 1, 16), 0)
        del buf3
        # buf5: boolean ReLU mask saved for backward.
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 4, 1, 16), torch.bool)
        triton_poi_fused_relu_threshold_backward_2[grid(16, 16)](buf4,
            primals_1, buf5, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4,
            num_stages=1)
    return buf4, primals_2, reinterpret_tensor(primals_1, (4, 4, 4, 4), (64,
        1, 16, 4), 0), buf5, reinterpret_tensor(buf2, (1, 16, 16), (256, 1,
        16), 0)
class SpaceTimeRegionalConvNew(nn.Module):
    """
    Space Time Region Graph
    """

    def __init__(self, input_dim, output_dim, t_kernel_size=1, t_stride=1,
        t_padding=None, t_dilation=1, bias=True, residual=True):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        if t_padding is None:
            t_padding = (t_kernel_size - 1) // 2
        self.conv = nn.Conv2d(input_dim, output_dim,
            kernel_size=(t_kernel_size, 1), padding=(t_padding, 0),
            stride=(t_stride, 1), dilation=(t_dilation, 1), bias=bias)
        if not residual:
            self.residual = lambda x: 0
        elif input_dim == output_dim and t_stride == 1:
            self.residual = lambda x: x
        else:
            self.residual = nn.Sequential(nn.Conv2d(input_dim, output_dim,
                kernel_size=1, stride=(t_stride, 1)))
        self.relu = nn.ReLU()

    def _get_graph(self, length, n_agents, device):
        # Adjacency: 1 where time steps match or agents match, else 0.
        adj = torch.zeros(length, n_agents, length, n_agents, device=device)
        adj = torch.max(adj, torch.eye(length, length, device=device).view(
            length, 1, length, 1))
        adj = torch.max(adj, torch.eye(n_agents, n_agents, device=device)
            .view(1, n_agents, 1, n_agents))
        return adj.view(length * n_agents, length * n_agents)

    def forward(self, input_0):
        # Generated kernel expects (input, conv weight, conv bias).
        return call([input_0, self.conv.weight, self.conv.bias])[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
SpaceTimeRegionalConv
| false
| 17,156
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
Sigmoid
|
import torch
import torch.nn as nn
import torch.utils.data
from torch.nn import Sigmoid
class Sigmoid(nn.Module):
    """Sigmoid activation module with optional in-place application."""

    def __init__(self, inplace: 'bool'=False):
        super(Sigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        if self.inplace:
            return x.sigmoid_()
        return x.sigmoid()
def get_inputs():
    """Sample forward inputs: one random 4-D tensor."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs): Sigmoid takes no required arguments."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Elementwise sigmoid over a flat 256-element tensor (4*4*4*4).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
    # Launches the elementwise sigmoid kernel on CUDA device 0 and returns
    # a new (4, 4, 4, 4) tensor (out-of-place, unlike the inplace=True path).
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class SigmoidNew(nn.Module):
    """Triton-backed Sigmoid; always computes out-of-place via `call`."""

    def __init__(self, inplace: 'bool'=False):
        super(SigmoidNew, self).__init__()
        self.inplace = inplace

    def forward(self, input_0):
        return call([input_0])[0]
|
BigFishMaster/tnt
|
Sigmoid
| false
| 17,157
|
[
"BSD-3-Clause"
] | 3
|
8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
AvgPoolPad
|
import torch
import torch.nn as nn
import torch.utils.data
class AvgPoolPad(nn.Module):
    """Average pooling with one extra zero pad on the top/left edge.

    Pads (left=1, top=1), runs AvgPool2d(3) with count_include_pad=False
    (the pool's own implicit padding is excluded from the divisor, the
    explicit ZeroPad2d zeros are not), then drops the first output row
    and column.
    """

    def __init__(self, stride=2, padding=1):
        super(AvgPoolPad, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.AvgPool2d(3, stride=stride, padding=padding,
            count_include_pad=False)

    def forward(self, x):
        pooled = self.pool(self.pad(x))
        # Crop the leading row/column introduced by the asymmetric pad.
        return pooled[:, :, 1:, 1:].contiguous()
def get_inputs():
    """Sample forward inputs: a random NCHW tensor."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs): defaults (stride=2, padding=1) suffice."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Fused ZeroPad2d((1, 0, 1, 0)) + AvgPool2d(3, stride=2, padding=1,
    # count_include_pad=False) over a (4, 4, 4, 4) input, writing (4, 4, 3, 3).
    # Each output element sums its in-bounds 3x3 taps (out-of-range taps
    # contribute 0) and divides by the count of taps inside the 5x5 padded
    # plane — so the pool's implicit padding is excluded from the divisor
    # while the explicit ZeroPad2d zeros still count.
    xnumel = 144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x0 = output col, x1 = output row, x2 = (n*c) plane.
    x1 = xindex // 3 % 3
    x0 = xindex % 3
    x2 = xindex // 9
    x4 = xindex
    # Window coordinates in the padded plane (rows/cols 2*x - 1 .. 2*x + 1),
    # each validated against [0, 5); the "-2 + 2*x" tests additionally skip
    # the zero row/column inserted by the ZeroPad2d.
    tmp0 = -1 + 2 * x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 5, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + 2 * x0
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = -2 + 2 * x1
    tmp12 = tmp11 >= tmp1
    tmp13 = -2 + 2 * x0
    tmp14 = tmp13 >= tmp1
    tmp15 = tmp12 & tmp14
    tmp16 = tmp15 & tmp10
    tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp10, tmp17, tmp18)
    # The remaining eight taps repeat the same mask/load/accumulate pattern,
    # walking the 3x3 window left-to-right, top-to-bottom.
    tmp20 = 2 * x0
    tmp21 = tmp20 >= tmp1
    tmp22 = tmp20 < tmp3
    tmp23 = tmp21 & tmp22
    tmp24 = tmp5 & tmp23
    tmp25 = tmp12 & tmp7
    tmp26 = tmp25 & tmp24
    tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
    tmp29 = tl.where(tmp24, tmp27, tmp28)
    tmp30 = tmp29 + tmp19
    tmp31 = 1 + 2 * x0
    tmp32 = tmp31 >= tmp1
    tmp33 = tmp31 < tmp3
    tmp34 = tmp32 & tmp33
    tmp35 = tmp5 & tmp34
    tmp36 = tmp12 & tmp21
    tmp37 = tmp36 & tmp35
    tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
    tmp40 = tl.where(tmp35, tmp38, tmp39)
    tmp41 = tmp40 + tmp30
    tmp42 = 2 * x1
    tmp43 = tmp42 >= tmp1
    tmp44 = tmp42 < tmp3
    tmp45 = tmp43 & tmp44
    tmp46 = tmp45 & tmp9
    tmp47 = tmp2 & tmp14
    tmp48 = tmp47 & tmp46
    tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
    tmp51 = tl.where(tmp46, tmp49, tmp50)
    tmp52 = tmp51 + tmp41
    tmp53 = tmp45 & tmp23
    tmp54 = tmp2 & tmp7
    tmp55 = tmp54 & tmp53
    tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
    tmp58 = tl.where(tmp53, tmp56, tmp57)
    tmp59 = tmp58 + tmp52
    tmp60 = tmp45 & tmp34
    tmp61 = tmp2 & tmp21
    tmp62 = tmp61 & tmp60
    tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
    tmp65 = tl.where(tmp60, tmp63, tmp64)
    tmp66 = tmp65 + tmp59
    tmp67 = 1 + 2 * x1
    tmp68 = tmp67 >= tmp1
    tmp69 = tmp67 < tmp3
    tmp70 = tmp68 & tmp69
    tmp71 = tmp70 & tmp9
    tmp72 = tmp43 & tmp14
    tmp73 = tmp72 & tmp71
    tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
    tmp76 = tl.where(tmp71, tmp74, tmp75)
    tmp77 = tmp76 + tmp66
    tmp78 = tmp70 & tmp23
    tmp79 = tmp43 & tmp7
    tmp80 = tmp79 & tmp78
    tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
    tmp83 = tl.where(tmp78, tmp81, tmp82)
    tmp84 = tmp83 + tmp77
    tmp85 = tmp70 & tmp34
    tmp86 = tmp43 & tmp21
    tmp87 = tmp86 & tmp85
    tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype)
    tmp90 = tl.where(tmp85, tmp88, tmp89)
    tmp91 = tmp90 + tmp84
    # Divisor for count_include_pad=False: the window extents clamped to the
    # 5x5 padded plane, multiplied per axis (inclusion-exclusion form).
    tmp92 = (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (
        0 * (0 >= -1 + 2 * x1) + (-1 + 2 * x1) * (-1 + 2 * x1 > 0)) + (5 *
        (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) * (5 * (5 <= 2 +
        2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 *
        x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (5 * (5 <= 2 + 2 * x1) +
        (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x1) + (
        -1 + 2 * x1) * (-1 + 2 * x1 > 0)) * (5 * (5 <= 2 + 2 * x0) + (2 + 2 *
        x0) * (2 + 2 * x0 < 5))
    tmp93 = tmp91 / tmp92
    tl.store(out_ptr0 + x4, tmp93, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Contiguous copy of the x[:, :, 1:, 1:] crop: reads the interior 2x2 of
    # each 3x3 plane (base offset 4 = row 1, col 1) and writes it densely.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x0 = output col, x1 = output row, x2 = (n*c) plane.
    x0 = xindex % 2
    x1 = xindex // 2 % 2
    x2 = xindex // 4
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (4 + x0 + 3 * x1 + 9 * x2), xmask)
    tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
    # Runs the fused pad+avgpool pipeline on CUDA device 0.
    # Input: one (4, 4, 4, 4) float tensor. Output: the (4, 4, 2, 2) result of
    # ZeroPad2d((1,0,1,0)) -> AvgPool2d(3, stride=2, padding=1,
    # count_include_pad=False) -> [:, :, 1:, 1:].
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0: full 3x3 averaged planes, before the crop.
        buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_avg_pool2d_constant_pad_nd_0[grid(144)](arg0_1,
            buf0, 144, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        # buf1: contiguous 2x2 interior of each plane (the 1:, 1: slice).
        buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        triton_poi_fused_clone_1[grid(64)](buf0, buf1, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf0
    return buf1,
class AvgPoolPadNew(nn.Module):
    """Triton-backed AvgPoolPad; pad/pool submodules kept for state parity."""

    def __init__(self, stride=2, padding=1):
        super(AvgPoolPadNew, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.AvgPool2d(3, stride=stride, padding=padding,
            count_include_pad=False)

    def forward(self, input_0):
        # Delegate to the generated fused kernel pipeline.
        return call([input_0])[0]
|
BigFishMaster/tnt
|
AvgPoolPad
| false
| 17,158
|
[
"BSD-3-Clause"
] | 3
|
8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
Tanh
|
import torch
import torch.nn as nn
import torch.utils.data
class Tanh(nn.Module):
    """Tanh activation module with optional in-place application."""

    def __init__(self, inplace: 'bool'=False):
        super(Tanh, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        if self.inplace:
            return x.tanh_()
        return x.tanh()
def get_inputs():
    """Sample forward inputs: one random 4-D tensor."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs): Tanh takes no required arguments."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Elementwise tanh over a flat 256-element tensor (4*4*4*4).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = libdevice.tanh(tmp0)
    tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
    # Launches the elementwise tanh kernel on CUDA device 0 and returns a
    # new (4, 4, 4, 4) tensor (out-of-place, unlike the inplace=True path).
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class TanhNew(nn.Module):
    """Triton-backed Tanh; always computes out-of-place via `call`."""

    def __init__(self, inplace: 'bool'=False):
        super(TanhNew, self).__init__()
        self.inplace = inplace

    def forward(self, input_0):
        return call([input_0])[0]
|
BigFishMaster/tnt
|
Tanh
| false
| 17,159
|
[
"BSD-3-Clause"
] | 3
|
8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
ValueNetwork
|
import torch
import torch.nn.functional as F
import torch.nn as nn
def mish(x):
    """
    Mish: A Self Regularized Non-Monotonic Neural Activation Function
    https://arxiv.org/abs/1908.08681v1
    implemented for PyTorch / FastAI by lessw2020
    https://github.com/lessw2020/mish
    param:
        x: output of a layer of a neural network
    return: mish activation function
    """
    # x * tanh(softplus(x)), capped at 6 (ReLU6-style upper bound).
    gate = torch.tanh(F.softplus(x))
    return torch.clamp(x * gate, max=6)
class ValueNetwork(nn.Module):
    """Three mish-activated hidden layers followed by a scalar value head.

    All weights and biases are re-initialized uniformly in [-init_w, init_w].
    """

    def __init__(self, state_dim, hidden_dim, init_w=0.003):
        super(ValueNetwork, self).__init__()
        self.linear1 = nn.Linear(state_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear2_3 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)
        # Same (weight, bias) initialization order as listing each layer
        # explicitly, so RNG consumption is unchanged.
        for layer in (self.linear1, self.linear2, self.linear2_3,
                self.linear3):
            layer.weight.data.uniform_(-init_w, init_w)
            layer.bias.data.uniform_(-init_w, init_w)

    def forward(self, state):
        hidden = mish(self.linear1(state))
        hidden = mish(self.linear2(hidden))
        hidden = mish(self.linear2_3(hidden))
        return self.linear3(hidden)
def get_inputs():
    """Sample forward inputs: a random state batch."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs) for ValueNetwork."""
    return [[], {'state_dim': 4, 'hidden_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Elementwise mish with cap: clamp(x * tanh(softplus(x)), max=6).
    # Mirrors F.softplus's overflow guard: for x > 20 softplus(x) ~= x.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    # softplus: log1p(exp(x)), or x itself past the threshold.
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = libdevice.tanh(tmp5)
    tmp7 = tmp0 * tmp6
    tmp8 = 6.0
    tmp9 = triton_helpers.minimum(tmp7, tmp8)
    tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
    # Inductor-generated ValueNetwork forward: three (addmm -> mish) stages
    # followed by the scalar head addmm. args = (w1, b1, input, w2, b2,
    # w2_3, b2_3, w3, b3); extra returned tensors are saved for backward.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (1, 4), (4, 1))
    assert_size_stride(primals_9, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Stage 1: linear1 (input flattened to (64, 4)) then mish.
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_mul_softplus_tanh_0[grid(256)](buf0, buf1,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        # Stage 2: linear2 then mish.
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_clamp_mul_softplus_tanh_0[grid(256)](buf2, buf3,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        # Stage 3: linear2_3 then mish.
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_clamp_mul_softplus_tanh_0[grid(256)](buf4, buf5,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        # Head: linear3 producing one value per row.
        buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_8, (4, 1), (1, 4), 0),
            alpha=1, beta=1, out=buf7)
        del primals_9
    return reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
        ), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
        ), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0
        ), primals_8, primals_6, primals_4
def mish(x):
    """
    Mish: A Self Regularized Non-Monotonic Neural Activation Function
    https://arxiv.org/abs/1908.08681v1
    implemented for PyTorch / FastAI by lessw2020
    https://github.com/lessw2020/mish
    param:
        x: output of a layer of a neural network
    return: mish activation function
    """
    # x * tanh(softplus(x)), capped at 6 (ReLU6-style upper bound).
    activated = x * torch.tanh(F.softplus(x))
    return torch.clamp(activated, max=6)
class ValueNetworkNew(nn.Module):
    """Triton-backed ValueNetwork; same layers and initialization."""

    def __init__(self, state_dim, hidden_dim, init_w=0.003):
        super(ValueNetworkNew, self).__init__()
        self.linear1 = nn.Linear(state_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear2_3 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)
        # Same (weight, bias) initialization order as the eager module,
        # so RNG consumption is unchanged.
        for layer in (self.linear1, self.linear2, self.linear2_3,
                self.linear3):
            layer.weight.data.uniform_(-init_w, init_w)
            layer.bias.data.uniform_(-init_w, init_w)

    def forward(self, input_0):
        # Argument order fixed by the generated `call`:
        # (w1, b1, input, w2, b2, w2_3, b2_3, w3, b3).
        return call([self.linear1.weight, self.linear1.bias, input_0,
            self.linear2.weight, self.linear2.bias,
            self.linear2_3.weight, self.linear2_3.bias,
            self.linear3.weight, self.linear3.bias])[0]
|
Crawford-fang/ROS_pytorch_RL
|
ValueNetwork
| false
| 17,160
|
[
"Apache-2.0"
] | 10
|
2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
|
https://github.com/Crawford-fang/ROS_pytorch_RL/tree/2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
|
AdaptiveConcatPool2d
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.backends.cudnn
class AdaptiveConcatPool2d(nn.Module):
    """Concatenates adaptive max pool and adaptive avg pool along channels.

    Output has twice the input channel count: max-pooled features first,
    then average-pooled features.
    """

    def __init__(self, sz=None):
        super().__init__()
        target = sz or (1, 1)
        self.ap = nn.AdaptiveAvgPool2d(target)
        self.mp = nn.AdaptiveMaxPool2d(target)

    def forward(self, x):
        return torch.cat([self.mp(x), self.ap(x)], 1)
def get_inputs():
    """Sample forward inputs: one random NCHW tensor."""
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    """Constructor (args, kwargs): default output size (1, 1)."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Global max over each 4x4 spatial plane (adaptive max pool to 1x1):
    # loads all 16 elements of a plane and folds them with pairwise maxima.
    # Results are written with stride 8 (x0 + 8 * x1), i.e. into the first
    # half of the (4, 8, 1, 1) concat buffer allocated by `call`.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x2 = flat (batch*channel) plane index; x0/x1 place it in the output.
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + 16 * x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x2), xmask, eviction_policy='evict_last'
        )
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x2), xmask, eviction_policy='evict_last'
        )
    tmp5 = tl.load(in_ptr0 + (3 + 16 * x2), xmask, eviction_policy='evict_last'
        )
    tmp7 = tl.load(in_ptr0 + (4 + 16 * x2), xmask, eviction_policy='evict_last'
        )
    tmp9 = tl.load(in_ptr0 + (5 + 16 * x2), xmask, eviction_policy='evict_last'
        )
    tmp11 = tl.load(in_ptr0 + (6 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp13 = tl.load(in_ptr0 + (7 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp17 = tl.load(in_ptr0 + (9 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp19 = tl.load(in_ptr0 + (10 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp21 = tl.load(in_ptr0 + (11 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp25 = tl.load(in_ptr0 + (13 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp27 = tl.load(in_ptr0 + (14 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    # Running maximum across the 16 plane elements.
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tmp12 = triton_helpers.maximum(tmp11, tmp10)
    tmp14 = triton_helpers.maximum(tmp13, tmp12)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tmp20 = triton_helpers.maximum(tmp19, tmp18)
    tmp22 = triton_helpers.maximum(tmp21, tmp20)
    tmp24 = triton_helpers.maximum(tmp23, tmp22)
    tmp26 = triton_helpers.maximum(tmp25, tmp24)
    tmp28 = triton_helpers.maximum(tmp27, tmp26)
    tmp30 = triton_helpers.maximum(tmp29, tmp28)
    tl.store(out_ptr0 + (x0 + 8 * x1), tmp30, xmask)
@triton.jit
def triton_per_fused_mean_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.
    constexpr):
    """Per-(batch, channel) mean over 16 contiguous spatial elements.

    One row of 16 values is reduced per x index; the result is written with
    an x2 + 8 * x3 offset, i.e. into a buffer whose channel stride is 8
    (the second half of the concat buffer built by call()).
    """
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    x2 = xindex % 4  # channel index within a batch element
    x3 = xindex // 4  # batch index
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]  # sum of the 16 spatial values
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5  # mean
    tl.store(out_ptr1 + (x2 + 8 * x3), tmp6, xmask)
def call(args):
    """Fused AdaptiveConcatPool2d forward on a (4, 4, 4, 4) CUDA tensor.

    Allocates one (4, 8, 1, 1) output; the first 4 channels receive the
    adaptive max-pool result, the last 4 the spatial mean, via two
    reinterpreted views of the same buffer.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf3 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32)
        # View over channels 0-3 of buf3: spatial max.
        buf0 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 0)
        get_raw_stream(0)
        triton_poi_fused_adaptive_max_pool2d_0[grid(16)](arg0_1, buf0, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        # View over channels 4-7 of buf3: spatial mean.
        buf2 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 4)
        triton_per_fused_mean_1[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del arg0_1
    return buf3,
class AdaptiveConcatPool2dNew(nn.Module):
    """Concatenation of adaptive average- and max-pooling (Triton forward).

    The pooling submodules mirror the original module's interface; the
    actual forward pass runs the fused Triton kernels via call().
    """

    def __init__(self, sz=None):
        super().__init__()
        target_size = sz or (1, 1)
        self.ap = nn.AdaptiveAvgPool2d(target_size)
        self.mp = nn.AdaptiveMaxPool2d(target_size)

    def forward(self, input_0):
        result = call([input_0])
        return result[0]
|
CalebEverett/fastai-dl2
|
AdaptiveConcatPool2d
| false
| 17,161
|
[
"Apache-2.0"
] | 4
|
64d23592eddca6ca1f3647e73c319e97c8eb392b
|
https://github.com/CalebEverett/fastai-dl2/tree/64d23592eddca6ca1f3647e73c319e97c8eb392b
|
SpatialCrossMapLRN
|
import torch
import torch.nn as nn
import torch.utils.data
class SpatialCrossMapLRN(nn.Module):
    """Local response normalization, either across channels or within maps.

    x is divided by (alpha * avg(x^2) + k) ** beta, where the average runs
    over a local_size window of neighbouring channels (ACROSS_CHANNELS=True)
    or a local_size x local_size spatial window otherwise.
    """

    def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1,
        ACROSS_CHANNELS=True):
        super(SpatialCrossMapLRN, self).__init__()
        self.ACROSS_CHANNELS = ACROSS_CHANNELS
        pad = int((local_size - 1.0) / 2)
        if ACROSS_CHANNELS:
            # 3D average over a window of neighbouring channels only.
            self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
                stride=1, padding=(pad, 0, 0))
        else:
            self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
                padding=pad)
        self.alpha = alpha
        self.beta = beta
        self.k = k

    def forward(self, x):
        squared = x.pow(2)
        if self.ACROSS_CHANNELS:
            # Add a dummy dim so AvgPool3d slides along the channel axis.
            denom = self.average(squared.unsqueeze(1)).squeeze(1)
        else:
            denom = self.average(squared)
        denom = denom.mul(self.alpha).add(self.k).pow(self.beta)
        return x.div(denom)
def get_inputs():
    """Sample forward inputs: one random NCHW tensor of shape 4x4x4x4."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Sample constructor arguments: no positionals, no keyword args."""
    return [[], dict()]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    """Elementwise LRN specialized to local_size=1, alpha=1.0, k=1, beta=0.75.

    Computes x / (x*x + 1) ** 0.75 for each of the 256 input elements;
    the size-1 averaging window reduces to the identity, so the whole op
    folds into a single pointwise expression.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 * tmp0  # x^2
    tmp2 = 1.0
    tmp3 = tmp1 * tmp2  # identity average (window size 1)
    tmp4 = tmp3 * tmp2  # * alpha (1.0)
    tmp5 = tmp4 + tmp2  # + k (1)
    tmp6 = 0.75
    tmp7 = libdevice.pow(tmp5, tmp6)  # ** beta
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
    """Run the fused pointwise LRN kernel on a (4, 4, 4, 4) CUDA input."""
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_pow_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class SpatialCrossMapLRNNew(nn.Module):
    """LRN module whose forward dispatches to the fused Triton kernel.

    The pooling submodule is kept to preserve the original interface even
    though the compiled forward path does not invoke it.
    """

    def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1,
        ACROSS_CHANNELS=True):
        super(SpatialCrossMapLRNNew, self).__init__()
        self.ACROSS_CHANNELS = ACROSS_CHANNELS
        pad = int((local_size - 1.0) / 2)
        if ACROSS_CHANNELS:
            self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
                stride=1, padding=(pad, 0, 0))
        else:
            self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
                padding=pad)
        self.alpha = alpha
        self.beta = beta
        self.k = k

    def forward(self, input_0):
        result = call([input_0])
        return result[0]
|
BigFishMaster/tnt
|
SpatialCrossMapLRN
| false
| 17,162
|
[
"BSD-3-Clause"
] | 3
|
8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
|
MLP
|
import torch
from torch import nn
from torch.nn import functional as F
class MLP(nn.Module):
    """Three-layer perceptron with ReLU activations and dropout.

    forward() flattens the input per sample and returns both the final
    logits and the last hidden representation.
    """

    def __init__(self, input_dim, output_dim, dropout=0.5):
        super(MLP, self).__init__()
        self.input_fc = nn.Linear(input_dim, 250)
        self.hidden_fc = nn.Linear(250, 100)
        self.output_fc = nn.Linear(100, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        flat = x.view(x.shape[0], -1)
        h1 = self.dropout(F.relu(self.input_fc(flat)))
        h2 = self.dropout(F.relu(self.hidden_fc(h1)))
        return self.output_fc(h2), h2
def get_inputs():
    """Sample forward inputs: one random 4x4 matrix."""
    shape = [4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Sample constructor arguments: input_dim=4, output_dim=4."""
    return [[], dict(input_dim=4, output_dim=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place bias-add + ReLU over a (4, 250) activation buffer.

    in_out[i] = max(0, in_out[i] + bias[i % 250]); the bias is reloaded
    per row with an evict_last hint since it is shared across rows.
    """
    xnumel = 1000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 250  # feature index -> bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place bias-add + ReLU over a (4, 100) activation buffer.

    in_out[i] = max(0, in_out[i] + bias[i % 100]).
    """
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 100  # feature index -> bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """Compiled MLP forward: fc1 -> ReLU -> fc2 -> ReLU -> fc3.

    args: (input, fc1 weight, fc1 bias, fc2 weight, fc2 bias,
    fc3 weight, fc3 bias). Dropout is absent here; this path matches the
    module in eval mode. Returns the logits plus tensors kept for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (250, 4), (4, 1))
    assert_size_stride(primals_3, (250,), (1,))
    assert_size_stride(primals_4, (100, 250), (250, 1))
    assert_size_stride(primals_5, (100,), (1,))
    assert_size_stride(primals_6, (4, 100), (100, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 250), (250, 1), torch.float32)
        # x @ W1^T (bias and ReLU fused into the next kernel).
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 250),
            (1, 4), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(1000)](buf1, primals_3, 1000, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (250, 100), (
            1, 250), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(400)](buf3, primals_5, 400, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Final layer: bias + matmul in one fused addmm.
        extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
            (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf4)
        del primals_7
    return buf4, buf3, primals_1, buf1, buf3, primals_6, primals_4
class MLPNew(nn.Module):
    """Triton-compiled three-layer perceptron.

    Holds the same layers as the original MLP; forward() flattens the
    parameters into the argument order expected by call().
    """

    def __init__(self, input_dim, output_dim, dropout=0.5):
        super(MLPNew, self).__init__()
        self.input_fc = nn.Linear(input_dim, 250)
        self.hidden_fc = nn.Linear(250, 100)
        self.output_fc = nn.Linear(100, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input_0):
        # call() expects: input, then weight/bias of each layer in order.
        flat_args = [input_0,
            self.input_fc.weight, self.input_fc.bias,
            self.hidden_fc.weight, self.hidden_fc.bias,
            self.output_fc.weight, self.output_fc.bias]
        result = call(flat_args)
        return result[0], result[1]
|
CrispenGari/pneumonia-infection
|
MLP
| false
| 17,163
|
[
"MIT"
] | 4
|
8d1fc5f61aa8c4eb06d640e6da5abbbe23ccb85e
|
https://github.com/CrispenGari/pneumonia-infection/tree/8d1fc5f61aa8c4eb06d640e6da5abbbe23ccb85e
|
OutConv_Sigmoid
|
import torch
import torch.nn as nn
class OutConv_Sigmoid(nn.Module):
    """1x1 convolution followed by a sigmoid non-linearity."""

    def __init__(self, in_channels, out_channels):
        super(OutConv_Sigmoid, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        conv_out = self.conv(x)
        return self.sigmoid(conv_out)
def get_inputs():
    """Sample forward inputs: one random NCHW tensor of shape 4x4x4x4."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Sample constructor arguments: 4 input and 4 output channels."""
    return [[], dict(in_channels=4, out_channels=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """In-place bias-add + sigmoid epilogue after the external convolution.

    in_out[i] = sigmoid(in_out[i] + bias[channel(i)]) over a (4, 4, 4, 4)
    buffer; the channel index is (i // 16) % 4 for the 16-element planes.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4  # channel index -> bias element
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x3, tmp3, xmask)
def call(args):
    """Compiled OutConv_Sigmoid forward: conv2d, then fused bias+sigmoid.

    args: (conv weight, conv bias, input). The convolution runs through
    the extern kernel; bias-add and sigmoid are fused in Triton in place.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_sigmoid_0[grid(256)](buf1, primals_2,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return buf1, primals_1, primals_3, buf1
class OutConv_SigmoidNew(nn.Module):
    """1x1 conv + sigmoid with a Triton-compiled forward path."""

    def __init__(self, in_channels, out_channels):
        super(OutConv_SigmoidNew, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_0):
        # call() expects: conv weight, conv bias, then the input tensor.
        result = call([self.conv.weight, self.conv.bias, input_0])
        return result[0]
|
Curli-quan/oneshot-medical-landmark
|
OutConv_Sigmoid
| false
| 17,164
|
[
"Apache-2.0"
] | 7
|
572926077fffbe9832aa16baa98bd046ec326700
|
https://github.com/Curli-quan/oneshot-medical-landmark/tree/572926077fffbe9832aa16baa98bd046ec326700
|
QNetwork
|
import torch
import torch.nn.functional as F
import torch.nn as nn
def weights_init_(m):
    """Xavier-uniform initialize Linear weights and zero their biases.

    Non-Linear modules are left untouched; intended for Module.apply().
    """
    if not isinstance(m, nn.Linear):
        return
    torch.nn.init.xavier_uniform_(m.weight, gain=1)
    torch.nn.init.constant_(m.bias, 0)
class QNetwork(nn.Module):
    """Twin Q-value critics over concatenated (state, action) pairs.

    Each critic is a 3-hidden-layer ReLU MLP ending in a scalar; both
    estimates are returned (clipped double-Q style).
    """

    def __init__(self, state_dim, action_dim, hidden_dim):
        super(QNetwork, self).__init__()
        self.linear1_q1 = nn.Linear(state_dim + action_dim, hidden_dim)
        self.linear2_q1 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3_q1 = nn.Linear(hidden_dim, hidden_dim)
        self.linear4_q1 = nn.Linear(hidden_dim, 1)
        self.linear1_q2 = nn.Linear(state_dim + action_dim, hidden_dim)
        self.linear2_q2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3_q2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear4_q2 = nn.Linear(hidden_dim, 1)
        self.apply(weights_init_)

    def forward(self, state, action):
        joint = torch.cat([state, action], 1)
        h1 = joint
        for layer in (self.linear1_q1, self.linear2_q1, self.linear3_q1):
            h1 = F.relu(layer(h1))
        q1 = self.linear4_q1(h1)
        h2 = joint
        for layer in (self.linear1_q2, self.linear2_q2, self.linear3_q2):
            h2 = F.relu(layer(h2))
        q2 = self.linear4_q2(h2)
        return q1, q2
def get_inputs():
    """Sample forward inputs: a random state and a random action batch."""
    return [torch.rand(4, 4), torch.rand(4, 4)]


def get_init_inputs():
    """Sample constructor arguments for a tiny network."""
    return [[], dict(state_dim=4, action_dim=4, hidden_dim=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    """Concatenate two (4, 4) tensors along dim 1 into a (4, 8) buffer.

    Columns 0-3 come from in_ptr0, columns 4-7 from in_ptr1; the column
    index x0 selects the source via a predicated pair of loads.
    """
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8  # output column
    x1 = xindex // 8  # output row
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # column belongs to the first source
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place bias-add + ReLU over a (4, 4) activation buffer.

    in_out[i] = max(0, in_out[i] + bias[i % 4]).
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # feature index -> bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """Compiled twin-Q forward: concat(state, action), then two 4-layer MLPs.

    primals_1..18 are a flattened mix of both critics' weights/biases and
    the two inputs, in the scrambled order produced by the compiler (see
    QNetworkNew.forward for the mapping). Returns (q1, q2) plus tensors
    retained for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
        primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
        ) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (1, 4), (4, 1))
    assert_size_stride(primals_10, (1,), (1,))
    assert_size_stride(primals_11, (4, 8), (8, 1))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (4, 4), (4, 1))
    assert_size_stride(primals_14, (4,), (1,))
    assert_size_stride(primals_15, (4, 4), (4, 1))
    assert_size_stride(primals_16, (4,), (1,))
    assert_size_stride(primals_17, (1, 4), (4, 1))
    assert_size_stride(primals_18, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0 = cat([state, action], dim=1), shared by both critics.
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        # --- Q1 head: three mm + fused bias/ReLU stages, then addmm. ---
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
            ), 0), out=buf1)
        del primals_3
        buf2 = buf1
        del buf1
        triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4
            ), 0), out=buf3)
        buf4 = buf3
        del buf3
        triton_poi_fused_relu_1[grid(16)](buf4, primals_6, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_6
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4
            ), 0), out=buf5)
        buf6 = buf5
        del buf5
        triton_poi_fused_relu_1[grid(16)](buf6, primals_8, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_8
        buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_10, buf6, reinterpret_tensor(primals_9,
            (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf8)
        del primals_10
        # --- Q2 head: same structure on the shared concat buffer. ---
        buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_11, (8, 4), (1,
            8), 0), out=buf9)
        del primals_11
        buf10 = buf9
        del buf9
        triton_poi_fused_relu_1[grid(16)](buf10, primals_12, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_12
        buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf10, reinterpret_tensor(primals_13, (4, 4), (1,
            4), 0), out=buf11)
        buf12 = buf11
        del buf11
        triton_poi_fused_relu_1[grid(16)](buf12, primals_14, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_14
        buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf12, reinterpret_tensor(primals_15, (4, 4), (1,
            4), 0), out=buf13)
        buf14 = buf13
        del buf13
        triton_poi_fused_relu_1[grid(16)](buf14, primals_16, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_16
        buf16 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_18, buf14, reinterpret_tensor(
            primals_17, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf16)
        del primals_18
    return (buf8, buf16, buf0, buf2, buf4, buf6, buf10, buf12, buf14,
        primals_17, primals_15, primals_13, primals_9, primals_7, primals_5)
def weights_init_(m):
    """Xavier-uniform initialize Linear weights and zero their biases.

    Non-Linear modules are left untouched; intended for Module.apply().
    """
    if not isinstance(m, nn.Linear):
        return
    torch.nn.init.xavier_uniform_(m.weight, gain=1)
    torch.nn.init.constant_(m.bias, 0)
class QNetworkNew(nn.Module):
    """Triton-compiled twin Q-network.

    Layers mirror the original QNetwork; forward() flattens them into the
    (scrambled) positional order that the compiled call() expects, with
    the two inputs at positions 13 and 15.
    """

    def __init__(self, state_dim, action_dim, hidden_dim):
        super(QNetworkNew, self).__init__()
        self.linear1_q1 = nn.Linear(state_dim + action_dim, hidden_dim)
        self.linear2_q1 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3_q1 = nn.Linear(hidden_dim, hidden_dim)
        self.linear4_q1 = nn.Linear(hidden_dim, 1)
        self.linear1_q2 = nn.Linear(state_dim + action_dim, hidden_dim)
        self.linear2_q2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3_q2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear4_q2 = nn.Linear(hidden_dim, 1)
        self.apply(weights_init_)

    def forward(self, input_0, input_1):
        # Exact primals_1..primals_18 order required by call().
        flat_args = [
            self.linear2_q1.weight, self.linear3_q1.weight,
            self.linear1_q1.weight, self.linear1_q1.bias,
            self.linear2_q2.weight, self.linear2_q1.bias,
            self.linear3_q2.weight, self.linear3_q1.bias,
            self.linear4_q1.weight, self.linear4_q1.bias,
            self.linear1_q2.weight, self.linear1_q2.bias,
            input_0, self.linear2_q2.bias, input_1,
            self.linear3_q2.bias, self.linear4_q2.weight,
            self.linear4_q2.bias]
        result = call(flat_args)
        return result[0], result[1]
|
Crawford-fang/ROS_pytorch_RL
|
QNetwork
| false
| 17,165
|
[
"Apache-2.0"
] | 10
|
2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
|
https://github.com/Crawford-fang/ROS_pytorch_RL/tree/2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
|
LayerNormalization
|
import torch
import torch.nn as nn
class LayerNormalization(nn.Module):
    """Layer normalization over the last input dimension.

    See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf)
    """

    def __init__(self, normal_shape, gamma=True, beta=True, epsilon=1e-10):
        """Layer normalization layer
        See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf)
        :param normal_shape: The shape of the input tensor or the last dimension of the input tensor.
        :param gamma: Add a scale parameter if it is True.
        :param beta: Add an offset parameter if it is True.
        :param epsilon: Epsilon for calculating variance.
        """
        super(LayerNormalization, self).__init__()
        if isinstance(normal_shape, int):
            shape = (normal_shape,)
        else:
            shape = (normal_shape[-1],)
        self.normal_shape = torch.Size(shape)
        self.epsilon = epsilon
        # register_parameter(None) keeps the slot visible when disabled.
        if gamma:
            self.gamma = nn.Parameter(torch.Tensor(*shape))
        else:
            self.register_parameter('gamma', None)
        if beta:
            self.beta = nn.Parameter(torch.Tensor(*shape))
        else:
            self.register_parameter('beta', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Set gamma to ones and beta to zeros, when present."""
        if self.gamma is not None:
            self.gamma.data.fill_(1)
        if self.beta is not None:
            self.beta.data.zero_()

    def forward(self, x):
        """Normalize x over its last dimension, then scale and shift."""
        centered = x - x.mean(dim=-1, keepdim=True)
        variance = centered.pow(2).mean(dim=-1, keepdim=True)
        y = centered / (variance + self.epsilon).sqrt()
        if self.gamma is not None:
            y = y * self.gamma
        if self.beta is not None:
            y = y + self.beta
        return y

    def extra_repr(self):
        return 'normal_shape={}, gamma={}, beta={}, epsilon={}'.format(self
            .normal_shape, self.gamma is not None, self.beta is not None,
            self.epsilon)
def get_inputs():
    """Sample forward inputs: one random tensor of shape 4x4x4x4."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Sample constructor arguments: normalize over a last dim of 4."""
    return [[], dict(normal_shape=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """Center each group of 4 consecutive elements around its mean.

    out[i] = x[i] - mean(x[4k:4k+4]) where k = i // 4; i.e. subtracts the
    last-dimension mean for a tensor whose last dim has size 4.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # group (row) index over the last dimension
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8  # row mean
    tmp10 = tmp0 - tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Finish layer norm on pre-centered input: divide by std, scale, shift.

    in_ptr0 holds x - mean (from the previous kernel); this computes
    out = (x - mean) / sqrt(var + 1e-10) * gamma + beta, with the variance
    recomputed from the 4 centered values of each row.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # row index over the last dimension
    x0 = xindex % 4  # position within the row -> gamma/beta element
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12  # variance (mean of squared deviations)
    tmp14 = 1e-10
    tmp15 = tmp13 + tmp14  # + epsilon
    tmp16 = libdevice.sqrt(tmp15)
    tmp17 = tmp0 / tmp16  # normalized value
    tmp19 = tmp17 * tmp18  # * gamma
    tmp21 = tmp19 + tmp20  # + beta
    tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
    """Compiled LayerNormalization forward over the last dim of size 4.

    args: (input, gamma, beta). Two kernels: one centers the input, the
    second normalizes by the row std and applies gamma/beta.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK
            =256, num_warps=4, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(256)](buf0,
            primals_2, primals_3, buf1, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf0
        del primals_2
        del primals_3
    return buf1, primals_1
class LayerNormalizationNew(nn.Module):
    """Layer normalization with a Triton-compiled forward path.

    See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf)
    """

    def __init__(self, normal_shape, gamma=True, beta=True, epsilon=1e-10):
        """Layer normalization layer
        See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf)
        :param normal_shape: The shape of the input tensor or the last dimension of the input tensor.
        :param gamma: Add a scale parameter if it is True.
        :param beta: Add an offset parameter if it is True.
        :param epsilon: Epsilon for calculating variance.
        """
        super(LayerNormalizationNew, self).__init__()
        if isinstance(normal_shape, int):
            shape = (normal_shape,)
        else:
            shape = (normal_shape[-1],)
        self.normal_shape = torch.Size(shape)
        self.epsilon = epsilon
        if gamma:
            self.gamma = nn.Parameter(torch.Tensor(*shape))
        else:
            self.register_parameter('gamma', None)
        if beta:
            self.beta = nn.Parameter(torch.Tensor(*shape))
        else:
            self.register_parameter('beta', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Set gamma to ones and beta to zeros, when present."""
        if self.gamma is not None:
            self.gamma.data.fill_(1)
        if self.beta is not None:
            self.beta.data.zero_()

    def extra_repr(self):
        return 'normal_shape={}, gamma={}, beta={}, epsilon={}'.format(self
            .normal_shape, self.gamma is not None, self.beta is not None,
            self.epsilon)

    def forward(self, input_0):
        # call() expects: input, gamma, beta.
        result = call([input_0, self.gamma, self.beta])
        return result[0]
|
CyberZHG/torch-layer-normalization
|
LayerNormalization
| false
| 17,166
|
[
"MIT"
] | 9
|
89f405b60f53f85da6f03fe685c190ef394ce50c
|
https://github.com/CyberZHG/torch-layer-normalization/tree/89f405b60f53f85da6f03fe685c190ef394ce50c
|
DQN_hot2
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class DQN_hot2(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot2, self).__init__()
        self.fc1 = nn.Linear(m * n, 100)
        self.fc2 = nn.Linear(100, num_actions)

    def forward(self, x):
        flat = x.view(x.size(0), -1)
        hidden = F.relu(self.fc1(flat))
        return self.fc2(hidden)
def get_inputs():
    """Sample forward inputs: one random board batch of shape 4x4x4."""
    shape = [4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Sample constructor arguments: a 4x4 board and 4 actions."""
    return [[], dict(m=4, n=4, num_actions=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place bias-add + ReLU over a (4, 100) activation buffer.

    in_out[i] = max(0, in_out[i] + bias[i % 100]).
    """
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 100  # feature index -> bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """Compiled DQN_hot2 forward: flatten, fc1 + ReLU, fc2.

    args: (input, fc1 weight, fc1 bias, fc2 weight, fc2 bias). The input
    is flattened to (4, 16) via reinterpret_tensor (no copy).
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (100, 16), (16, 1))
    assert_size_stride(primals_3, (100,), (1,))
    assert_size_stride(primals_4, (4, 100), (100, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
            ), reinterpret_tensor(primals_2, (16, 100), (1, 16), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(400)](buf1, primals_3, 400, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Final layer: bias + matmul fused in one addmm.
        extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
            (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf2)
        del primals_5
    return buf2, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
        ), buf1, primals_4
class DQN_hot2New(nn.Module):
    """
    A MLP for DQN learning (Triton-compiled forward).

    Note: Uses a one hot board representation
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot2New, self).__init__()
        self.fc1 = nn.Linear(m * n, 100)
        self.fc2 = nn.Linear(100, num_actions)

    def forward(self, input_0):
        # call() expects: input, fc1 weight/bias, fc2 weight/bias.
        result = call([input_0, self.fc1.weight, self.fc1.bias,
            self.fc2.weight, self.fc2.bias])
        return result[0]
|
CoAxLab/azad
|
DQN_hot2
| false
| 17,167
|
[
"MIT"
] | 6
|
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
PolicyNetwork
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.distributions import Normal
def mish(x):
    """
    Mish: A Self Regularized Non-Monotonic Neural Activation Function
    https://arxiv.org/abs/1908.08681v1
    implemented for PyTorch / FastAI by lessw2020
    https://github.com/lessw2020/mish
    param:
        x: output of a layer of a neural network
    return: mish activation function

    Note: the result is additionally capped at 6 (ReLU6-style clamp).
    """
    activated = x * torch.tanh(F.softplus(x))
    return torch.clamp(activated, max=6)
class PolicyNetwork(nn.Module):
    """Gaussian policy head: outputs the mean and clamped log-std of a
    diagonal Gaussian over actions; sampling uses tanh squashing.
    """

    def __init__(self, num_inputs, num_actions, hidden_size, init_w=0.003,
        log_std_min=-10, log_std_max=2):
        super(PolicyNetwork, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        # Each layer is re-initialized uniformly right after creation so
        # the RNG draw order matches construction order.
        self.linear1 = nn.Linear(num_inputs, hidden_size)
        self._init_uniform(self.linear1, init_w)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self._init_uniform(self.linear2, init_w)
        self.mean_linear = nn.Linear(hidden_size, num_actions)
        self._init_uniform(self.mean_linear, init_w)
        self.log_std_linear = nn.Linear(hidden_size, num_actions)
        self._init_uniform(self.log_std_linear, init_w)

    @staticmethod
    def _init_uniform(layer, init_w):
        """Uniformly initialize a layer's weight and bias in [-w, w]."""
        layer.weight.data.uniform_(-init_w, init_w)
        layer.bias.data.uniform_(-init_w, init_w)

    def forward(self, state):
        hidden = mish(self.linear1(state))
        hidden = mish(self.linear2(hidden))
        mean = self.mean_linear(hidden)
        log_std = self.log_std_linear(hidden)
        log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
        return mean, log_std

    def evaluate(self, state, epsilon=1e-06):
        """Sample a squashed action and its log-probability (with the
        tanh change-of-variables correction)."""
        mean, log_std = self.forward(state)
        dist = Normal(mean, log_std.exp())
        z = dist.sample()
        action = torch.tanh(z)
        log_prob = dist.log_prob(z) - torch.log(1 - action.pow(2) + epsilon)
        log_prob = log_prob.sum(-1, keepdim=True)
        return action, log_prob, z, mean, log_std

    def get_action(self, state, exploitation=False):
        """Return one numpy action; deterministic tanh(mean) when
        exploitation is True, otherwise a sampled action."""
        state_t = torch.FloatTensor(state).unsqueeze(0)
        mean, log_std = self.forward(state_t)
        dist = Normal(mean, log_std.exp())
        # The sample is always drawn (matching the original RNG usage),
        # even when the deterministic action is returned.
        z = dist.sample()
        action = torch.tanh(mean) if exploitation else torch.tanh(z)
        return action.detach().numpy()[0]
def get_inputs():
    """Sample forward inputs: one random tensor of shape 4x4x4x4."""
    shape = [4, 4, 4, 4]
    return [torch.rand(shape)]


def get_init_inputs():
    """Sample constructor arguments for a tiny policy network."""
    return [[], dict(num_inputs=4, num_actions=4, hidden_size=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
import torch.nn as nn
from torch.distributions import Normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Elementwise capped mish: out = min(x * tanh(softplus(x)), 6.0).
    # This is the fused form of the module-level `mish` helper.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    # Softplus with an overflow guard: for x > 20, softplus(x) ~= x,
    # so the exp/log1p path is skipped to avoid overflow.
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = libdevice.tanh(tmp5)
    tmp7 = tmp0 * tmp6
    # clamp(..., max=6) from the mish helper.
    tmp8 = 6.0
    tmp9 = triton_helpers.minimum(tmp7, tmp8)
    tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Adds the log_std bias (in_ptr1), clamps to [-10, 2] (the module's
    # log_std_min/log_std_max defaults), and records a boolean mask of
    # in-range elements (used by the clamp backward pass).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = -10.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 2.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    # Mask: element was not clipped on either side.
    tmp7 = tmp2 >= tmp3
    tmp8 = tmp2 <= tmp5
    tmp9 = tmp7 & tmp8
    tl.store(out_ptr0 + x2, tmp6, xmask)
    tl.store(out_ptr1 + x2, tmp9, xmask)
def call(args):
    """Inductor-generated forward for the policy network.

    Expects args in the order (linear1.w, linear1.b, input, linear2.w,
    linear2.b, mean.w, mean.b, log_std.w, log_std.b).  Runs two
    addmm + fused-mish layers, then the mean head and the clamped
    log_std head.  Returns the mean, the clamped log_std, and the
    intermediates/weights saved for the backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # linear1: bias + input @ weight^T on the (64, 4) flattened input.
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        # Fused mish activation (capped at 6).
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_mul_softplus_tanh_0[grid(256)](buf0, buf1,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        # linear2 + mish.
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_clamp_mul_softplus_tanh_0[grid(256)](buf2, buf3,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        # Mean head (bias folded into the addmm).
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        # log_std head: matmul here, bias-add fused into the clamp kernel.
        buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_clamp_ge_le_logical_and_1[grid(256)](buf5,
            primals_9, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf5
        del primals_9
    return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
        ), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
        ), buf7, primals_8, primals_6, primals_4
def mish(x):
    """Mish activation, x * tanh(softplus(x)), capped at 6.

    See "Mish: A Self Regularized Non-Monotonic Neural Activation
    Function" (https://arxiv.org/abs/1908.08681v1); PyTorch / FastAI
    port by lessw2020 (https://github.com/lessw2020/mish).

    param:
        x: output of a layer of a neural network
    return: mish activation of x, clamped from above at 6
    """
    activated = x * torch.tanh(F.softplus(x))
    return torch.clamp(activated, max=6)
class PolicyNetworkNew(nn.Module):
    """Gaussian policy network whose forward pass runs the fused
    Triton/inductor kernels via `call` instead of eager PyTorch ops.

    forward returns (mean, log_std) for a tanh-squashed Gaussian
    policy; log_std is clamped to [log_std_min, log_std_max] inside
    the fused kernel.
    """

    def __init__(self, num_inputs, num_actions, hidden_size, init_w=0.003,
        log_std_min=-10, log_std_max=2):
        super(PolicyNetworkNew, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        # All four layers get the same small uniform init in [-init_w, init_w].
        self.linear1 = nn.Linear(num_inputs, hidden_size)
        self.linear1.weight.data.uniform_(-init_w, init_w)
        self.linear1.bias.data.uniform_(-init_w, init_w)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear2.weight.data.uniform_(-init_w, init_w)
        self.linear2.bias.data.uniform_(-init_w, init_w)
        self.mean_linear = nn.Linear(hidden_size, num_actions)
        self.mean_linear.weight.data.uniform_(-init_w, init_w)
        self.mean_linear.bias.data.uniform_(-init_w, init_w)
        self.log_std_linear = nn.Linear(hidden_size, num_actions)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.bias.data.uniform_(-init_w, init_w)

    def evaluate(self, state, epsilon=1e-06):
        """Sample a tanh-squashed action plus its log-probability
        (with the tanh change-of-variables correction; epsilon keeps
        the log numerically stable)."""
        mean, log_std = self.forward(state)
        std = log_std.exp()
        normal = Normal(mean, std)
        z = normal.sample()
        action = torch.tanh(z)
        log_prob = normal.log_prob(z) - torch.log(1 - action.pow(2) + epsilon)
        log_prob = log_prob.sum(-1, keepdim=True)
        return action, log_prob, z, mean, log_std

    def get_action(self, state, exploitation=False):
        """Return one action (numpy, batch dim removed) for a single
        unbatched state; deterministic tanh(mean) when exploitation=True."""
        state = torch.FloatTensor(state).unsqueeze(0)
        mean, log_std = self.forward(state)
        std = log_std.exp()
        normal = Normal(mean, std)
        z = normal.sample()
        action = torch.tanh(z)
        if exploitation:
            action = torch.tanh(mean)
        action = action.detach().numpy()
        return action[0]

    def forward(self, input_0):
        """Run the fused kernel pipeline; returns (mean, log_std).

        The primals numbering must match `call`'s expected layout
        exactly (weights/biases interleaved with the input as primals_3).
        """
        primals_1 = self.linear1.weight
        primals_2 = self.linear1.bias
        primals_4 = self.linear2.weight
        primals_5 = self.linear2.bias
        primals_6 = self.mean_linear.weight
        primals_7 = self.mean_linear.bias
        primals_8 = self.log_std_linear.weight
        primals_9 = self.log_std_linear.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0], output[1]
|
Crawford-fang/ROS_pytorch_RL
|
PolicyNetwork
| false
| 17,168
|
[
"Apache-2.0"
] | 10
|
2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
|
https://github.com/Crawford-fang/ROS_pytorch_RL/tree/2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
|
UnaryPrimitivesPredefined_v2
|
import math
import torch
from torch import nn
def apply_last_dim(model, x):
    """Apply `model` over the flattened last dimension of `x`.

    `x` is viewed as (-1, last_dim), passed through `model`, and the
    result is reshaped back, keeping all leading dimensions and using
    whatever last dimension `model` produced.
    """
    shape = list(x.size())
    flat_out = model(x.contiguous().view(-1, shape[-1]))
    shape[-1] = flat_out.size(-1)
    return flat_out.view(torch.Size(shape))
def get_int_dim_index(name):
    """Translate a dimension name to its integer index.

    Integers pass through unchanged.  Otherwise `name` must be one of
    'a', 'x', 'y', 'z', mapping to -1, 0, 1, 2 respectively ('a' means
    "all components", see Length.forward).
    """
    if isinstance(name, int):
        return name
    name_list = 'axyz'
    assert name in name_list
    # str.index replaces the original O(n) list-comprehension scan.
    return name_list.index(name) - 1
class Length(nn.Module):
    """L2 norm over (a subset of) the last dimension of a state tensor."""

    def __init__(self, dim_index=-1):
        super().__init__()
        # -1 means "use every component"; otherwise an int or a list of
        # named/integer indices selecting components of the last dim.
        self.dim_index = dim_index

    def forward(self, states, dim_index=None):
        """Return sqrt(sum of squares) over the selected components,
        keeping a singleton output dimension.

        `dim_index` overrides the constructor default; names such as
        'x'/'y'/'z' are translated via get_int_dim_index.
        """
        if dim_index is None:
            dim_index = self.dim_index
        if isinstance(dim_index, int):
            dim_index = [dim_index]
        else:
            dim_index = [get_int_dim_index(x) for x in dim_index]
        if -1 in dim_index:
            # Full norm over all components of the (flattened) last dim.
            def extractor(x):
                return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True))
        else:
            # Norm over the selected components only.
            def extractor(x):
                return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1,
                    keepdim=True))
        # apply_last_dim flattens to (-1, last_dim) before extracting.
        return apply_last_dim(extractor, states)

    def show(self, name='Length', indent=0, log=print, **kwargs):
        log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self.
            dim_index)))
class Distance(nn.Module):
    """Euclidean distance between two state tensors, |x1 - x2|."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index
        self.length = Length(dim_index)

    def forward(self, states1, states2, dim_index=None):
        # Distance is simply the length of the difference vector.
        difference = states1 - states2
        return self.length(difference, dim_index)

    def show(self, name='Distance', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name)
class Normalize(nn.Module):
    """Feature normalizer with optional 'normal' or 'uniform' statistics.

    With distribution=None it is the identity.  `param` holds either
    (mean, std) or (min, max) as a non-trainable nn.Parameter, and the
    statistics can be re-estimated from data collected during forward
    passes with keep_data=True.
    """

    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        self.data_ = []
        if distribution is None:
            pass
        elif distribution == 'normal':
            mean = kwargs['mean'] if 'mean' in kwargs else 0
            std = kwargs['std'] if 'std' in kwargs else 1
            self.param = nn.Parameter(torch.Tensor([mean, std]), False)
        elif distribution == 'uniform':
            vmin = kwargs['minv'] if 'minv' in kwargs else 0
            vmax = kwargs['maxv'] if 'maxv' in kwargs else 1
            self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False)
        else:
            raise NotImplementedError()

    def forward(self, x, keep_data=False):
        """Normalize x; with keep_data=True, record x (for later
        reset_parameters) and return it unchanged."""
        if keep_data:
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        elif self.distribution == 'normal':
            mean = self.param[0]
            std = self.param[1]
            return (x - mean) / std
        elif self.distribution == 'uniform':
            vmin = self.param[0]
            vmax = self.param[1]
            # 1e-05 guards against a degenerate (zero-width) range.
            return (x - vmin) / (vmax - vmin + 1e-05)
        else:
            raise NotImplementedError()

    def reset_parameters(self, name=None):
        """Re-estimate statistics from data recorded via keep_data=True."""
        assert len(self.data_) > 0
        data = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = data.mean().item()
                self.param[1] = data.std().item()
            if name is not None:
                # no-op; presumably a stripped log/print statement.
                None
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = data.min().item()
                self.param[1] = data.max().item()
            if name is not None:
                # no-op; presumably a stripped log/print statement.
                None
        else:
            raise NotImplementedError()

    def recover_threshold(self, x):
        """Map a normalized threshold back to the original data scale
        (inverse of forward's normalization)."""
        if self.distribution is None:
            return x
        elif self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        elif self.distribution == 'uniform':
            return x * float(self.param[1] - self.param[0] + 1e-05) + float(
                self.param[0])
        else:
            raise NotImplementedError()

    def init_thresholds(self, x):
        """Initialize a threshold tensor to match the normalized scale."""
        if self.distribution is None:
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'normal':
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """Soft comparison: Sigmoid((x - y) / e^beta)."""

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        # e^beta is a temperature: larger beta gives a softer comparison.
        temperature = math.exp(beta)
        return self.sigmoid((x - y) / temperature)
class Inequality(nn.Module):
    """Soft threshold predicate: sigmoid((normalize(x) - theta) / e^beta).

    Holds `out_dim` learnable thresholds; each input value is compared
    against all of them, appending a trailing dimension of size out_dim.
    """

    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True
            )
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        # Thresholds start distributed like the normalized inputs.
        self.normalize.init_thresholds(self.thresholds)

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, ... ]
        Returns the input with a trailing out_dim axis of soft
        comparison scores in (0, 1).
        """
        # Append a singleton dim so each value broadcasts against all
        # out_dim thresholds.
        states_expand = states.view(*(states.size() + (1,)))
        estimate_parameters = 'estimate_parameters' in kwargs and kwargs[
            'estimate_parameters']
        states_expand = self.normalize(states_expand, keep_data=
            estimate_parameters)
        return self.cmp(states_expand, self.thresholds.view(*([1] * len(
            states.size()) + [self.out_dim])), beta)

    def reset_parameters(self, parameter_name, name=None):
        # Re-estimate normalization statistics and re-seed thresholds.
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)

    def get_descriptions(self, name='Inequality'):
        """Render each threshold as a human-readable '<name> > t' string."""
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descroptions = []
        for k in range(theta.size(0)):
            t = self.normalize.recover_threshold(theta[k])
            # Domain-specific unit rescaling for speed/acceleration
            # primitives; factors presumably match the dataset's units.
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descroptions.append('%s > %.2lf' % (name, t))
        return descroptions
class N_aryPrimitivesPredefined(nn.Module):
    """Base container for predefined n-ary primitive predicates.

    Subclasses fill `primitive_list` (ordering) and `ineqs` (named
    Inequality modules); this base class fans parameter resets and
    textual descriptions out over them.
    """

    def __init__(self):
        super().__init__()
        self.out_dim = 0
        self.primitive_list = []
        self.ineqs = nn.ModuleDict({})

    def reset_parameters(self, parameter_name):
        for key in self.primitive_list:
            self.ineqs[key].reset_parameters(parameter_name, name=key)

    def get_descriptions(self):
        descriptions = []
        for key in self.primitive_list:
            descriptions += self.ineqs[key].get_descriptions(name=key)
        return descriptions
class AlignDifferential(nn.Module):
    """Centered finite difference along dim 1, length-preserving.

    The sequence is padded at both ends by linear extrapolation
    (2*edge - neighbor) so the output keeps the input's length.
    """

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        return length

    def forward(self, states):
        """
        :param states: [batch, length, *]
        """
        # Extrapolate one step past each end of the sequence.
        front = states[:, 0:1] * 2 - states[:, 1:2]
        back = states[:, -1:] * 2 - states[:, -2:-1]
        padded = torch.cat([front, states, back], dim=1)
        # Central difference with unit step size.
        return (padded[:, 2:] - padded[:, :-2]) / 2

    def show(self, name='AlignDifferential', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,))
class UnaryPrimitivesPredefined_v2(N_aryPrimitivesPredefined):
    """Unary primitives over agent trajectories: acc, pos_z, speed,
    dist_to_ball.

    Each primitive value is soft-compared against `cmp_dim` learned
    thresholds, so out_dim = 4 * cmp_dim.
    """

    def __init__(self, cmp_dim=10):
        super().__init__()
        self.differential = AlignDifferential()
        self.primitive_list = ['acc', 'pos_z', 'speed', 'dist_to_ball']
        self.distance = Distance()
        # pos_z uses uniform normalization; the others use normal stats.
        self.ineqs.update({'acc': Inequality(out_dim=cmp_dim, distribution=
            'normal'), 'pos_z': Inequality(out_dim=cmp_dim, distribution=
            'uniform'), 'speed': Inequality(out_dim=cmp_dim, distribution=
            'normal'), 'dist_to_ball': Inequality(out_dim=cmp_dim,
            distribution='normal')})
        self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list]
            )

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, state_dim]
        return [batch, length, n_agents, out_dim]
        """
        # First and second temporal differentials (velocity, acceleration).
        velocity = self.differential(states)
        acc = self.differential(velocity)
        n_agents = states.size(2)
        # All-pairs agent distances via broadcasted repeats.
        p1 = states.unsqueeze(2).repeat(1, 1, n_agents, 1, 1)
        p2 = states.unsqueeze(3).repeat(1, 1, 1, n_agents, 1)
        dist = self.distance(p1, p2).squeeze(4)
        # Agent 0 is excluded from the unary features and used as the
        # distance reference — presumably the ball; verify with caller.
        # pos_z reads component 2 of the state vector.
        ineqs_inputs = {'pos_z': states[:, :, 1:, 2], 'speed': torch.norm(
            velocity[:, :, 1:, :], p=2, dim=3), 'acc': torch.norm(acc[:, :,
            1:, :], p=2, dim=3), 'dist_to_ball': dist[:, :, 0, 1:]}
        # Concatenate the per-primitive threshold scores along the
        # feature dim, in primitive_list order.
        output = torch.cat([self.ineqs[k](ineqs_inputs[k], beta, **kwargs) for
            k in self.primitive_list], dim=-1)
        return output
def get_inputs():
    """Return a sample states batch for exercising the forward pass."""
    batch = torch.rand([4, 4, 4, 4])
    return [batch]
def get_init_inputs():
    """Return (args, kwargs) for constructing the module under test."""
    args, kwargs = [], {}
    return [args, kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # AlignDifferential's padding concat: builds
    # cat([2*s[:,0:1]-s[:,1:2], s, 2*s[:,-1:]-s[:,-2:-1]], dim=1),
    # growing the length dim from 4 to 6.  x1 selects the output row;
    # the three branches below handle front pad / body / back pad.
    xnumel = 384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 6
    x0 = xindex % 16
    x2 = xindex // 96
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    # Front pad: 2 * s[:, 0] - s[:, 1].
    tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = 2.0
    tmp7 = tmp5 * tmp6
    tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp9 = tmp7 - tmp8
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp0 >= tmp3
    tmp13 = tl.full([1], 5, tl.int64)
    tmp14 = tmp0 < tmp13
    tmp15 = tmp12 & tmp14
    # Body: pass-through of the original sequence (shifted by one row).
    tmp16 = tl.load(in_ptr0 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp15 &
        xmask, other=0.0)
    tmp17 = tmp0 >= tmp13
    tl.full([1], 6, tl.int64)
    # Back pad: 2 * s[:, -1] - s[:, -2].
    tmp20 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp17 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp21 = tmp20 * tmp6
    tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp17 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp23 = tmp21 - tmp22
    tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
    tmp25 = tl.where(tmp17, tmp23, tmp24)
    tmp26 = tl.where(tmp15, tmp16, tmp25)
    tmp27 = tl.where(tmp4, tmp11, tmp26)
    tl.store(out_ptr0 + x3, tmp27, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Second AlignDifferential pass fused with its padding concat:
    # in_ptr0 is the padded states (length 6); the centered difference
    # ((s[i+2] - s[i]) * 0.5) gives the velocity, which is itself
    # padded by linear extrapolation into a new length-6 buffer.
    xnumel = 384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 6
    x0 = xindex % 16
    x2 = xindex // 96
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    # Front pad: 2 * v[0] - v[1], with v computed on the fly.
    tmp5 = tl.load(in_ptr0 + (32 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask,
        other=0.0)
    tmp6 = tl.load(in_ptr0 + (x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0)
    tmp7 = tmp5 - tmp6
    tmp8 = 0.5
    tmp9 = tmp7 * tmp8
    tmp10 = 2.0
    tmp11 = tmp9 * tmp10
    tmp12 = tl.load(in_ptr0 + (48 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask,
        other=0.0)
    tmp13 = tl.load(in_ptr0 + (16 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask,
        other=0.0)
    tmp14 = tmp12 - tmp13
    tmp15 = tmp14 * tmp8
    tmp16 = tmp11 - tmp15
    tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
    tmp18 = tl.where(tmp4, tmp16, tmp17)
    tmp19 = tmp0 >= tmp3
    tmp20 = tl.full([1], 5, tl.int64)
    tmp21 = tmp0 < tmp20
    tmp22 = tmp19 & tmp21
    # Body: centered difference of the padded states.
    tmp23 = tl.load(in_ptr0 + (32 + x0 + 16 * (-1 + x1) + 96 * x2), tmp22 &
        xmask, other=0.0)
    tmp24 = tl.load(in_ptr0 + (x0 + 16 * (-1 + x1) + 96 * x2), tmp22 &
        xmask, other=0.0)
    tmp25 = tmp23 - tmp24
    tmp26 = tmp25 * tmp8
    tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
    tmp28 = tl.where(tmp22, tmp26, tmp27)
    tmp29 = tmp0 >= tmp20
    tl.full([1], 6, tl.int64)
    # Back pad: 2 * v[-1] - v[-2].
    tmp32 = tl.load(in_ptr0 + (80 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 &
        xmask, other=0.0)
    tmp33 = tl.load(in_ptr0 + (48 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 &
        xmask, other=0.0)
    tmp34 = tmp32 - tmp33
    tmp35 = tmp34 * tmp8
    tmp36 = tmp35 * tmp10
    tmp37 = tl.load(in_ptr0 + (64 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 &
        xmask, other=0.0)
    tmp38 = tl.load(in_ptr0 + (32 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 &
        xmask, other=0.0)
    tmp39 = tmp37 - tmp38
    tmp40 = tmp39 * tmp8
    tmp41 = tmp36 - tmp40
    tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
    tmp43 = tl.where(tmp29, tmp41, tmp42)
    tmp44 = tl.where(tmp22, tmp28, tmp43)
    tmp45 = tl.where(tmp4, tmp18, tmp44)
    tl.store(out_ptr0 + x3, tmp45, xmask)
@triton.jit
def triton_poi_fused_div_sub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
    XBLOCK: tl.constexpr):
    # Centered difference along the padded length dim ((a - b) * 0.5),
    # L2 norm over the 4 state components, then normal normalization
    # (x - mean) / std with (mean, std) read from in_ptr1.  Used for
    # both the 'speed' and 'acc' primitives (agents 1..3 only).
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 3
    x1 = xindex // 3 % 4
    x2 = xindex // 12
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (36 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (4 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
        eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (37 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (5 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
        eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (38 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
        eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (6 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
        eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (39 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
        eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (7 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
        eviction_policy='evict_last')
    # in_ptr1 holds the Normalize 'normal' parameters: [mean, std].
    tmp25 = tl.load(in_ptr1 + 0)
    tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
    tmp28 = tl.load(in_ptr1 + 1)
    tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
    tmp2 = tmp0 - tmp1
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tmp5 = tmp4 * tmp4
    tmp8 = tmp6 - tmp7
    tmp9 = tmp8 * tmp3
    tmp10 = tmp9 * tmp9
    tmp11 = tmp5 + tmp10
    tmp14 = tmp12 - tmp13
    tmp15 = tmp14 * tmp3
    tmp16 = tmp15 * tmp15
    tmp17 = tmp11 + tmp16
    tmp20 = tmp18 - tmp19
    tmp21 = tmp20 * tmp3
    tmp22 = tmp21 * tmp21
    tmp23 = tmp17 + tmp22
    tmp24 = libdevice.sqrt(tmp23)
    tmp27 = tmp24 - tmp26
    tmp30 = tmp27 / tmp29
    tl.store(in_out_ptr0 + x3, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_div_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # 'pos_z' primitive: reads state component 2 of agents 1..3 and
    # applies the uniform normalization (x - vmin) / (vmax - vmin + 1e-05),
    # with (vmin, vmax) read from in_ptr1.
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 3
    x1 = xindex // 3
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (6 + 4 * x0 + 16 * x1), xmask, eviction_policy
        ='evict_last')
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr1 + 1)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp3 = tmp0 - tmp2
    tmp6 = tmp5 - tmp2
    # 1e-05 guards against a degenerate (vmin == vmax) range.
    tmp7 = 1e-05
    tmp8 = tmp6 + tmp7
    tmp9 = tmp3 / tmp8
    tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_div_sub_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
    XBLOCK: tl.constexpr):
    # 'dist_to_ball' primitive: L2 distance between agent 0's state and
    # each of agents 1..3 (4 components), then normal normalization
    # (x - mean) / std with (mean, std) from in_ptr1.  The index
    # arithmetic is inductor-generated wrap-around addressing for the
    # (4, 4, 4, 4) input layout.
    xnumel = 48
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 3
    x1 = xindex // 3 % 4
    x2 = xindex // 12
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (4 + 4 * x0 + 16 * x1 + 16 * ((1 + x0) // 16) +
        64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (4 * ((1 + x0) // 4) + 16 * x1 + 16 * ((1 + x0
        ) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (5 + 4 * x0 + 16 * x1 + 16 * ((1 + x0) // 16) +
        64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy=
        'evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * ((1 + x0) // 4) + 16 * x1 + 16 * ((1 +
        x0) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (6 + 4 * x0 + 16 * x1 + 16 * ((1 + x0) // 16) +
        64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy=
        'evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * ((1 + x0) // 4) + 16 * x1 + 16 * ((1 +
        x0) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (7 + 4 * x0 + 16 * x1 + 16 * ((1 + x0) // 16) +
        64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy=
        'evict_last')
    tmp15 = tl.load(in_ptr0 + (3 + 4 * ((1 + x0) // 4) + 16 * x1 + 16 * ((1 +
        x0) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    # in_ptr1 holds the Normalize 'normal' parameters: [mean, std].
    tmp20 = tl.load(in_ptr1 + 0)
    tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
    tmp23 = tl.load(in_ptr1 + 1)
    tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = libdevice.sqrt(tmp18)
    tmp22 = tmp19 - tmp21
    tmp25 = tmp22 / tmp24
    tl.store(in_out_ptr0 + x3, tmp25, xmask)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Final concat of the four primitives: for each (normalized value,
    # thresholds) pair computes sigmoid((x - theta) * 1.0) — the SoftCmp
    # with beta=0 — and writes the four 10-wide blocks into the out_dim
    # = 40 feature axis.  Input pairs arrive in primitive_list order:
    # (acc, pos_z, speed, dist_to_ball).
    xnumel = 1920
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 40
    x1 = xindex // 40
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 10, tl.int64)
    tmp4 = tmp0 < tmp3
    # Block 0 (features 0..9): acc vs its thresholds.
    tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp5 - tmp6
    tmp8 = 1.0
    tmp9 = tmp7 * tmp8
    tmp10 = tl.sigmoid(tmp9)
    tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
    tmp12 = tl.where(tmp4, tmp10, tmp11)
    tmp13 = tmp0 >= tmp3
    tmp14 = tl.full([1], 20, tl.int64)
    tmp15 = tmp0 < tmp14
    tmp16 = tmp13 & tmp15
    # Block 1 (features 10..19): pos_z.
    tmp17 = tl.load(in_ptr2 + x1, tmp16 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp18 = tl.load(in_ptr3 + (-10 + x0), tmp16 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp19 = tmp17 - tmp18
    tmp20 = tmp19 * tmp8
    tmp21 = tl.sigmoid(tmp20)
    tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
    tmp23 = tl.where(tmp16, tmp21, tmp22)
    tmp24 = tmp0 >= tmp14
    tmp25 = tl.full([1], 30, tl.int64)
    tmp26 = tmp0 < tmp25
    tmp27 = tmp24 & tmp26
    # Block 2 (features 20..29): speed.
    tmp28 = tl.load(in_ptr4 + x1, tmp27 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp29 = tl.load(in_ptr5 + (-20 + x0), tmp27 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp30 = tmp28 - tmp29
    tmp31 = tmp30 * tmp8
    tmp32 = tl.sigmoid(tmp31)
    tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
    tmp34 = tl.where(tmp27, tmp32, tmp33)
    tmp35 = tmp0 >= tmp25
    tl.full([1], 40, tl.int64)
    # Block 3 (features 30..39): dist_to_ball.
    tmp38 = tl.load(in_ptr6 + x1, tmp35 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp39 = tl.load(in_ptr7 + (-30 + x0), tmp35 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp40 = tmp38 - tmp39
    tmp41 = tmp40 * tmp8
    tmp42 = tl.sigmoid(tmp41)
    tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
    tmp44 = tl.where(tmp35, tmp42, tmp43)
    tmp45 = tl.where(tmp27, tmp34, tmp44)
    tmp46 = tl.where(tmp16, tmp23, tmp45)
    tmp47 = tl.where(tmp4, tmp12, tmp46)
    tl.store(out_ptr0 + x2, tmp47, xmask)
def call(args):
    """Inductor-generated forward for UnaryPrimitivesPredefined_v2.

    Args: (states, acc.norm_param, acc.thresholds, pos_z.norm_param,
    pos_z.thresholds, speed.norm_param, speed.thresholds,
    dist_to_ball.norm_param, dist_to_ball.thresholds).  Returns the
    (4, 4, 3, 40) primitive features plus tensors saved for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (2,), (1,))
    assert_size_stride(primals_3, (10,), (1,))
    assert_size_stride(primals_4, (2,), (1,))
    assert_size_stride(primals_5, (10,), (1,))
    assert_size_stride(primals_6, (2,), (1,))
    assert_size_stride(primals_7, (10,), (1,))
    assert_size_stride(primals_8, (2,), (1,))
    assert_size_stride(primals_9, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0: extrapolation-padded states (length 4 -> 6).
        buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(384)](primals_1, buf0, 384, XBLOCK=128,
            num_warps=4, num_stages=1)
        # buf1: padded first differential (velocity) of buf0.
        buf1 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
        triton_poi_fused_cat_1[grid(384)](buf0, buf1, 384, XBLOCK=128,
            num_warps=4, num_stages=1)
        # buf3: normalized acceleration magnitude (diff of buf1 + norm).
        buf2 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 48), torch.float32)
        buf3 = reinterpret_tensor(buf2, (4, 4, 3, 1), (12, 3, 1, 1), 0)
        del buf2
        triton_poi_fused_div_sub_2[grid(48)](buf3, buf1, primals_2, 48,
            XBLOCK=64, num_warps=1, num_stages=1)
        del buf1
        del primals_2
        # buf4: uniformly normalized pos_z channel.
        buf4 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32)
        triton_poi_fused_add_div_sub_3[grid(48)](primals_1, primals_4, buf4,
            48, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_4
        # buf6: normalized speed magnitude (diff of buf0 + norm).
        buf5 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 48), torch.float32)
        buf6 = reinterpret_tensor(buf5, (4, 4, 3, 1), (12, 3, 1, 1), 0)
        del buf5
        triton_poi_fused_div_sub_2[grid(48)](buf6, buf0, primals_6, 48,
            XBLOCK=64, num_warps=1, num_stages=1)
        del buf0
        del primals_6
        # buf8: normalized distance from agent 0 to agents 1..3.
        buf7 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 48), torch.float32)
        buf8 = reinterpret_tensor(buf7, (4, 4, 3, 1), (12, 3, 1, 1), 0)
        del buf7
        triton_poi_fused_div_sub_4[grid(48)](buf8, primals_1, primals_8, 48,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_1
        del primals_8
        # buf9: sigmoid threshold comparisons concatenated to out_dim 40.
        buf9 = empty_strided_cuda((4, 4, 3, 40), (480, 120, 40, 1), torch.
            float32)
        triton_poi_fused_cat_5[grid(1920)](buf3, primals_3, buf4, primals_5,
            buf6, primals_7, buf8, primals_9, buf9, 1920, XBLOCK=128,
            num_warps=4, num_stages=1)
    return (buf9, primals_3, primals_5, primals_7, primals_9, buf3, buf4,
        buf6, buf8)
def apply_last_dim(model, x):
    """Run `model` over x flattened to (-1, last_dim), then restore the
    leading dimensions (the last dim may change size)."""
    original_size = list(x.size())
    result = model(x.contiguous().view(-1, original_size[-1]))
    original_size[-1] = result.size(-1)
    return result.view(torch.Size(original_size))
def get_int_dim_index(name):
    """Translate a dimension name to its integer index.

    Integers pass through unchanged.  Otherwise `name` must be one of
    'a', 'x', 'y', 'z', mapping to -1, 0, 1, 2 respectively ('a' means
    "all components", see Length.forward).
    """
    if isinstance(name, int):
        return name
    name_list = 'axyz'
    assert name in name_list
    # str.index replaces the original O(n) list-comprehension scan.
    return name_list.index(name) - 1
class Length(nn.Module):
    """L2 norm over (a subset of) the last dimension of a state tensor."""

    def __init__(self, dim_index=-1):
        super().__init__()
        # -1 means "use every component"; otherwise an int or a list of
        # named/integer indices into the last dim.
        self.dim_index = dim_index

    def forward(self, states, dim_index=None):
        """Return sqrt(sum of squares) over the selected components,
        keeping a singleton output dimension."""
        if dim_index is None:
            dim_index = self.dim_index
        if isinstance(dim_index, int):
            dim_index = [dim_index]
        else:
            dim_index = [get_int_dim_index(x) for x in dim_index]
        if -1 in dim_index:
            # Full norm over all components.
            def extractor(x):
                return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True))
        else:
            # Norm over the selected components only.
            def extractor(x):
                return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1,
                    keepdim=True))
        return apply_last_dim(extractor, states)

    def show(self, name='Length', indent=0, log=print, **kwargs):
        log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self.
            dim_index)))
class Distance(nn.Module):
    """Euclidean distance between two state tensors, |x1 - x2|."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index
        # Delegates the norm computation to Length.
        self.length = Length(dim_index)

    def forward(self, states1, states2, dim_index=None):
        # Distance is the length of the difference vector.
        return self.length(states1 - states2, dim_index)

    def show(self, name='Distance', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name)
class Normalize(nn.Module):
    """Feature normalizer with optional 'normal' or 'uniform' statistics.

    With distribution=None it is the identity.  `param` holds either
    (mean, std) or (min, max) as a non-trainable nn.Parameter; the
    statistics can be re-estimated from data recorded during forward
    passes with keep_data=True.
    """

    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        self.data_ = []
        if distribution is None:
            pass
        elif distribution == 'normal':
            mean = kwargs['mean'] if 'mean' in kwargs else 0
            std = kwargs['std'] if 'std' in kwargs else 1
            self.param = nn.Parameter(torch.Tensor([mean, std]), False)
        elif distribution == 'uniform':
            vmin = kwargs['minv'] if 'minv' in kwargs else 0
            vmax = kwargs['maxv'] if 'maxv' in kwargs else 1
            self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False)
        else:
            raise NotImplementedError()

    def forward(self, x, keep_data=False):
        """Normalize x; with keep_data=True, record x and return it
        unchanged (used during parameter estimation)."""
        if keep_data:
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        elif self.distribution == 'normal':
            mean = self.param[0]
            std = self.param[1]
            return (x - mean) / std
        elif self.distribution == 'uniform':
            vmin = self.param[0]
            vmax = self.param[1]
            # 1e-05 guards against a degenerate (zero-width) range.
            return (x - vmin) / (vmax - vmin + 1e-05)
        else:
            raise NotImplementedError()

    def reset_parameters(self, name=None):
        """Re-estimate statistics from the recorded data, then clear it."""
        assert len(self.data_) > 0
        data = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = data.mean().item()
                self.param[1] = data.std().item()
            if name is not None:
                # no-op; presumably a stripped log/print statement.
                None
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = data.min().item()
                self.param[1] = data.max().item()
            if name is not None:
                # no-op; presumably a stripped log/print statement.
                None
        else:
            raise NotImplementedError()

    def recover_threshold(self, x):
        """Map a normalized threshold back to the original data scale."""
        if self.distribution is None:
            return x
        elif self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        elif self.distribution == 'uniform':
            return x * float(self.param[1] - self.param[0] + 1e-05) + float(
                self.param[0])
        else:
            raise NotImplementedError()

    def init_thresholds(self, x):
        """Initialize a threshold tensor to match the normalized scale."""
        if self.distribution is None:
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'normal':
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """Differentiable comparison: Sigmoid((x - y) / e^beta)."""

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        # The temperature e^beta controls sharpness: beta -> -inf
        # approaches a hard x > y comparison.
        difference = x - y
        return self.sigmoid(difference / math.exp(beta))
class Inequality(nn.Module):
    """Soft threshold predicate: sigmoid((normalize(x) - theta) / e^beta).

    Holds `out_dim` learnable thresholds; each input value is compared
    against all of them, appending a trailing dimension of size out_dim.
    """

    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True
            )
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        # Thresholds start distributed like the normalized inputs.
        self.normalize.init_thresholds(self.thresholds)

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, ... ]
        Returns the input with a trailing out_dim axis of soft
        comparison scores in (0, 1).
        """
        # Append a singleton dim so each value broadcasts against all
        # out_dim thresholds.
        states_expand = states.view(*(states.size() + (1,)))
        estimate_parameters = 'estimate_parameters' in kwargs and kwargs[
            'estimate_parameters']
        states_expand = self.normalize(states_expand, keep_data=
            estimate_parameters)
        return self.cmp(states_expand, self.thresholds.view(*([1] * len(
            states.size()) + [self.out_dim])), beta)

    def reset_parameters(self, parameter_name, name=None):
        # Re-estimate normalization statistics and re-seed thresholds.
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)

    def get_descriptions(self, name='Inequality'):
        """Render each threshold as a human-readable '<name> > t' string."""
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descroptions = []
        for k in range(theta.size(0)):
            t = self.normalize.recover_threshold(theta[k])
            # Domain-specific unit rescaling for speed/acceleration
            # primitives; factors presumably match the dataset's units.
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descroptions.append('%s > %.2lf' % (name, t))
        return descroptions
class N_aryPrimitivesPredefined(nn.Module):
    """Base class aggregating named Inequality primitives.

    Subclasses populate `primitive_list` (ordering) and `ineqs` (the
    modules); resets and descriptions fan out over both.
    """

    def __init__(self):
        super().__init__()
        self.out_dim = 0
        self.primitive_list = []
        self.ineqs = nn.ModuleDict({})

    def reset_parameters(self, parameter_name):
        # Forward the reset to every registered primitive.
        for k in self.primitive_list:
            self.ineqs[k].reset_parameters(parameter_name, name=k)

    def get_descriptions(self):
        # Collect per-primitive descriptions in primitive_list order.
        descriptions = []
        for k in self.primitive_list:
            descriptions += self.ineqs[k].get_descriptions(name=k)
        return descriptions
class AlignDifferential(nn.Module):
    """Centered finite difference along dim 1, length-preserving.

    The sequence is padded at both ends by linear extrapolation
    (2*edge - neighbor) so the output keeps the input's length.
    """

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        # Length-preserving transform.
        return length

    def forward(self, states):
        """
        :param states: [batch, length, *]
        """
        # Pad with one extrapolated step at each end, then take the
        # central difference with unit step size.
        padded_states = torch.cat([states[:, 0:1] * 2 - states[:, 1:2],
            states, states[:, -1:] * 2 - states[:, -2:-1]], dim=1)
        return (padded_states[:, 2:] - padded_states[:, :-2]) / 2

    def show(self, name='AlignDifferential', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,))
class UnaryPrimitivesPredefined_v2New(N_aryPrimitivesPredefined):
    """Unary primitives (acc / pos_z / speed / dist_to_ball) whose forward
    delegates to a generated `call` graph defined elsewhere in this file."""

    def __init__(self, cmp_dim=10):
        super().__init__()
        self.differential = AlignDifferential()
        self.primitive_list = ['acc', 'pos_z', 'speed', 'dist_to_ball']
        self.distance = Distance()
        # One Inequality comparator per primitive; `distribution` selects
        # the normalization scheme used by its Normalize submodule.
        self.ineqs.update({'acc': Inequality(out_dim=cmp_dim, distribution=
            'normal'), 'pos_z': Inequality(out_dim=cmp_dim, distribution=
            'uniform'), 'speed': Inequality(out_dim=cmp_dim, distribution=
            'normal'), 'dist_to_ball': Inequality(out_dim=cmp_dim,
            distribution='normal')})
        self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list]
            )

    def forward(self, input_0):
        # Gather parameters in the positional order the generated call()
        # expects (thresholds and normalizer param per primitive).
        primals_3 = self.ineqs.acc.thresholds
        primals_2 = self.ineqs.acc.normalize.param
        primals_5 = self.ineqs.pos_z.thresholds
        primals_4 = self.ineqs.pos_z.normalize.param
        primals_7 = self.ineqs.speed.thresholds
        primals_6 = self.ineqs.speed.normalize.param
        primals_9 = self.ineqs.dist_to_ball.thresholds
        primals_8 = self.ineqs.dist_to_ball.normalize.param
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        # call() returns a tuple; element 0 is the forward result.
        return output[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
UnaryPrimitivesPredefined_v2
| false
| 17,169
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
SELayer
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.backends.cudnn
class SELayer(nn.Module):
    """Squeeze-and-Excitation layer.

    Globally average-pools each channel ("squeeze"), passes the pooled
    vector through a two-layer bottleneck MLP, and rescales the input's
    channels by the resulting sigmoid gates ("excitation").

    Args:
        in_channels: number of input channels.
        reduction: bottleneck reduction ratio (mid = in_channels // reduction).
    """

    def __init__(self, in_channels, reduction):
        super(SELayer, self).__init__()
        mid_channels = in_channels // reduction
        self.fc1 = nn.Linear(in_channels, mid_channels)
        self.fc2 = nn.Linear(mid_channels, in_channels)

    def forward(self, x):
        n_batches, n_channels, _, _ = x.size()
        # Squeeze: one scalar per (batch, channel).
        y = F.adaptive_avg_pool2d(x, output_size=1).view(n_batches, n_channels)
        y = F.relu(self.fc1(y), inplace=True)
        # torch.sigmoid replaces the deprecated F.sigmoid; identical values.
        y = torch.sigmoid(self.fc2(y)).view(n_batches, n_channels, 1, 1)
        # Excitation: broadcast channel gates over the spatial dims.
        return x * y
def get_inputs():
    """Sample forward input for smoke-testing SELayer."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Positional args and constructor kwargs for SELayer."""
    return [[], {'in_channels': 4, 'reduction': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
import torch.optim
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    # Squeeze step of SELayer: mean over the 16 spatial elements of each
    # (batch, channel) plane, specialized for a (4, 4, 4, 4) input.
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    # Divide the row sum by the element count to get the mean.
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias add + ReLU for the fc1 output (bias has one element).
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    # max(0, x) == ReLU
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Excitation step: out = x * sigmoid(gate), with one gate per
    # (batch, channel) broadcast across the 16 spatial positions.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
    # Inductor-generated forward for SELayer: squeeze (mean) -> fc1 + ReLU
    # -> fc2 -> sigmoid gating, specialized for a (4, 4, 4, 4) input.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4), (4, 1))
    assert_size_stride(primals_3, (1,), (1,))
    assert_size_stride(primals_4, (4, 1), (1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # Global average pool: one mean per (batch, channel).
        triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        # fc1 matmul; its bias is fused into the next kernel.
        extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf2)
        del primals_2
        buf3 = buf2
        del buf2
        # In-place bias add + ReLU.
        triton_poi_fused_relu_1[grid(4)](buf3, primals_3, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del primals_3
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # fc2 with bias via addmm.
        extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4,
            (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf4)
        del primals_5
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Excitation: x * sigmoid(gate).
        triton_poi_fused_mul_2[grid(256)](primals_1, buf4, buf5, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
    # buf5 is the result; the remaining tensors are saved for backward.
    return buf5, primals_1, reinterpret_tensor(buf1, (4, 4), (4, 1), 0
        ), buf3, buf4, primals_4
class SELayerNew(nn.Module):
    """SELayer variant whose forward runs the generated Triton `call` graph.

    Parameter layout is identical to SELayer, so state dicts interchange.
    """

    def __init__(self, in_channels, reduction):
        super(SELayerNew, self).__init__()
        mid_channels = in_channels // reduction
        self.fc1 = nn.Linear(in_channels, mid_channels)
        self.fc2 = nn.Linear(mid_channels, in_channels)

    def forward(self, input_0):
        # Pack weights/biases in the positional order call() expects.
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        # call() also returns tensors saved for backward; output[0] is y.
        return output[0]
|
CrazyStoneonRoad/pytorch_image_classification
|
SELayer
| false
| 17,170
|
[
"MIT"
] | 4
|
1dcf6d0ee8f4a102ca93cc6e5e325a2e9153918b
|
https://github.com/CrazyStoneonRoad/pytorch_image_classification/tree/1dcf6d0ee8f4a102ca93cc6e5e325a2e9153918b
|
MultiHeadAttention
|
import torch
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class MultiHeadAttention(nn.Module):
    """
    input:
        query --- [N, T_q, query_dim]
        key --- [N, T_k, key_dim]
    output:
        out --- [N, T_q, num_units]
    """

    def __init__(self, query_dim, key_dim, num_units, num_heads):
        super().__init__()
        self.num_units = num_units
        self.num_heads = num_heads
        self.key_dim = key_dim
        self.W_query = nn.Linear(in_features=query_dim, out_features=
            num_units, bias=False)
        self.W_key = nn.Linear(in_features=key_dim, out_features=num_units,
            bias=False)
        self.W_value = nn.Linear(in_features=key_dim, out_features=
            num_units, bias=False)

    def forward(self, query, key):
        head_size = self.num_units // self.num_heads

        def to_heads(projected):
            # [N, T, num_units] -> [num_heads, N, T, head_size]
            return torch.stack(torch.split(projected, head_size, dim=2), dim=0)

        q = to_heads(self.W_query(query))
        k = to_heads(self.W_key(key))
        v = to_heads(self.W_value(key))
        # Scaled dot-product attention per head.
        scores = torch.matmul(q, k.transpose(2, 3)) / self.key_dim ** 0.5
        weights = F.softmax(scores, dim=3)
        context = torch.matmul(weights, v)
        # Merge heads back into the last dimension.
        return torch.cat(torch.split(context, 1, dim=0), dim=3).squeeze(0)
def get_inputs():
    """Sample (query, key) inputs for smoke-testing MultiHeadAttention."""
    return [torch.rand(4, 4, 4), torch.rand(4, 4, 4)]


def get_init_inputs():
    """Positional args and constructor kwargs for MultiHeadAttention."""
    return [[], {'query_dim': 4, 'key_dim': 4, 'num_units': 4, 'num_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Re-layouts a (16, 4) linear projection into 4 stacked heads and
    # pre-scales by 2**-0.5; applied to both Q and K so their product
    # carries the full 1/sqrt(key_dim) = 1/2 factor.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex // 4
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x4 = xindex
    tmp0 = x3
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    # Four branches, one per head chunk of the stacked-split layout.
    tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * (x1 + 4 * x2)), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * (-4 + x1 + 4 * x2)), tmp9 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * (-8 + x1 + 4 * x2)), tmp14 &
        xmask, eviction_policy='evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp19 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * (-12 + x1 + 4 * x2)),
        tmp16 & xmask, eviction_policy='evict_last', other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tmp23 = 0.7071067811865476
    tmp24 = tmp22 * tmp23
    tl.store(out_ptr0 + x4, tmp24, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Softmax numerator: exp(x - rowmax) over rows of 4 scores
    # (numerically stable form).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax denominator step: divide each exp() value (in_ptr1) by its
    # row sum, emitting 0.0 for rows whose raw scores (in_ptr0) are all
    # -inf (fully-masked rows).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp25 = tl.load(in_ptr1 + x2, xmask)
    tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
        )
    # Detect whether any element of the row is finite (not -inf).
    tmp1 = float('-inf')
    tmp2 = tmp0 == tmp1
    tmp3 = tmp2 == 0
    tmp4 = tmp3.to(tl.int64)
    tmp5 = tmp4 != 0
    tmp7 = tmp6 == tmp1
    tmp8 = tmp7 == 0
    tmp9 = tmp8.to(tl.int64)
    tmp10 = tmp9 != 0
    tmp11 = tmp5 | tmp10
    tmp13 = tmp12 == tmp1
    tmp14 = tmp13 == 0
    tmp15 = tmp14.to(tl.int64)
    tmp16 = tmp15 != 0
    tmp17 = tmp11 | tmp16
    tmp19 = tmp18 == tmp1
    tmp20 = tmp19 == 0
    tmp21 = tmp20.to(tl.int64)
    tmp22 = tmp21 != 0
    tmp23 = tmp17 | tmp22
    tmp24 = tmp23 == 0
    # Normalize by the row sum of exponentials.
    tmp28 = tmp26 + tmp27
    tmp30 = tmp28 + tmp29
    tmp32 = tmp30 + tmp31
    tmp33 = tmp25 / tmp32
    tmp34 = 0.0
    tmp35 = tl.where(tmp24, tmp34, tmp33)
    tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_stack_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Re-layouts the value projection into the stacked-heads layout
    # (stack of 4 splits along the feature dim), no scaling.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * x1), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * (-4 + x1)), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * (-8 + x1)), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp19 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * (-12 + x1)), tmp16 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Concatenates the 4 per-head context slices back along the feature
    # dimension (the torch.cat(torch.split(out, 1, 0), 3) step).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 2, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr0 + (16 + x1), tmp9 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 3, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (32 + x1), tmp14 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 4, tl.int64)
    tmp19 = tl.load(in_ptr0 + (48 + x1), tmp16 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tl.store(out_ptr0 + x2, tmp22, xmask)
def call(args):
    # Inductor-generated forward for MultiHeadAttention, specialized for
    # N=4, T=4, dims=4, heads=4: projections -> head stacking (scaled)
    # -> softmax(QK^T) -> attention-weighted values -> head concat.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Q, K, V projections as plain matmuls on flattened (N*T, dim).
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
        del primals_3
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        get_raw_stream(0)
        # Stack heads and pre-scale Q and K by 2**-0.5 each.
        triton_poi_fused_0[grid(64)](buf0, buf3, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf0
        triton_poi_fused_0[grid(64)](buf1, buf4, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        # Scores = Q @ K^T (batched over head x batch).
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Two-pass softmax: exp(x - max), then normalize.
        triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf5
        del buf6
        buf8 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0)
        del buf1
        # Stack V into the heads layout.
        triton_poi_fused_stack_3[grid(64)](buf2, buf8, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
        del buf2
        # Context = attention weights @ V.
        extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
        buf10 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Merge heads back along the feature dimension.
        triton_poi_fused_cat_4[grid(64)](buf9, buf10, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf9
    # Element 0 is the output; the rest are saved for backward.
    return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
        ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
        ), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0
        ), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
        ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class MultiHeadAttentionNew(nn.Module):
    """
    MultiHeadAttention variant whose forward runs the generated Triton
    `call` graph. Parameter layout matches MultiHeadAttention.

    input:
        query --- [N, T_q, query_dim]
        key --- [N, T_k, key_dim]
    output:
        out --- [N, T_q, num_units]
    """

    def __init__(self, query_dim, key_dim, num_units, num_heads):
        super().__init__()
        self.num_units = num_units
        self.num_heads = num_heads
        self.key_dim = key_dim
        self.W_query = nn.Linear(in_features=query_dim, out_features=
            num_units, bias=False)
        self.W_key = nn.Linear(in_features=key_dim, out_features=num_units,
            bias=False)
        self.W_value = nn.Linear(in_features=key_dim, out_features=
            num_units, bias=False)

    def forward(self, input_0, input_1):
        # Pack projection weights in the positional order call() expects.
        primals_1 = self.W_query.weight
        primals_3 = self.W_key.weight
        primals_5 = self.W_value.weight
        primals_2 = input_0
        primals_4 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
|
CookiePPP/pag-tacotron2
|
MultiHeadAttention
| false
| 17,171
|
[
"BSD-3-Clause"
] | 10
|
503e7e9e892c5c0795f6278e70e72b627ed1cfb7
|
https://github.com/CookiePPP/pag-tacotron2/tree/503e7e9e892c5c0795f6278e70e72b627ed1cfb7
|
GCN
|
import torch
import torch.nn as nn
class GCN(nn.Module):
    """One graph-convolution layer: out = adj @ (seq W) (+ bias)."""

    def __init__(self, in_ft, out_ft, act, bias=True):
        super(GCN, self).__init__()
        self.fc = nn.Linear(in_ft, out_ft, bias=False)
        self.act = nn.PReLU() if act == 'prelu' else act
        if bias:
            # Additive bias applied after neighborhood aggregation.
            self.bias = nn.Parameter(torch.FloatTensor(out_ft))
            self.bias.data.fill_(0.0)
        else:
            self.register_parameter('bias', None)
        for module in self.modules():
            self.weights_init(module)

    def weights_init(self, m):
        """Xavier-init linear weights; zero their biases if present."""
        if not isinstance(m, nn.Linear):
            return
        torch.nn.init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0.0)

    def forward(self, seq, adj, sparse=False):
        features = self.fc(seq)
        if sparse:
            # Sparse adjacency: drop the batch dim for spmm, restore after.
            aggregated = torch.spmm(adj, torch.squeeze(features, 0))
            out = torch.unsqueeze(aggregated, 0)
        else:
            out = torch.bmm(adj, features)
        if self.bias is not None:
            out = out + self.bias
        return out
def get_inputs():
    """Sample (seq, adj) inputs for smoke-testing GCN."""
    return [torch.rand(4, 4, 4), torch.rand(4, 4, 4)]


def get_init_inputs():
    """Positional args and constructor kwargs for GCN."""
    return [[], {'in_ft': 4, 'out_ft': 4, 'act': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place broadcast add of the 4-element bias over the (4, 4, 4)
    # aggregated features (bias indexed by the last dim).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
    # Inductor-generated dense forward for GCN: fc matmul -> bmm with the
    # adjacency -> in-place bias add. Specialized for (4, 4, 4) inputs.
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # seq_fts = seq @ W^T on the flattened (batch*node, feat) view.
        extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Neighborhood aggregation: adj @ seq_fts per batch.
        extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), (
            16, 4, 1), 0), out=buf1)
        del buf0
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(64)](buf2, primals_4, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_4
    # buf2 is the output; the rest are saved for backward.
    return buf2, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
        ), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0)
class GCNNew(nn.Module):
    """GCN variant whose forward runs the generated Triton `call` graph
    (dense path only; the sparse branch is not compiled)."""

    def __init__(self, in_ft, out_ft, act, bias=True):
        super(GCNNew, self).__init__()
        self.fc = nn.Linear(in_ft, out_ft, bias=False)
        self.act = nn.PReLU() if act == 'prelu' else act
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_ft))
            self.bias.data.fill_(0.0)
        else:
            self.register_parameter('bias', None)
        for m in self.modules():
            self.weights_init(m)

    def weights_init(self, m):
        # Xavier-init linear weights; zero their biases if present.
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def forward(self, input_0, input_1):
        # Pack parameters in the positional order call() expects.
        primals_4 = self.bias
        primals_1 = self.fc.weight
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
|
CrowdDynamicsLab/InfoMotif
|
GCN
| false
| 17,172
|
[
"BSD-3-Clause"
] | 7
|
cca1ffa14cc94408a5c4c50b7b1707c608e3bc9b
|
https://github.com/CrowdDynamicsLab/InfoMotif/tree/cca1ffa14cc94408a5c4c50b7b1707c608e3bc9b
|
DQN_hot4
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class DQN_hot4(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot4, self).__init__()
        # Three-layer MLP: (m*n) -> 100 -> 25 -> num_actions.
        self.fc1 = nn.Linear(m * n, 100)
        self.fc2 = nn.Linear(100, 25)
        self.fc3 = nn.Linear(25, num_actions)

    def forward(self, x):
        flat = x.view(x.size(0), -1)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
def get_inputs():
    """Sample board input for smoke-testing DQN_hot4."""
    return [torch.rand(4, 4, 4)]


def get_init_inputs():
    """Positional args and constructor kwargs for DQN_hot4."""
    return [[], {'m': 4, 'n': 4, 'num_actions': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias add + ReLU for fc1 (4 rows x 100 features).
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 100
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias add + ReLU for fc2 (4 rows x 25 features).
    xnumel = 100
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 25
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    # Inductor-generated forward for DQN_hot4: flatten -> fc1+ReLU ->
    # fc2+ReLU -> fc3 (addmm). Specialized for (4, 4, 4) inputs.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (100, 16), (16, 1))
    assert_size_stride(primals_3, (100,), (1,))
    assert_size_stride(primals_4, (25, 100), (100, 1))
    assert_size_stride(primals_5, (25,), (1,))
    assert_size_stride(primals_6, (4, 25), (25, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
        # fc1 matmul on the flattened board; bias fused into the ReLU kernel.
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
            ), reinterpret_tensor(primals_2, (16, 100), (1, 16), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(400)](buf1, primals_3, 400, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 25), (25, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (100, 25), (1,
            100), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(100)](buf3, primals_5, 100, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Final layer with bias via addmm.
        extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
            (25, 4), (1, 25), 0), alpha=1, beta=1, out=buf4)
        del primals_7
    # buf4 is the Q-value output; the rest are saved for backward.
    return buf4, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
        ), buf1, buf3, primals_6, primals_4
class DQN_hot4New(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation

    Forward runs the generated Triton `call` graph; parameter layout
    matches DQN_hot4.
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot4New, self).__init__()
        self.fc1 = nn.Linear(m * n, 100)
        self.fc2 = nn.Linear(100, 25)
        self.fc3 = nn.Linear(25, num_actions)

    def forward(self, input_0):
        # Pack weights/biases in the positional order call() expects.
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
|
CoAxLab/azad
|
DQN_hot4
| false
| 17,173
|
[
"MIT"
] | 6
|
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
DQN_hot1
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class DQN_hot1(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot1, self).__init__()
        # Two-layer MLP: (m*n) -> 15 -> num_actions.
        self.fc1 = nn.Linear(m * n, 15)
        self.fc2 = nn.Linear(15, num_actions)

    def forward(self, x):
        flat = x.view(x.size(0), -1)
        hidden = F.relu(self.fc1(flat))
        return self.fc2(hidden)
def get_inputs():
    """Sample board input for smoke-testing DQN_hot1."""
    return [torch.rand(4, 4, 4)]


def get_init_inputs():
    """Positional args and constructor kwargs for DQN_hot1."""
    return [[], {'m': 4, 'n': 4, 'num_actions': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias add + ReLU for fc1 (4 rows x 15 features).
    xnumel = 60
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 15
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    # Inductor-generated forward for DQN_hot1: flatten -> fc1+ReLU -> fc2.
    # Specialized for (4, 4, 4) inputs.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (15, 16), (16, 1))
    assert_size_stride(primals_3, (15,), (1,))
    assert_size_stride(primals_4, (4, 15), (15, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 15), (15, 1), torch.float32)
        # fc1 matmul on the flattened board; bias fused into the ReLU kernel.
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
            ), reinterpret_tensor(primals_2, (16, 15), (1, 16), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(60)](buf1, primals_3, 60, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Final layer with bias via addmm.
        extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
            (15, 4), (1, 15), 0), alpha=1, beta=1, out=buf2)
        del primals_5
    # buf2 is the Q-value output; the rest are saved for backward.
    return buf2, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
        ), buf1, primals_4
class DQN_hot1New(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation

    Forward runs the generated Triton `call` graph; parameter layout
    matches DQN_hot1.
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot1New, self).__init__()
        self.fc1 = nn.Linear(m * n, 15)
        self.fc2 = nn.Linear(15, num_actions)

    def forward(self, input_0):
        # Pack weights/biases in the positional order call() expects.
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
|
CoAxLab/azad
|
DQN_hot1
| false
| 17,174
|
[
"MIT"
] | 6
|
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
SoftQNetwork
|
import torch
import torch.nn.functional as F
import torch.nn as nn
def mish(x):
    """
    Mish: A Self Regularized Non-Monotonic Neural Activation Function
    https://arxiv.org/abs/1908.08681v1
    implemented for PyTorch / FastAI by lessw2020
    https://github.com/lessw2020/mish

    Computes x * tanh(softplus(x)), clamped from above at 6.

    param:
        x: output of a layer of a neural network
    return: mish activation function
    """
    activated = x * F.softplus(x).tanh()
    return activated.clamp(max=6)
class SoftQNetwork(nn.Module):
    """Soft Q-network: Q(state, action) via a 3-hidden-layer Mish MLP."""

    def __init__(self, num_inputs, num_actions, hidden_size, init_w=0.003):
        super(SoftQNetwork, self).__init__()
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear2_3 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, 1)
        # Uniform init in [-init_w, init_w]; loop order (weight then bias,
        # layer by layer) matches the original per-layer init sequence.
        for layer in (self.linear1, self.linear2, self.linear2_3, self.linear3):
            layer.weight.data.uniform_(-init_w, init_w)
            layer.bias.data.uniform_(-init_w, init_w)

    def forward(self, state, action):
        h = torch.cat([state, action], 1)
        for layer in (self.linear1, self.linear2, self.linear2_3):
            h = mish(layer(h))
        return self.linear3(h)
def get_inputs():
    """Sample (state, action) inputs for smoke-testing SoftQNetwork."""
    return [torch.rand(4, 4), torch.rand(4, 4)]


def get_init_inputs():
    """Positional args and constructor kwargs for SoftQNetwork."""
    return [[], {'num_inputs': 4, 'num_actions': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Concatenate state (first 4 cols) and action (last 4 cols) along
    # dim 1 into a (4, 8) buffer.
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_clamp_mul_softplus_tanh_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Elementwise mish activation clamped at 6:
    #   out = min(x * tanh(softplus(x)), 6)
    # Softplus uses the numerically stable form: return x itself when
    # x > 20, else log1p(exp(x)).
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1  # softplus overflow guard
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)  # softplus(x)
    tmp6 = libdevice.tanh(tmp5)
    tmp7 = tmp0 * tmp6  # mish(x)
    tmp8 = 6.0
    tmp9 = triton_helpers.minimum(tmp7, tmp8)  # clamp(max=6)
    tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
    """Inductor-generated forward graph for SoftQNetwork.

    Pipeline: cat -> addmm -> mish -> addmm -> mish -> addmm -> mish ->
    addmm (scalar head). `args` packs the inputs and layer parameters as
    laid out by SoftQNetworkNew.forward; returned buffers beyond buf8 are
    intermediates saved for the backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (1, 4), (4, 1))
    assert_size_stride(primals_10, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        # concatenate the two (4, 4) primals into a (4, 8) buffer
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # bias + buf0 @ weight^T (first linear layer)
        extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
            (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
        del primals_3
        del primals_4
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # clamped mish activation
        triton_poi_fused_clamp_mul_softplus_tanh_1[grid(16)](buf1, buf2, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
        del primals_6
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_clamp_mul_softplus_tanh_1[grid(16)](buf3, buf4, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
        del primals_8
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_clamp_mul_softplus_tanh_1[grid(16)](buf5, buf6, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        # scalar Q-value head
        extern_kernels.addmm(primals_10, buf6, reinterpret_tensor(primals_9,
            (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf8)
        del primals_10
    return (buf8, buf0, buf1, buf2, buf3, buf4, buf5, buf6, primals_9,
        primals_7, primals_5)
def mish(x):
    """Mish activation, capped at 6.

    Mish: A Self Regularized Non-Monotonic Neural Activation Function
    (https://arxiv.org/abs/1908.08681v1), mish(x) = x * tanh(softplus(x)).
    The result is clamped from above at 6 to keep activations bounded.
    """
    softplus_x = F.softplus(x)
    activated = x * softplus_x.tanh()
    return activated.clamp(max=6)
class SoftQNetworkNew(nn.Module):
    """Soft Q-network whose forward pass runs the Inductor-compiled graph
    in `call` instead of eager PyTorch ops."""

    def __init__(self, num_inputs, num_actions, hidden_size, init_w=0.003):
        super(SoftQNetworkNew, self).__init__()
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear2_3 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, 1)
        # Small uniform init for every layer, matching the eager model.
        for layer in (self.linear1, self.linear2, self.linear2_3,
                self.linear3):
            layer.weight.data.uniform_(-init_w, init_w)
            layer.bias.data.uniform_(-init_w, init_w)

    def forward(self, input_0, input_1):
        # Pack inputs and parameters in the order the compiled graph expects.
        packed = [self.linear2.weight, self.linear2_3.weight, self.linear1
            .weight, self.linear1.bias, input_0, self.linear2.bias,
            input_1, self.linear2_3.bias, self.linear3.weight, self.
            linear3.bias]
        return call(packed)[0]
|
Crawford-fang/ROS_pytorch_RL
|
SoftQNetwork
| false
| 17,175
|
[
"Apache-2.0"
] | 10
|
2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
|
https://github.com/Crawford-fang/ROS_pytorch_RL/tree/2d3476f15d51aa1f5b5ae9edc5d7f4c776e5de9f
|
DQN_hot3
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class DQN_hot3(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot3, self).__init__()
        self.fc1 = nn.Linear(m * n, 10)
        self.fc2 = nn.Linear(10, 20)
        self.fc3 = nn.Linear(20, num_actions)

    def forward(self, x):
        # Flatten the (m, n) board, then two ReLU hidden layers.
        flat = x.view(x.size(0), -1)
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)
def get_inputs():
    """Sample forward() input: one random (4, 4, 4) board batch."""
    return [torch.rand(4, 4, 4)]
def get_init_inputs():
    """Constructor arguments: no positionals, keyword dict for DQN_hot3."""
    return [[], {'m': 4, 'n': 4, 'num_actions': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place fused bias-add + ReLU for the 10-unit hidden layer:
    # in_out_ptr0 holds the matmul result, in_ptr0 is the (10,) bias.
    xnumel = 40
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 10  # hidden-unit index (selects the bias element)
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place fused bias-add + ReLU for the 20-unit hidden layer.
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 20  # hidden-unit index (selects the bias element)
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """Inductor-generated forward for DQN_hot3:
    flatten -> fc1 -> relu -> fc2 -> relu -> fc3.

    primals_1 is the input board; primals_2/3, 4/5, 6/7 are the
    weight/bias pairs of fc1, fc2, fc3 respectively.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (10, 16), (16, 1))
    assert_size_stride(primals_3, (10,), (1,))
    assert_size_stride(primals_4, (20, 10), (10, 1))
    assert_size_stride(primals_5, (20,), (1,))
    assert_size_stride(primals_6, (4, 20), (20, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        # x.view(4, 16) @ fc1.weight^T
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
            ), reinterpret_tensor(primals_2, (16, 10), (1, 16), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # fused bias + ReLU, in place
        triton_poi_fused_relu_0[grid(40)](buf1, primals_3, 40, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (10, 20), (1,
            10), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(80)](buf3, primals_5, 80, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # output layer: bias + h @ fc3.weight^T
        extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
            (20, 4), (1, 20), 0), alpha=1, beta=1, out=buf4)
        del primals_7
    return buf4, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
        ), buf1, buf3, primals_6, primals_4
class DQN_hot3New(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation. Forward runs the
    Inductor-compiled graph in `call`.
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot3New, self).__init__()
        self.fc1 = nn.Linear(m * n, 10)
        self.fc2 = nn.Linear(10, 20)
        self.fc3 = nn.Linear(20, num_actions)

    def forward(self, input_0):
        # Pack input and parameters in the order the compiled graph expects.
        packed = [input_0, self.fc1.weight, self.fc1.bias, self.fc2.weight,
            self.fc2.bias, self.fc3.weight, self.fc3.bias]
        return call(packed)[0]
|
CoAxLab/azad
|
DQN_hot3
| false
| 17,176
|
[
"MIT"
] | 6
|
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
Encoder_attention
|
import torch
import torch.nn as nn
class Encoder_attention(nn.Module):
    """Soft attention pooling over dim 1: score each position with a
    linear layer, softmax the scores, and return the weighted sum."""

    def __init__(self, n_h):
        super(Encoder_attention, self).__init__()
        self.linear = nn.Linear(n_h, 1)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Return (pooled X, detached attention weights)."""
        scores = self.linear(x).squeeze()
        weights = self.softmax(scores).unsqueeze(2)
        pooled = torch.mul(x, weights).sum(dim=1)
        return pooled, weights.squeeze().clone().detach()
def get_inputs():
    """Sample forward() input: one random (4, 4, 4, 4) tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments: hidden size n_h=4, no positionals."""
    return [[], {'n_h': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax pass 1 over dim=1 of a (4, 4, 4) view (4 slices strided by
    # 4): subtract the per-slice max for numerical stability, then exp.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4  # innermost position
    x2 = xindex // 16  # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    # load all 4 elements of this softmax slice to compute the max
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7  # shift by the slice max
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax pass 2: normalise the exponentials from pass 1 by the sum
    # over each dim=1 slice.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4  # innermost position
    x2 = xindex // 16  # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    # load all 4 elements of this softmax slice to compute the sum
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7  # normalise
    tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Attention pooling: sum(x * weights, dim=1) with 4 slices per batch.
    # in_ptr0 is the (4, 4, 4, 4) input, in_ptr1 the softmax weights.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 16  # batch index
    x3 = xindex % 16  # position within the (4, 4) output slice
    x0 = xindex % 4  # innermost position (weight column)
    x4 = xindex
    # pairwise load of input slice k (stride 16) and its weight (stride 4)
    tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
    tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
    tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
    tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    # accumulate the 4 weighted terms
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tl.store(out_ptr0 + x4, tmp14, xmask)
def call(args):
    """Inductor-generated Encoder_attention forward.

    scores = addmm(bias, x.view(64, 4), W^T); weights = softmax(scores)
    via the two-pass kernels; pooled = sum(x * weights, dim=1).
    primals_1/2 are the linear weight/bias, primals_3 the input.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4), (4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        # attention scores: bias + x @ W^T
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
            ), alpha=1, beta=1, out=buf1)
        del primals_1
        del primals_2
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        # softmax pass 1: exp(x - max)
        triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
        del buf1
        # softmax pass 2: normalise by the slice sum
        triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf4 = buf2
        del buf2
        # weighted sum over dim 1
        triton_poi_fused_mul_sum_2[grid(64)](primals_3, buf3, buf4, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
    return buf4, buf3, primals_3, buf3
class Encoder_attentionNew(nn.Module):
    """Attention pooling encoder; forward runs the Inductor-compiled graph
    in `call` and returns (pooled X, attention weights)."""

    def __init__(self, n_h):
        super(Encoder_attentionNew, self).__init__()
        self.linear = nn.Linear(n_h, 1)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, input_0):
        result = call([self.linear.weight, self.linear.bias, input_0])
        return result[0], result[1]
|
CrowdDynamicsLab/InfoMotif
|
Encoder_attention
| false
| 17,177
|
[
"BSD-3-Clause"
] | 7
|
cca1ffa14cc94408a5c4c50b7b1707c608e3bc9b
|
https://github.com/CrowdDynamicsLab/InfoMotif/tree/cca1ffa14cc94408a5c4c50b7b1707c608e3bc9b
|
DQN_xy3
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class DQN_xy3(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation
    """

    def __init__(self):
        super(DQN_xy3, self).__init__()
        self.fc1 = nn.Linear(4, 10)
        self.fc2 = nn.Linear(10, 20)
        self.fc3 = nn.Linear(20, 1)

    def forward(self, x):
        # Flatten, then two ReLU hidden layers and a scalar head.
        flat = x.view(x.size(0), -1)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
def get_inputs():
    """Sample forward() input: one random (4, 4) batch."""
    return [torch.rand(4, 4)]
def get_init_inputs():
    """Constructor arguments: DQN_xy3 takes none."""
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place fused bias-add + ReLU for the 10-unit hidden layer:
    # in_out_ptr0 holds the matmul result, in_ptr0 is the (10,) bias.
    xnumel = 40
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 10  # hidden-unit index (selects the bias element)
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place fused bias-add + ReLU for the 20-unit hidden layer.
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 20  # hidden-unit index (selects the bias element)
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """Inductor-generated forward for DQN_xy3:
    fc1 -> relu -> fc2 -> relu -> fc3 (scalar head).

    primals_1 is the input; primals_2/3, 4/5, 6/7 are the weight/bias
    pairs of fc1, fc2, fc3 respectively.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (10, 4), (4, 1))
    assert_size_stride(primals_3, (10,), (1,))
    assert_size_stride(primals_4, (20, 10), (10, 1))
    assert_size_stride(primals_5, (20,), (1,))
    assert_size_stride(primals_6, (1, 20), (20, 1))
    assert_size_stride(primals_7, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        # x @ fc1.weight^T
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 10),
            (1, 4), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # fused bias + ReLU, in place
        triton_poi_fused_relu_0[grid(40)](buf1, primals_3, 40, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (10, 20), (1,
            10), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(80)](buf3, primals_5, 80, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        # scalar head: bias + h @ fc3.weight^T
        extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
            (20, 1), (1, 20), 0), alpha=1, beta=1, out=buf5)
        del primals_7
    return buf5, primals_1, buf1, buf3, primals_6, primals_4
class DQN_xy3New(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation. Forward runs the
    Inductor-compiled graph in `call`.
    """

    def __init__(self):
        super(DQN_xy3New, self).__init__()
        self.fc1 = nn.Linear(4, 10)
        self.fc2 = nn.Linear(10, 20)
        self.fc3 = nn.Linear(20, 1)

    def forward(self, input_0):
        # Pack input and parameters in the order the compiled graph expects.
        packed = [input_0, self.fc1.weight, self.fc1.bias, self.fc2.weight,
            self.fc2.bias, self.fc3.weight, self.fc3.bias]
        return call(packed)[0]
|
CoAxLab/azad
|
DQN_xy3
| false
| 17,178
|
[
"MIT"
] | 6
|
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
BPR
|
import torch
class BPR(torch.nn.Module):
    """Bayesian Personalized Ranking loss: -mean(log(sigmoid(pos - neg))),
    computed in float64."""

    def __init__(self):
        super(BPR, self).__init__()
        self._sigmoid = torch.nn.Sigmoid()

    def forward(self, pos, neg):
        diff = pos.double() - neg.double()
        log_prob = torch.log(self._sigmoid(diff))
        return -log_prob.mean()
def get_inputs():
    """Sample forward() inputs: random positive and negative score tensors."""
    return [torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments: BPR takes none."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_log_mean_neg_sigmoid_sub_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    # Single-program reduction over all 256 elements computing, in float64:
    #   -mean(log(sigmoid(pos - neg)))
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp2 = tl.load(in_ptr1 + r0, None)
    tmp1 = tmp0.to(tl.float64)  # promote to double before the subtraction
    tmp3 = tmp2.to(tl.float64)
    tmp4 = tmp1 - tmp3  # pos - neg
    tmp5 = tl.full([1], 1, tl.int32)
    tmp6 = -tmp4
    tmp7 = libdevice.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp9 = tmp5 / tmp8  # sigmoid(pos - neg) = 1 / (1 + exp(neg - pos))
    tmp10 = libdevice.log(tmp9)
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = tl.full([1], 256.0, tl.float64)
    tmp15 = tmp13 / tmp14  # mean over all 256 elements
    tmp16 = -tmp15
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
    """Inductor-generated BPR forward: one fused reduction kernel computes
    -mean(log(sigmoid(pos - neg))) into a float64 scalar."""
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # 0-dim float64 output buffer for the scalar loss
        buf0 = empty_strided_cuda((), (), torch.float64)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused__to_copy_log_mean_neg_sigmoid_sub_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class BPRNew(torch.nn.Module):
    """BPR ranking loss; forward runs the Inductor-compiled reduction
    kernel in `call`."""

    def __init__(self):
        super(BPRNew, self).__init__()
        self._sigmoid = torch.nn.Sigmoid()

    def forward(self, input_0, input_1):
        return call([input_0, input_1])[0]
|
DanielMorales9/FactorizationPyTorch
|
BPR
| false
| 17,179
|
[
"MIT"
] | 4
|
50f0644fdb4a903550fb3f1ba78fb9fb8649ceb1
|
https://github.com/DanielMorales9/FactorizationPyTorch/tree/50f0644fdb4a903550fb3f1ba78fb9fb8649ceb1
|
UpsampleConvLayer
|
import torch
from torch import nn
import torch.nn.functional as f
import torch.nn.parallel
class UpsampleConvLayer(nn.Module):
    """2x bilinear upsampling followed by Conv2d, optional norm and activation.

    Args:
        in_channels, out_channels, kernel_size, stride, padding: forwarded
            to nn.Conv2d (bias is disabled when norm == 'BN').
        activation: name of a torch-level activation function, e.g. 'relu'
            or 'tanh'; None disables the activation.
        norm: 'BN' for BatchNorm2d, 'IN' for InstanceNorm2d, or None.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, activation='relu', norm=None):
        super(UpsampleConvLayer, self).__init__()
        bias = False if norm == 'BN' else True
        self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding, bias=bias)
        if activation is not None:
            # Bug fix: the getattr fallback used to be the *string* 'relu',
            # which is not callable and crashed in forward whenever the
            # activation name was not a torch attribute. Fall back to the
            # callable torch.relu instead.
            self.activation = getattr(torch, activation, torch.relu)
        else:
            self.activation = None
        self.norm = norm
        if norm == 'BN':
            self.norm_layer = nn.BatchNorm2d(out_channels)
        elif norm == 'IN':
            self.norm_layer = nn.InstanceNorm2d(out_channels,
                track_running_stats=True)

    def forward(self, x):
        """Upsample x by 2 (bilinear, align_corners=False), convolve, then
        apply the optional norm layer and activation."""
        x_upsampled = f.interpolate(x, scale_factor=2, mode='bilinear',
            align_corners=False)
        out = self.conv2d(x_upsampled)
        if self.norm in ['BN', 'IN']:
            out = self.norm_layer(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
def get_inputs():
    """Sample forward() input: one random (4, 4, 4, 4) feature map."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments for UpsampleConvLayer."""
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
    in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Bilinear 2x upsampling of a (4, 4, 4, 4) input to (4, 4, 8, 8) with
    # align_corners=False: for each output pixel compute the fractional
    # source coordinate, clamp the four neighbour indices to [0, 3], and
    # blend horizontally then vertically.
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 8 % 8  # output row
    x0 = xindex % 8  # output column
    x2 = xindex // 64  # (batch, channel) plane
    x4 = xindex
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2  # source row = (row + 0.5) * 0.5 - 0.5
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)  # clamp at the top edge
    tmp8 = tmp7.to(tl.int32)  # top neighbour row
    tmp9 = tl.full([1], 1, tl.int64)
    tmp10 = tmp8 + tmp9
    tmp11 = tl.full([1], 3, tl.int64)
    tmp12 = triton_helpers.minimum(tmp10, tmp11)  # bottom row, clamped
    tmp13 = x0
    tmp14 = tmp13.to(tl.float32)
    tmp15 = tmp14 + tmp2
    tmp16 = tmp15 * tmp2  # source column, same mapping as rows
    tmp17 = tmp16 - tmp2
    tmp18 = triton_helpers.maximum(tmp17, tmp6)
    tmp19 = tmp18.to(tl.int32)  # left neighbour column
    tmp20 = tmp19 + tmp9
    tmp21 = triton_helpers.minimum(tmp20, tmp11)  # right column, clamped
    tmp22 = tl.load(in_ptr0 + (tmp21 + 4 * tmp12 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (tmp19 + 4 * tmp12 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp24 = tmp22 - tmp23
    tmp25 = tmp19.to(tl.float32)
    tmp26 = tmp18 - tmp25  # horizontal interpolation weight
    tmp27 = triton_helpers.maximum(tmp26, tmp6)
    tmp28 = 1.0
    tmp29 = triton_helpers.minimum(tmp27, tmp28)
    tmp30 = tmp24 * tmp29
    tmp31 = tmp23 + tmp30  # bottom edge, horizontally interpolated
    tmp32 = tl.load(in_ptr0 + (tmp19 + 4 * tmp8 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr0 + (tmp21 + 4 * tmp8 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp34 = tmp33 - tmp32
    tmp35 = tmp34 * tmp29
    tmp36 = tmp32 + tmp35  # top edge, horizontally interpolated
    tmp37 = tmp31 - tmp36
    tmp38 = tmp8.to(tl.float32)
    tmp39 = tmp7 - tmp38  # vertical interpolation weight
    tmp40 = triton_helpers.maximum(tmp39, tmp6)
    tmp41 = triton_helpers.minimum(tmp40, tmp28)
    tmp42 = tmp37 * tmp41
    tmp43 = tmp36 + tmp42  # final bilinear blend
    tl.store(in_out_ptr0 + x4, tmp43, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
    in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place fused bias-add + ReLU on the conv output; also emits the
    # boolean mask (activation <= 0) consumed by the ReLU backward pass.
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 25 % 4  # channel index (selects the bias element)
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # zero-gradient mask for backward
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
    """Inductor-generated UpsampleConvLayer forward (activation='relu',
    norm=None): bilinear 2x upsample -> conv2d -> fused bias + relu.

    primals_1 is the input, primals_2 the conv weight, primals_3 the bias.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
        buf1 = buf0
        del buf0
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        # bilinear 2x upsample into buf2
        triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
            (1024)](buf2, primals_1, 1024, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_1
        # conv2d without bias (bias is fused into the next kernel)
        buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 5, 5), (100, 25, 5, 1))
        buf4 = buf3
        del buf3
        buf5 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
        # bias + relu in place; buf5 records the <=0 mask for backward
        triton_poi_fused_convolution_relu_threshold_backward_1[grid(400)](buf4,
            primals_3, buf5, 400, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
    return buf4, primals_2, buf2, buf5
class UpsampleConvLayerNew(nn.Module):
    """Compiled variant of UpsampleConvLayer: same construction, but
    forward delegates to the Inductor-generated `call` graph (which bakes
    in the default relu activation and no norm)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, activation='relu', norm=None):
        super(UpsampleConvLayerNew, self).__init__()
        bias = False if norm == 'BN' else True
        self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding, bias=bias)
        if activation is not None:
            # Bug fix (consistent with UpsampleConvLayer): fall back to the
            # callable torch.relu, not the string 'relu'.
            self.activation = getattr(torch, activation, torch.relu)
        else:
            self.activation = None
        self.norm = norm
        if norm == 'BN':
            self.norm_layer = nn.BatchNorm2d(out_channels)
        elif norm == 'IN':
            self.norm_layer = nn.InstanceNorm2d(out_channels,
                track_running_stats=True)

    def forward(self, input_0):
        primals_1 = self.conv2d.weight
        primals_3 = self.conv2d.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
DA4EVENT/home
|
UpsampleConvLayer
| false
| 17,180
|
[
"MIT"
] | 5
|
18cc93a795ce132e05b886aa34565a102915b1c6
|
https://github.com/DA4EVENT/home/tree/18cc93a795ce132e05b886aa34565a102915b1c6
|
DeepTable3
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class DeepTable3(nn.Module):
    """A deep differentialable 'Table' for learning one-hot input and output.
    """

    def __init__(self, in_channels, out_channels, num_hidden1=200,
        num_hidden2=100):
        super(DeepTable3, self).__init__()
        self.fc1 = nn.Linear(in_channels, num_hidden1, bias=False)
        self.fc2 = nn.Linear(num_hidden1, num_hidden2, bias=False)
        self.fc3 = nn.Linear(num_hidden2, out_channels, bias=False)
        # Start the input and output projections at exactly zero.
        self.fc1.weight.data.uniform_(0.0, 0.0)
        self.fc3.weight.data.uniform_(0.0, 0.0)

    def forward(self, x):
        hidden = self.fc1(x)
        attention = F.softmax(self.fc2(hidden))
        return self.fc3(attention)
def get_inputs():
    """Sample forward() input: one random (4, 4, 4, 4) tensor."""
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    """Constructor arguments for DeepTable3."""
    return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax pass 1 over dim=1 of a (4, 4, 4, 100) view (4 slices strided
    # by 400): subtract the per-slice max for stability, then exp.
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 400  # position within a softmax slice
    x2 = xindex // 1600  # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    # load all 4 elements of this softmax slice to compute the max
    tmp1 = tl.load(in_ptr0 + (x0 + 1600 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (400 + x0 + 1600 * x2), xmask, eviction_policy
        ='evict_last')
    tmp4 = tl.load(in_ptr0 + (800 + x0 + 1600 * x2), xmask, eviction_policy
        ='evict_last')
    tmp6 = tl.load(in_ptr0 + (1200 + x0 + 1600 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7  # shift by the slice max
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax pass 2: normalise the exponentials from pass 1 by the sum
    # over each dim=1 slice (4 elements strided by 400).
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 400  # position within a softmax slice
    x2 = xindex // 1600  # batch index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    # load all 4 elements of this softmax slice to compute the sum
    tmp1 = tl.load(in_ptr0 + (x0 + 1600 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (400 + x0 + 1600 * x2), xmask, eviction_policy
        ='evict_last')
    tmp4 = tl.load(in_ptr0 + (800 + x0 + 1600 * x2), xmask, eviction_policy
        ='evict_last')
    tmp6 = tl.load(in_ptr0 + (1200 + x0 + 1600 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7  # normalise
    tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
    """Inductor-generated forward for DeepTable3:
    fc1 (mm) -> fc2 (mm) -> softmax (two-pass kernels) -> fc3 (mm).

    primals_1/3/4 are the fc1/fc2/fc3 weights (all bias-free);
    primals_2 is the input, flattened to (64, 4) for the matmuls.
    """
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (200, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (100, 200), (200, 1))
    assert_size_stride(primals_4, (4, 100), (100, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
        # x.view(64, 4) @ fc1.weight^T
        extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (200, 100), (
            1, 200), 0), out=buf1)
        buf2 = empty_strided_cuda((4, 4, 4, 100), (1600, 400, 100, 1),
            torch.float32)
        get_raw_stream(0)
        # softmax pass 1: exp(x - max)
        triton_poi_fused__softmax_0[grid(6400)](buf1, buf2, 6400, XBLOCK=
            128, num_warps=4, num_stages=1)
        buf3 = reinterpret_tensor(buf1, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
        del buf1
        # softmax pass 2: normalise by the slice sum
        triton_poi_fused__softmax_1[grid(6400)](buf2, buf3, 6400, XBLOCK=
            256, num_warps=4, num_stages=1)
        del buf2
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 100), (100, 1), 0),
            reinterpret_tensor(primals_4, (100, 4), (1, 100), 0), out=buf4)
    return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
        ), buf0, buf3, primals_4, primals_3
class DeepTable3New(nn.Module):
    """A deep differentialable 'Table' for learning one-hot input and output.

    Forward runs the Inductor-compiled graph in `call`.
    """

    def __init__(self, in_channels, out_channels, num_hidden1=200,
        num_hidden2=100):
        super(DeepTable3New, self).__init__()
        self.fc1 = nn.Linear(in_channels, num_hidden1, bias=False)
        self.fc2 = nn.Linear(num_hidden1, num_hidden2, bias=False)
        self.fc3 = nn.Linear(num_hidden2, out_channels, bias=False)
        # Start the input and output projections at exactly zero.
        self.fc1.weight.data.uniform_(0.0, 0.0)
        self.fc3.weight.data.uniform_(0.0, 0.0)

    def forward(self, input_0):
        # Pack parameters and input in the order the compiled graph expects.
        packed = [self.fc1.weight, input_0, self.fc2.weight, self.fc3.weight]
        return call(packed)[0]
|
CoAxLab/azad
|
DeepTable3
| false
| 17,181
|
[
"MIT"
] | 6
|
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
BinaryPrimitivesSomethingElse
|
import math
import torch
from torch import nn
def apply_last_dim(model, x):
    """Apply *model* over the last dimension of *x*, keeping leading dims."""
    # Flatten everything except the last dimension, run the model, then
    # restore the leading dimensions around the model's new last dim.
    shape = list(x.size())
    out = model(x.contiguous().view(-1, shape[-1]))
    shape[-1] = out.size(-1)
    return out.view(torch.Size(shape))
def get_int_dim_index(name):
    """Map an axis name to its integer component index.

    Integers pass through unchanged.  Otherwise *name* must be a single
    character from 'axyz', mapping to -1, 0, 1, 2 respectively ('a' is
    the sentinel meaning "all components").

    Raises:
        AssertionError: if *name* is not a single character in 'axyz'.
    """
    if isinstance(name, int):
        return name
    name_list = 'axyz'
    # Require a single character: the original `name in name_list` check
    # accepted substrings like 'xy' and then crashed with IndexError.
    assert isinstance(name, str) and len(name) == 1 and name in name_list
    # str.index replaces the original hand-rolled comprehension scan.
    return name_list.index(name) - 1
class Length(nn.Module):
    """Euclidean norm over selected components of the last dimension."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index

    def forward(self, states, dim_index=None):
        if dim_index is None:
            dim_index = self.dim_index
        # Normalize dim_index into a list of integer component indices.
        if isinstance(dim_index, int):
            indices = [dim_index]
        else:
            indices = [get_int_dim_index(d) for d in dim_index]
        if -1 in indices:
            # -1 means "use every component": plain L2 norm.
            def extractor(flat):
                return torch.sqrt(torch.sum(flat * flat, dim=1, keepdim=True))
        else:
            def extractor(flat):
                picked = flat[:, indices]
                return torch.sqrt(torch.sum(picked.pow(2), dim=1,
                    keepdim=True))
        return apply_last_dim(extractor, states)

    def show(self, name='Length', indent=0, log=print, **kwargs):
        log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self.
            dim_index)))
class Distance(nn.Module):
    """Euclidean distance between two states, delegated to Length."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index
        self.length = Length(dim_index)

    def forward(self, states1, states2, dim_index=None):
        # |s1 - s2| restricted to the requested components.
        delta = states1 - states2
        return self.length(delta, dim_index)

    def show(self, name='Distance', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name)
class Normalize(nn.Module):
    """Optionally rescale inputs according to a fitted distribution.

    ``distribution`` is None (identity), 'normal' (standardize by
    mean/std) or 'uniform' (rescale by min/max).  The two distribution
    parameters live in ``self.param`` as a frozen nn.Parameter.
    """

    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        self.data_ = []
        if distribution is None:
            pass
        elif distribution == 'normal':
            mean = kwargs.get('mean', 0)
            std = kwargs.get('std', 1)
            self.param = nn.Parameter(torch.Tensor([mean, std]), False)
        elif distribution == 'uniform':
            vmin = kwargs.get('minv', 0)
            vmax = kwargs.get('maxv', 1)
            self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False)
        else:
            raise NotImplementedError()

    def forward(self, x, keep_data=False):
        if keep_data:
            # Estimation pass: record raw values for later fitting only.
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            mean, std = self.param[0], self.param[1]
            return (x - mean) / std
        if self.distribution == 'uniform':
            vmin, vmax = self.param[0], self.param[1]
            return (x - vmin) / (vmax - vmin + 1e-05)
        raise NotImplementedError()

    def reset_parameters(self, name=None):
        assert len(self.data_) > 0
        data = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = data.mean().item()
                self.param[1] = data.std().item()
            if name is not None:
                None  # logging stripped in the original source
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = data.min().item()
                self.param[1] = data.max().item()
            if name is not None:
                None  # logging stripped in the original source
        else:
            raise NotImplementedError()

    def recover_threshold(self, x):
        # Inverse of forward(): map a normalized threshold back to raw units.
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        if self.distribution == 'uniform':
            span = float(self.param[1] - self.param[0] + 1e-05)
            return x * span + float(self.param[0])
        raise NotImplementedError()

    def init_thresholds(self, x):
        # None and 'normal' share the same standard-normal initialization.
        if self.distribution in (None, 'normal'):
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """
    Sigmoid((x - y) / e^beta)
    """

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        # A larger beta flattens the sigmoid, softening the comparison.
        scale = math.exp(beta)
        return self.sigmoid((x - y) / scale)
class Inequality(nn.Module):
    """Soft predicates ``state > threshold_k`` for ``out_dim`` thresholds."""

    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True
            )
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        self.normalize.init_thresholds(self.thresholds)

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, ... ]
        """
        # Append a trailing singleton dim so every threshold broadcasts.
        expanded = states.view(*(states.size() + (1,)))
        estimating = kwargs.get('estimate_parameters', False)
        expanded = self.normalize(expanded, keep_data=estimating)
        broadcast_shape = [1] * len(states.size()) + [self.out_dim]
        return self.cmp(expanded, self.thresholds.view(*broadcast_shape), beta)

    def reset_parameters(self, parameter_name, name=None):
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)

    def get_descriptions(self, name='Inequality'):
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descriptions = []
        for k in range(theta.size(0)):
            t = self.normalize.recover_threshold(theta[k])
            # Re-scale to physical units for speed/acceleration features.
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descriptions.append('%s > %.2lf' % (name, t))
        return descriptions
class AlignDifferential(nn.Module):
    """Central-difference time derivative along dim 1, same output length."""

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        return length

    def forward(self, states):
        """
        :param states: [batch, length, *]
        """
        # Mirror-extrapolate one step at each end so the central
        # difference is defined at both boundaries.
        front = states[:, 0:1] * 2 - states[:, 1:2]
        back = states[:, -1:] * 2 - states[:, -2:-1]
        padded = torch.cat([front, states, back], dim=1)
        return (padded[:, 2:] - padded[:, :-2]) / 2

    def show(self, name='AlignDifferential', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,))
class N_aryPrimitivesSomethingElse(nn.Module):
    """Base container holding named inequality primitives."""

    def __init__(self):
        super().__init__()
        self.out_dim = 0
        self.ineqs = nn.ModuleDict({})

    def reset_parameters(self, parameter_name):
        # Forward the reset to every registered inequality primitive.
        for key in self.ineqs:
            self.ineqs[key].reset_parameters(parameter_name, name=key)
class BinaryPrimitivesSomethingElse(N_aryPrimitivesSomethingElse):
    # Pairwise (binary) spatial primitives: for every ordered pair of
    # agents, soft inequalities over distance, approach velocity, and
    # per-axis overlap/containment gaps.

    def __init__(self, cmp_dim=10):
        super().__init__()
        self.distance = Distance()
        self.differential = AlignDifferential()
        # dist/app_vel get cmp_dim thresholds each; the four overlap/
        # contain predicates get a single threshold apiece.
        self.ineqs.update({'dist': Inequality(out_dim=cmp_dim, distribution
            ='uniform'), 'app_vel': Inequality(out_dim=cmp_dim,
            distribution='normal'), 'overlap_x': Inequality(out_dim=1,
            distribution=None), 'overlap_y': Inequality(out_dim=1,
            distribution=None), 'contain_x': Inequality(out_dim=1,
            distribution=None), 'contain_y': Inequality(out_dim=1,
            distribution=None)})
        self.out_dim = sum([self.ineqs[k].out_dim for k in self.ineqs])

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, state_dim]
        return [batch, length, n_agents, n_agents, out_dim]
        """
        n_agents = states.size(2)
        # Tile states into all ordered agent pairs along two new axes.
        p1 = states.unsqueeze(2).repeat(1, 1, n_agents, 1, 1)
        p2 = states.unsqueeze(3).repeat(1, 1, 1, n_agents, 1)
        # Planar (x, y) distance and its time derivative, plus per-axis
        # distances.  Components 2 and 3 are treated as per-axis extents;
        # NOTE(review): assumes state layout (x, y, w, h) -- confirm.
        distances = self.distance(p1, p2, dim_index=(0, 1))
        app_velocity = self.differential(distances)
        distances_x = self.distance(p1, p2, dim_index=(0,))
        distances_y = self.distance(p1, p2, dim_index=(1,))
        ineqs_inputs = {'dist': distances.squeeze(4), 'app_vel':
            app_velocity.squeeze(4), 'overlap_x': distances_x.squeeze(4) -
            (p1[:, :, :, :, 2] + p2[:, :, :, :, 2]) / 2, 'overlap_y':
            distances_y.squeeze(4) - (p1[:, :, :, :, 3] + p2[:, :, :, :, 3]
            ) / 2, 'contain_x': distances_x.squeeze(4) - (p1[:, :, :, :, 2] -
            p2[:, :, :, :, 2]) / 2, 'contain_y': distances_y.squeeze(4) - (
            p1[:, :, :, :, 3] - p2[:, :, :, :, 3]) / 2}
        # Concatenate every primitive's soft truth values on the last axis.
        output = torch.cat([self.ineqs[k](ineqs_inputs[k], beta=beta, **
            kwargs) for k in ineqs_inputs.keys()], dim=-1)
        return output
def get_inputs():
    # One random 4x4x4x4 tensor matching the module's expected input.
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    # The module's constructor takes no positional or keyword arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Inductor-generated pointwise kernel over 64 elements.  Per element
    # it gathers pairwise differences from in_ptr0 and emits
    # 2*sqrt(dx^2 + dy^2) - sqrt(dx'^2 + dy'^2), i.e. the extrapolated
    # boundary term 2*d[0] - d[1] -- presumably the front padding of the
    # central differential (TODO confirm against AlignDifferential).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # Decompose the flat index into (pair, pair, batch) coordinates.
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = tl.full([1], 1, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.where(tmp2, tmp0, tmp1)
    tmp4 = tl.load(in_ptr0 + (tmp3 + 4 * x0 + 16 * ((x0 + 4 * x1) // 16) +
        64 * x2 + 64 * ((x0 + 4 * x1) // 64)), xmask, eviction_policy=
        'evict_last')
    tmp5 = tl.load(in_ptr0 + (tmp3 + 4 * x1 + 64 * x2 + 64 * ((x0 + 4 * x1) //
        64)), xmask, eviction_policy='evict_last')
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp1 < tmp1
    tmp9 = tl.where(tmp8, tmp0, tmp1)
    tmp10 = tl.load(in_ptr0 + (tmp9 + 4 * x0 + 16 * ((x0 + 4 * x1) // 16) +
        64 * x2 + 64 * ((x0 + 4 * x1) // 64)), xmask, eviction_policy=
        'evict_last')
    tmp11 = tl.load(in_ptr0 + (tmp9 + 4 * x1 + 64 * x2 + 64 * ((x0 + 4 * x1
        ) // 64)), xmask, eviction_policy='evict_last')
    tmp12 = tmp10 - tmp11
    tmp13 = tmp12 * tmp12
    tmp14 = tmp7 + tmp13
    tmp15 = libdevice.sqrt(tmp14)
    tmp16 = 2.0
    tmp17 = tmp15 * tmp16
    tmp18 = tl.load(in_ptr0 + (16 + tmp3 + 4 * x0 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x2 + 64 * ((16 + x0 + 4 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (16 + tmp3 + 4 * x1 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x2 + 64 * ((16 + x0 + 4 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp20 = tmp18 - tmp19
    tmp21 = tmp20 * tmp20
    tmp22 = tl.load(in_ptr0 + (16 + tmp9 + 4 * x0 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x2 + 64 * ((16 + x0 + 4 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (16 + tmp9 + 4 * x1 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x2 + 64 * ((16 + x0 + 4 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp24 = tmp22 - tmp23
    tmp25 = tmp24 * tmp24
    tmp26 = tmp21 + tmp25
    tmp27 = libdevice.sqrt(tmp26)
    tmp28 = tmp17 - tmp27
    tl.store(out_ptr0 + x3, tmp28, xmask)
@triton.jit
def triton_poi_fused_mul_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Inductor-generated pointwise kernel over 64 elements; mirror of
    # triton_poi_fused_mul_sub_0 but reading from offsets 48/32 instead
    # of 0/16 -- presumably the back-boundary term 2*d[-1] - d[-2] of
    # the central differential (TODO confirm against AlignDifferential).
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = tl.full([1], 1, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.where(tmp2, tmp0, tmp1)
    tmp4 = tl.load(in_ptr0 + (48 + tmp3 + 4 * x0 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x2 + 64 * ((48 + x0 + 4 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (48 + tmp3 + 4 * x1 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x2 + 64 * ((48 + x0 + 4 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp1 < tmp1
    tmp9 = tl.where(tmp8, tmp0, tmp1)
    tmp10 = tl.load(in_ptr0 + (48 + tmp9 + 4 * x0 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x2 + 64 * ((48 + x0 + 4 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (48 + tmp9 + 4 * x1 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x2 + 64 * ((48 + x0 + 4 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp12 = tmp10 - tmp11
    tmp13 = tmp12 * tmp12
    tmp14 = tmp7 + tmp13
    tmp15 = libdevice.sqrt(tmp14)
    tmp16 = 2.0
    tmp17 = tmp15 * tmp16
    tmp18 = tl.load(in_ptr0 + (32 + tmp3 + 4 * x0 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x2 + 64 * ((32 + x0 + 4 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (32 + tmp3 + 4 * x1 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x2 + 64 * ((32 + x0 + 4 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp20 = tmp18 - tmp19
    tmp21 = tmp20 * tmp20
    tmp22 = tl.load(in_ptr0 + (32 + tmp9 + 4 * x0 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x2 + 64 * ((32 + x0 + 4 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (32 + tmp9 + 4 * x1 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x2 + 64 * ((32 + x0 + 4 * x1) // 64)), xmask,
        eviction_policy='evict_last')
    tmp24 = tmp22 - tmp23
    tmp25 = tmp24 * tmp24
    tmp26 = tmp21 + tmp25
    tmp27 = libdevice.sqrt(tmp26)
    tmp28 = tmp17 - tmp27
    tl.store(out_ptr0 + x3, tmp28, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Inductor-generated concatenation along the 6-slot axis (x2):
    # slot 0 copies in_ptr0, slots 1..4 compute sqrt of summed squared
    # pairwise differences from in_ptr1, slot 5 copies in_ptr2.
    xnumel = 384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 16 % 6
    x3 = xindex // 96
    x4 = xindex % 16
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x5 = xindex
    tmp0 = x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    # Branch 1: first slot -> copy the front-boundary buffer.
    tmp5 = tl.load(in_ptr0 + (x4 + 16 * x3), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 5, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tmp1 < tmp3
    tmp11 = tl.where(tmp10, tmp1, tmp3)
    # Branch 2: interior slots -> pairwise planar distance from in_ptr1.
    tmp12 = tl.load(in_ptr1 + (tmp11 + 4 * x0 + 16 * ((x0 + 4 * x1 + 16 * (
        -1 + x2)) // 16 % 4) + 64 * ((x0 + 4 * x1 + 16 * (-1 + x2) + 64 *
        x3) // 64 % 4)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
    tmp13 = tl.load(in_ptr1 + (tmp11 + 4 * x1 + 16 * ((x0 + 4 * x1 + 16 * (
        -1 + x2)) // 16 % 4) + 64 * ((x0 + 4 * x1 + 16 * (-1 + x2) + 64 *
        x3) // 64 % 4)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
    tmp14 = tmp12 - tmp13
    tmp15 = tmp14 * tmp14
    tmp16 = tmp3 < tmp3
    tmp17 = tl.where(tmp16, tmp1, tmp3)
    tmp18 = tl.load(in_ptr1 + (tmp17 + 4 * x0 + 16 * ((x0 + 4 * x1 + 16 * (
        -1 + x2)) // 16 % 4) + 64 * ((x0 + 4 * x1 + 16 * (-1 + x2) + 64 *
        x3) // 64 % 4)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
    tmp19 = tl.load(in_ptr1 + (tmp17 + 4 * x1 + 16 * ((x0 + 4 * x1 + 16 * (
        -1 + x2)) // 16 % 4) + 64 * ((x0 + 4 * x1 + 16 * (-1 + x2) + 64 *
        x3) // 64 % 4)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
    tmp20 = tmp18 - tmp19
    tmp21 = tmp20 * tmp20
    tmp22 = tmp15 + tmp21
    tmp23 = libdevice.sqrt(tmp22)
    tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
    tmp25 = tl.where(tmp9, tmp23, tmp24)
    tmp26 = tmp0 >= tmp7
    tl.full([1], 6, tl.int64)
    # Branch 3: last slot -> copy the back-boundary buffer.
    tmp29 = tl.load(in_ptr2 + (x4 + 16 * x3), tmp26 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp30 = tl.where(tmp9, tmp25, tmp29)
    tmp31 = tl.where(tmp4, tmp5, tmp30)
    tl.store(out_ptr0 + x5, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_div_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Inductor-generated fusion: pairwise planar distance followed by
    # uniform normalization (d - p[0]) / (p[1] - p[0] + 1e-05), where
    # p is the 2-element parameter vector at in_ptr1.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    x4 = xindex
    tmp16 = tl.load(in_ptr1 + 0)
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
    tmp19 = tl.load(in_ptr1 + 1)
    tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = tl.full([1], 1, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.where(tmp2, tmp0, tmp1)
    tmp4 = tl.load(in_ptr0 + (tmp3 + 4 * x0 + 16 * x2 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (tmp3 + 4 * x1 + 16 * x2 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask,
        eviction_policy='evict_last')
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp1 < tmp1
    tmp9 = tl.where(tmp8, tmp0, tmp1)
    tmp10 = tl.load(in_ptr0 + (tmp9 + 4 * x0 + 16 * x2 + 16 * ((x0 + 4 * x1
        ) // 16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask,
        eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (tmp9 + 4 * x1 + 16 * x2 + 16 * ((x0 + 4 * x1
        ) // 16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask,
        eviction_policy='evict_last')
    tmp12 = tmp10 - tmp11
    tmp13 = tmp12 * tmp12
    tmp14 = tmp7 + tmp13
    tmp15 = libdevice.sqrt(tmp14)
    tmp18 = tmp15 - tmp17
    tmp21 = tmp20 - tmp17
    tmp22 = 1e-05
    tmp23 = tmp21 + tmp22
    tmp24 = tmp18 / tmp23
    tl.store(out_ptr0 + x4, tmp24, xmask)
@triton.jit
def triton_poi_fused_div_sub_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    # Inductor-generated fusion: central difference (a - b) * 0.5 over
    # two slices of in_ptr0, then normal-normalization
    # (v - p[0]) / p[1] using the 2-element parameter vector at in_ptr1.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 64
    x1 = xindex // 64
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (32 + x0 + 96 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 96 * x1), xmask)
    tmp5 = tl.load(in_ptr1 + 0)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp8 = tl.load(in_ptr1 + 1)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp2 = tmp0 - tmp1
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tmp7 = tmp4 - tmp6
    tmp10 = tmp7 / tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_div_sigmoid_sigmoid_backward_sub_5(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.
    constexpr):
    # Inductor-generated fusion for two single-threshold predicates on
    # one axis: d = |a - b|; gap1 = d - (c + e)/2; gap2 = d - (c - e)/2;
    # s_i = sigmoid(gap_i - threshold_i).  Writes s1/s2 interleaved into
    # a stride-24 output and also stores s*(1-s) terms for backward.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    x5 = xindex // 16
    x6 = xindex // 4
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2 + 16 * ((x0 + 4 * x1) // 16) +
        64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (4 * x1 + 16 * x2 + 16 * ((x0 + 4 * x1) // 16) +
        64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x5), xmask, eviction_policy
        ='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x6), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + 0)
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
    tmp20 = tl.load(in_ptr2 + 0)
    tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = libdevice.sqrt(tmp3)
    tmp7 = tmp5 + tmp6
    tmp8 = 0.5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp4 - tmp9
    tmp13 = tmp10 - tmp12
    tmp14 = 1.0
    tmp15 = tmp13 * tmp14
    tmp16 = tl.sigmoid(tmp15)
    tmp17 = tmp5 - tmp6
    tmp18 = tmp17 * tmp8
    tmp19 = tmp4 - tmp18
    tmp22 = tmp19 - tmp21
    tmp23 = tmp22 * tmp14
    tmp24 = tl.sigmoid(tmp23)
    tmp25 = tmp14 - tmp24
    tmp26 = tmp24 * tmp25
    tmp27 = tmp14 - tmp16
    tmp28 = tmp16 * tmp27
    tl.store(out_ptr0 + 24 * x4, tmp16, xmask)
    tl.store(out_ptr1 + 24 * x4, tmp24, xmask)
    tl.store(out_ptr2 + x4, tmp26, xmask)
    tl.store(out_ptr3 + x4, tmp28, xmask)
@triton.jit
def triton_poi_fused_div_sigmoid_sigmoid_backward_sub_6(in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.
    constexpr):
    # Same fused predicate computation as kernel _5 but reading state
    # components 1 and 3 (offsets 1/3) instead of 0 and 2 -- the second
    # axis' pair of single-threshold predicates.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    x5 = xindex // 16
    x6 = xindex // 4
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x1 + 16 * x2 + 16 * ((x0 + 4 * x1) //
        16) + 64 * x3 + 64 * ((x0 + 4 * x1 + 16 * x2) // 64)), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x5), xmask, eviction_policy
        ='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x6), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + 0)
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
    tmp20 = tl.load(in_ptr2 + 0)
    tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = libdevice.sqrt(tmp3)
    tmp7 = tmp5 + tmp6
    tmp8 = 0.5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp4 - tmp9
    tmp13 = tmp10 - tmp12
    tmp14 = 1.0
    tmp15 = tmp13 * tmp14
    tmp16 = tl.sigmoid(tmp15)
    tmp17 = tmp5 - tmp6
    tmp18 = tmp17 * tmp8
    tmp19 = tmp4 - tmp18
    tmp22 = tmp19 - tmp21
    tmp23 = tmp22 * tmp14
    tmp24 = tl.sigmoid(tmp23)
    tmp25 = tmp14 - tmp24
    tmp26 = tmp24 * tmp25
    tmp27 = tmp14 - tmp16
    tmp28 = tmp16 * tmp27
    tl.store(out_ptr0 + 24 * x4, tmp16, xmask)
    tl.store(out_ptr1 + 24 * x4, tmp24, xmask)
    tl.store(out_ptr2 + x4, tmp26, xmask)
    tl.store(out_ptr3 + x4, tmp28, xmask)
@triton.jit
def triton_poi_fused_div_sigmoid_sub_7(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Inductor-generated fusion: broadcast one scalar input against 10
    # thresholds (x0), compute sigmoid(input - threshold), and scatter
    # the result into a stride-24 concatenated output buffer.
    xnumel = 2560
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 10
    x0 = xindex % 10
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tmp5 = tl.sigmoid(tmp4)
    tl.store(out_ptr0 + (x0 + 24 * x1), tmp5, xmask)
@triton.jit
def triton_poi_fused_div_sigmoid_sub_8(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Identical computation to kernel _7 (sigmoid(input - threshold)
    # over 10 thresholds); the caller points it at a different input
    # buffer and a different offset of the concatenated output.
    xnumel = 2560
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 10
    x0 = xindex % 10
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tmp5 = tl.sigmoid(tmp4)
    tl.store(out_ptr0 + (x0 + 24 * x1), tmp5, xmask)
def call(args):
    # Inductor-generated entry point.  Unpacks the input tensor
    # (primals_1) and eight primitive parameters (normalize params and
    # thresholds), runs the fused Triton kernels on CUDA device 0, and
    # returns the [4,4,4,4,24] concatenated primitive activations
    # (buf11) plus tensors saved for the backward pass.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (2,), (1,))
    assert_size_stride(primals_3, (10,), (1,))
    assert_size_stride(primals_4, (2,), (1,))
    assert_size_stride(primals_5, (10,), (1,))
    assert_size_stride(primals_6, (1,), (1,))
    assert_size_stride(primals_7, (1,), (1,))
    assert_size_stride(primals_8, (1,), (1,))
    assert_size_stride(primals_9, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0/buf1: front and back boundary terms of the differential.
        buf0 = empty_strided_cuda((4, 1, 4, 4, 1), (16, 64, 4, 1, 64),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sub_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 1, 4, 4, 1), (16, 64, 4, 1, 64),
            torch.float32)
        triton_poi_fused_mul_sub_1[grid(64)](primals_1, buf1, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        # buf2: concatenation [boundary, distances, boundary] (6 slots).
        buf2 = empty_strided_cuda((4, 6, 4, 4, 1), (96, 16, 4, 1, 384),
            torch.float32)
        triton_poi_fused_cat_2[grid(384)](buf0, primals_1, buf1, buf2, 384,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del buf1
        # buf3: uniform-normalized distances; buf4: normalized velocities.
        buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
            .float32)
        triton_poi_fused_add_div_sub_3[grid(256)](primals_1, primals_2,
            buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf4 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
            .float32)
        triton_poi_fused_div_sub_4[grid(256)](buf2, primals_4, buf4, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf2
        del buf1
        del primals_4
        buf11 = empty_strided_cuda((4, 4, 4, 4, 24), (1536, 384, 96, 24, 1),
            torch.float32)
        buf5 = reinterpret_tensor(buf11, (4, 4, 4, 4, 1), (1536, 384, 96,
            24, 1), 20)
        buf7 = reinterpret_tensor(buf11, (4, 4, 4, 4, 1), (1536, 384, 96,
            24, 1), 22)
        buf13 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1),
            torch.float32)
        buf15 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1),
            torch.float32)
        triton_poi_fused_div_sigmoid_sigmoid_backward_sub_5[grid(256)](
            primals_1, primals_6, primals_8, buf5, buf7, buf13, buf15, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_6
        del primals_8
        buf6 = reinterpret_tensor(buf11, (4, 4, 4, 4, 1), (1536, 384, 96,
            24, 1), 21)
        buf8 = reinterpret_tensor(buf11, (4, 4, 4, 4, 1), (1536, 384, 96,
            24, 1), 23)
        buf12 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1),
            torch.float32)
        buf14 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1),
            torch.float32)
        triton_poi_fused_div_sigmoid_sigmoid_backward_sub_6[grid(256)](
            primals_1, primals_7, primals_9, buf6, buf8, buf12, buf14, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_7
        del primals_9
        # Remaining 20 channels: the two 10-threshold sigmoid predicates,
        # written directly into views of buf11 at offsets 0 and 10.
        buf9 = reinterpret_tensor(buf11, (4, 4, 4, 4, 10), (1536, 384, 96,
            24, 1), 0)
        triton_poi_fused_div_sigmoid_sub_7[grid(2560)](buf3, primals_3,
            buf9, 2560, XBLOCK=256, num_warps=4, num_stages=1)
        buf10 = reinterpret_tensor(buf11, (4, 4, 4, 4, 10), (1536, 384, 96,
            24, 1), 10)
        triton_poi_fused_div_sigmoid_sub_8[grid(2560)](buf4, primals_5,
            buf10, 2560, XBLOCK=256, num_warps=4, num_stages=1)
    return buf11, primals_3, primals_5, buf3, buf4, buf12, buf13, buf14, buf15
def apply_last_dim(model, x):
    """Apply *model* over the last dimension of *x*, keeping leading dims."""
    # Flatten everything except the last dimension, run the model, then
    # restore the leading dimensions around the model's new last dim.
    shape = list(x.size())
    out = model(x.contiguous().view(-1, shape[-1]))
    shape[-1] = out.size(-1)
    return out.view(torch.Size(shape))
def get_int_dim_index(name):
    """Map an axis name to its integer component index.

    Integers pass through unchanged.  Otherwise *name* must be a single
    character from 'axyz', mapping to -1, 0, 1, 2 respectively ('a' is
    the sentinel meaning "all components").

    Raises:
        AssertionError: if *name* is not a single character in 'axyz'.
    """
    if isinstance(name, int):
        return name
    name_list = 'axyz'
    # Require a single character: the original `name in name_list` check
    # accepted substrings like 'xy' and then crashed with IndexError.
    assert isinstance(name, str) and len(name) == 1 and name in name_list
    # str.index replaces the original hand-rolled comprehension scan.
    return name_list.index(name) - 1
class Length(nn.Module):
    """Euclidean norm over selected components of the last dimension."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index

    def forward(self, states, dim_index=None):
        if dim_index is None:
            dim_index = self.dim_index
        # Normalize dim_index into a list of integer component indices.
        if isinstance(dim_index, int):
            indices = [dim_index]
        else:
            indices = [get_int_dim_index(d) for d in dim_index]
        if -1 in indices:
            # -1 means "use every component": plain L2 norm.
            def extractor(flat):
                return torch.sqrt(torch.sum(flat * flat, dim=1, keepdim=True))
        else:
            def extractor(flat):
                picked = flat[:, indices]
                return torch.sqrt(torch.sum(picked.pow(2), dim=1,
                    keepdim=True))
        return apply_last_dim(extractor, states)

    def show(self, name='Length', indent=0, log=print, **kwargs):
        log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self.
            dim_index)))
class Distance(nn.Module):
    """Euclidean distance between two states, delegated to Length."""

    def __init__(self, dim_index=-1):
        super().__init__()
        self.dim_index = dim_index
        self.length = Length(dim_index)

    def forward(self, states1, states2, dim_index=None):
        # |s1 - s2| restricted to the requested components.
        delta = states1 - states2
        return self.length(delta, dim_index)

    def show(self, name='Distance', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name)
class Normalize(nn.Module):
    """Optionally rescale inputs according to a fitted distribution.

    ``distribution`` is None (identity), 'normal' (standardize by
    mean/std) or 'uniform' (rescale by min/max).  The two distribution
    parameters live in ``self.param`` as a frozen nn.Parameter.
    """

    def __init__(self, distribution=None, **kwargs):
        super().__init__()
        self.distribution = distribution
        self.data_ = []
        if distribution is None:
            pass
        elif distribution == 'normal':
            mean = kwargs.get('mean', 0)
            std = kwargs.get('std', 1)
            self.param = nn.Parameter(torch.Tensor([mean, std]), False)
        elif distribution == 'uniform':
            vmin = kwargs.get('minv', 0)
            vmax = kwargs.get('maxv', 1)
            self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False)
        else:
            raise NotImplementedError()

    def forward(self, x, keep_data=False):
        if keep_data:
            # Estimation pass: record raw values for later fitting only.
            self.data_.append(x.detach().cpu().view(-1))
            return x
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            mean, std = self.param[0], self.param[1]
            return (x - mean) / std
        if self.distribution == 'uniform':
            vmin, vmax = self.param[0], self.param[1]
            return (x - vmin) / (vmax - vmin + 1e-05)
        raise NotImplementedError()

    def reset_parameters(self, name=None):
        assert len(self.data_) > 0
        data = torch.cat(self.data_)
        self.data_ = []
        if self.distribution is None:
            pass
        elif self.distribution == 'normal':
            with torch.no_grad():
                self.param[0] = data.mean().item()
                self.param[1] = data.std().item()
            if name is not None:
                None  # logging stripped in the original source
        elif self.distribution == 'uniform':
            with torch.no_grad():
                self.param[0] = data.min().item()
                self.param[1] = data.max().item()
            if name is not None:
                None  # logging stripped in the original source
        else:
            raise NotImplementedError()

    def recover_threshold(self, x):
        # Inverse of forward(): map a normalized threshold back to raw units.
        if self.distribution is None:
            return x
        if self.distribution == 'normal':
            return x * float(self.param[1]) + float(self.param[0])
        if self.distribution == 'uniform':
            span = float(self.param[1] - self.param[0] + 1e-05)
            return x * span + float(self.param[0])
        raise NotImplementedError()

    def init_thresholds(self, x):
        # None and 'normal' share the same standard-normal initialization.
        if self.distribution in (None, 'normal'):
            nn.init.normal_(x, 0, 1)
        elif self.distribution == 'uniform':
            nn.init.uniform_(x, 0, 1)
        else:
            raise NotImplementedError()
class SoftCmp(nn.Module):
    """
    Sigmoid((x - y) / e^beta)
    """

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, y, beta):
        # A larger beta flattens the sigmoid, softening the comparison.
        scale = math.exp(beta)
        return self.sigmoid((x - y) / scale)
class Inequality(nn.Module):
    """Soft predicates ``state > threshold_k`` for ``out_dim`` thresholds."""

    def __init__(self, out_dim=1, distribution=None, **kwargs):
        super().__init__()
        self.out_dim = out_dim
        self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True
            )
        self.distribution = distribution
        self.normalize = Normalize(distribution)
        self.cmp = SoftCmp()
        self.normalize.init_thresholds(self.thresholds)

    def forward(self, states, beta=0, **kwargs):
        """
        :param states: [batch, length, n_agents, ... ]
        """
        # Append a trailing singleton dim so every threshold broadcasts.
        expanded = states.view(*(states.size() + (1,)))
        estimating = kwargs.get('estimate_parameters', False)
        expanded = self.normalize(expanded, keep_data=estimating)
        broadcast_shape = [1] * len(states.size()) + [self.out_dim]
        return self.cmp(expanded, self.thresholds.view(*broadcast_shape), beta)

    def reset_parameters(self, parameter_name, name=None):
        if parameter_name == 'primitive_inequality':
            self.normalize.reset_parameters(name=name)
            self.normalize.init_thresholds(self.thresholds)

    def get_descriptions(self, name='Inequality'):
        theta = self.thresholds.detach().cpu().view(self.out_dim)
        descriptions = []
        for k in range(theta.size(0)):
            t = self.normalize.recover_threshold(theta[k])
            # Re-scale to physical units for speed/acceleration features.
            if 'speed' in name:
                t = t * 8
            if 'acc' in name:
                t = t * 64
            descriptions.append('%s > %.2lf' % (name, t))
        return descriptions
class AlignDifferential(nn.Module):
    """Central-difference time derivative along dim 1, same output length."""

    def __init__(self):
        super().__init__()

    def new_length(self, length):
        return length

    def forward(self, states):
        """
        :param states: [batch, length, *]
        """
        # Mirror-extrapolate one step at each end so the central
        # difference is defined at both boundaries.
        front = states[:, 0:1] * 2 - states[:, 1:2]
        back = states[:, -1:] * 2 - states[:, -2:-1]
        padded = torch.cat([front, states, back], dim=1)
        return (padded[:, 2:] - padded[:, :-2]) / 2

    def show(self, name='AlignDifferential', indent=0, log=print, **kwargs):
        log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,))
class N_aryPrimitivesSomethingElse(nn.Module):
    """Base container holding named inequality primitives."""

    def __init__(self):
        super().__init__()
        self.out_dim = 0
        self.ineqs = nn.ModuleDict({})

    def reset_parameters(self, parameter_name):
        # Forward the reset to every registered inequality primitive.
        for key in self.ineqs:
            self.ineqs[key].reset_parameters(parameter_name, name=key)
class BinaryPrimitivesSomethingElseNew(N_aryPrimitivesSomethingElse):
    # Inductor-compiled variant of BinaryPrimitivesSomethingElse: same
    # primitive modules, but forward() hands their parameters to the
    # generated `call` graph instead of running the eager modules.

    def __init__(self, cmp_dim=10):
        super().__init__()
        self.distance = Distance()
        self.differential = AlignDifferential()
        # dist/app_vel get cmp_dim thresholds each; the four overlap/
        # contain predicates get a single threshold apiece.
        self.ineqs.update({'dist': Inequality(out_dim=cmp_dim, distribution
            ='uniform'), 'app_vel': Inequality(out_dim=cmp_dim,
            distribution='normal'), 'overlap_x': Inequality(out_dim=1,
            distribution=None), 'overlap_y': Inequality(out_dim=1,
            distribution=None), 'contain_x': Inequality(out_dim=1,
            distribution=None), 'contain_y': Inequality(out_dim=1,
            distribution=None)})
        self.out_dim = sum([self.ineqs[k].out_dim for k in self.ineqs])

    def forward(self, input_0):
        # Gather the parameters the compiled graph expects, in the
        # positional order hard-coded by `call`.
        primals_3 = self.ineqs.dist.thresholds
        primals_2 = self.ineqs.dist.normalize.param
        primals_5 = self.ineqs.app_vel.thresholds
        primals_4 = self.ineqs.app_vel.normalize.param
        primals_6 = self.ineqs.overlap_x.thresholds
        primals_7 = self.ineqs.overlap_y.thresholds
        primals_8 = self.ineqs.contain_x.thresholds
        primals_9 = self.ineqs.contain_y.thresholds
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        # call() returns (activations, *saved-for-backward tensors).
        return output[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
BinaryPrimitivesSomethingElse
| false
| 17,182
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
DQN_xy2
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class DQN_xy2(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation
    """

    def __init__(self):
        super(DQN_xy2, self).__init__()
        # Two-layer MLP: 4 -> 100 -> 1 scalar value.
        self.fc1 = nn.Linear(4, 100)
        self.fc2 = nn.Linear(100, 1)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        return self.fc2(hidden)
def get_inputs():
    # One random 4x4x4x4 tensor matching the module's expected input.
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    # The module's constructor takes no positional or keyword arguments.
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Inductor-generated fusion: add the per-feature bias (in_ptr0),
    # apply ReLU in place on in_out_ptr0, and store the boolean mask
    # (activation <= 0) into out_ptr0 for the backward pass.
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 100
    x2 = xindex % 1600
    x3 = xindex // 1600
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x4, tmp4, xmask)
    tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
def call(args):
    """Compiled forward for DQN_xy2: fc1 matmul, fused bias+ReLU, fc2 addmm.

    args: [fc1.weight (100,4), fc1.bias (100,), input (4,4,4,4),
           fc2.weight (1,100), fc2.bias (1,)].
    Returns the fc2 output plus tensors retained for the backward pass.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (100, 4), (4, 1))
    assert_size_stride(primals_2, (100,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1, 100), (100, 1))
    assert_size_stride(primals_5, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
        # Flatten the input to (64, 4) and multiply by fc1.weight^T.
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
        del buf0
        buf4 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1),
            torch.bool)
        get_raw_stream(0)
        # In-place bias + ReLU on buf1; buf4 records the <=0 mask.
        triton_poi_fused_relu_threshold_backward_0[grid(6400)](buf1,
            primals_2, buf4, 6400, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        # fc2: bias + hidden @ fc2.weight^T in a single addmm.
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 100),
            (100, 1), 0), reinterpret_tensor(primals_4, (100, 1), (1, 100),
            0), alpha=1, beta=1, out=buf3)
        del primals_5
    return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 100), (100, 1), 0), primals_4, buf4
class DQN_xy2New(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation
    """

    def __init__(self):
        super(DQN_xy2New, self).__init__()
        self.fc1 = nn.Linear(4, 100)
        self.fc2 = nn.Linear(100, 1)

    def forward(self, input_0):
        # Positional order expected by the compiled graph:
        # fc1.weight, fc1.bias, input, fc2.weight, fc2.bias.
        return call([
            self.fc1.weight,
            self.fc1.bias,
            input_0,
            self.fc2.weight,
            self.fc2.bias,
        ])[0]
|
CoAxLab/azad
|
DQN_xy2
| false
| 17,183
|
[
"MIT"
] | 6
|
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
MMDLoss
|
import torch
from torch import nn
import torch.nn.parallel
def gaussian_kernel(source, target, kernel_mul=2.0, kernel_num=5,
                    fix_sigma=None):
    """Multi-bandwidth RBF kernel matrix over the stacked source/target rows.

    Returns the elementwise sum of ``kernel_num`` Gaussian kernels whose
    bandwidths form a geometric ladder (ratio ``kernel_mul``) centred on a
    data-driven sigma, unless ``fix_sigma`` is given.
    """
    n_samples = int(source.size()[0]) + int(target.size()[0])
    total = torch.cat([source, target], dim=0)
    # Pairwise squared euclidean distances via broadcasting.
    diff = total.unsqueeze(0) - total.unsqueeze(1)
    L2_distance = (diff ** 2).sum(2)
    if fix_sigma:
        bandwidth = fix_sigma
    else:
        # Mean off-diagonal squared distance.
        bandwidth = torch.sum(L2_distance.data) / (n_samples ** 2 - n_samples)
    bandwidth /= kernel_mul ** (kernel_num // 2)
    bandwidths = [bandwidth * kernel_mul ** i for i in range(kernel_num)]
    return sum(torch.exp(-L2_distance / bw) for bw in bandwidths)
def DAN(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    """DAN/MMD loss: mean within-domain kernel similarity minus cross-domain."""
    n = int(source.size()[0])
    k = gaussian_kernel(source, target, kernel_mul=kernel_mul,
                        kernel_num=kernel_num, fix_sigma=fix_sigma)
    # Strict upper triangles of the two within-domain blocks.
    within = k[:n, :n].triu(1).sum() + k[n:, n:].triu(1).sum()
    cross = torch.sum(k[:n, n:])
    return within / (n * (n - 1) / 2) - cross * 2 / (n * n)
class MMDLoss(nn.Module):
    """nn.Module wrapper around the functional DAN/MMD loss."""

    def __init__(self, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
        super().__init__()
        self.kernel_mul = kernel_mul
        self.kernel_num = kernel_num
        self.fix_sigma = fix_sigma

    def forward(self, source, target):
        # Delegate to DAN with this module's kernel configuration.
        return DAN(source, target,
                   kernel_mul=self.kernel_mul,
                   kernel_num=self.kernel_num,
                   fix_sigma=self.fix_sigma)
def get_inputs():
    """Sample (source, target) inputs for the benchmark harness."""
    source, target = torch.rand([4, 4]), torch.rand([4, 4])
    return [source, target]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness."""
    init_args, init_kwargs = [], {}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_exp_mul_neg_pow_sub_sum_0(in_ptr0, in_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Fused front half of the MMD loss. Conceptually:
    #   total = cat([source, target])            # 8 x 4
    #   L2[i, j] = ||total[i] - total[j]||^2     # 8 x 8
    #   bandwidth = L2.sum() / (8*8 - 8) / kernel_mul**(kernel_num // 2)
    #   out[i, j] = sum_{p=0..4} exp(-L2[i, j] / (bandwidth * 2**p))
    # in_ptr0 = source (4 x 4), in_ptr1 = target (4 x 4),
    # out_ptr2 = summed 5-bandwidth Gaussian kernel matrix (8 x 8).
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 8  # row i of the concatenated 8-sample stack
    r1 = rindex // 8  # row j of the concatenated 8-sample stack
    r2 = rindex
    tmp0 = r0
    tl.full([1, 1], 0, tl.int64)
    tmp3 = tl.full([1, 1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # r0 < 4 -> take row from source, else from target
    # Feature column 0 of rows i and j.
    tmp5 = tl.load(in_ptr0 + tl.broadcast_to(4 * r0, [XBLOCK, RBLOCK]),
        tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1, 1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + tl.broadcast_to(4 * (-4 + r0), [XBLOCK, RBLOCK
        ]), tmp6, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tmp11 = r1
    tmp13 = tmp11 < tmp3
    tmp14 = tl.load(in_ptr0 + tl.broadcast_to(4 * r1, [XBLOCK, RBLOCK]),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp15 = tmp11 >= tmp3
    tmp17 = tl.load(in_ptr1 + tl.broadcast_to(4 * (-4 + r1), [XBLOCK,
        RBLOCK]), tmp15, eviction_policy='evict_last', other=0.0)
    tmp18 = tl.where(tmp13, tmp14, tmp17)
    tmp19 = tmp10 - tmp18
    tmp20 = tmp19 * tmp19  # squared difference, feature 0
    # Feature column 1.
    tmp21 = tl.load(in_ptr0 + tl.broadcast_to(1 + 4 * r0, [XBLOCK, RBLOCK]),
        tmp4, eviction_policy='evict_last', other=0.0)
    tmp22 = tl.load(in_ptr1 + tl.broadcast_to(1 + 4 * (-4 + r0), [XBLOCK,
        RBLOCK]), tmp6, eviction_policy='evict_last', other=0.0)
    tmp23 = tl.where(tmp4, tmp21, tmp22)
    tmp24 = tl.load(in_ptr0 + tl.broadcast_to(1 + 4 * r1, [XBLOCK, RBLOCK]),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp25 = tl.load(in_ptr1 + tl.broadcast_to(1 + 4 * (-4 + r1), [XBLOCK,
        RBLOCK]), tmp15, eviction_policy='evict_last', other=0.0)
    tmp26 = tl.where(tmp13, tmp24, tmp25)
    tmp27 = tmp23 - tmp26
    tmp28 = tmp27 * tmp27
    tmp29 = tmp20 + tmp28
    # Feature column 2.
    tmp30 = tl.load(in_ptr0 + tl.broadcast_to(2 + 4 * r0, [XBLOCK, RBLOCK]),
        tmp4, eviction_policy='evict_last', other=0.0)
    tmp31 = tl.load(in_ptr1 + tl.broadcast_to(2 + 4 * (-4 + r0), [XBLOCK,
        RBLOCK]), tmp6, eviction_policy='evict_last', other=0.0)
    tmp32 = tl.where(tmp4, tmp30, tmp31)
    tmp33 = tl.load(in_ptr0 + tl.broadcast_to(2 + 4 * r1, [XBLOCK, RBLOCK]),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp34 = tl.load(in_ptr1 + tl.broadcast_to(2 + 4 * (-4 + r1), [XBLOCK,
        RBLOCK]), tmp15, eviction_policy='evict_last', other=0.0)
    tmp35 = tl.where(tmp13, tmp33, tmp34)
    tmp36 = tmp32 - tmp35
    tmp37 = tmp36 * tmp36
    tmp38 = tmp29 + tmp37
    # Feature column 3.
    tmp39 = tl.load(in_ptr0 + tl.broadcast_to(3 + 4 * r0, [XBLOCK, RBLOCK]),
        tmp4, eviction_policy='evict_last', other=0.0)
    tmp40 = tl.load(in_ptr1 + tl.broadcast_to(3 + 4 * (-4 + r0), [XBLOCK,
        RBLOCK]), tmp6, eviction_policy='evict_last', other=0.0)
    tmp41 = tl.where(tmp4, tmp39, tmp40)
    tmp42 = tl.load(in_ptr0 + tl.broadcast_to(3 + 4 * r1, [XBLOCK, RBLOCK]),
        tmp13, eviction_policy='evict_last', other=0.0)
    tmp43 = tl.load(in_ptr1 + tl.broadcast_to(3 + 4 * (-4 + r1), [XBLOCK,
        RBLOCK]), tmp15, eviction_policy='evict_last', other=0.0)
    tmp44 = tl.where(tmp13, tmp42, tmp43)
    tmp45 = tmp41 - tmp44
    tmp46 = tmp45 * tmp45
    tmp47 = tmp38 + tmp46  # L2[i, j], full squared distance over 4 features
    tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK])
    tmp50 = tl.sum(tmp48, 1)[:, None]  # sum of all 64 distances
    tmp51 = -tmp47
    tmp52 = 0.017857142857142856  # 1 / (8*8 - 8): mean off-diagonal distance
    tmp53 = tmp50 * tmp52
    tmp54 = 0.25  # 1 / kernel_mul**(kernel_num // 2) = 1 / 2**2
    tmp55 = tmp53 * tmp54  # base bandwidth
    # Five kernels at bandwidth * 2**i for i = 0..4, accumulated into tmp81.
    tmp56 = 1.0
    tmp57 = tmp55 * tmp56
    tmp58 = tmp51 / tmp57
    tmp59 = tl_math.exp(tmp58)
    tmp60 = 0.0
    tmp61 = tmp59 + tmp60
    tmp62 = 2.0
    tmp63 = tmp55 * tmp62
    tmp64 = tmp51 / tmp63
    tmp65 = tl_math.exp(tmp64)
    tmp66 = tmp61 + tmp65
    tmp67 = 4.0
    tmp68 = tmp55 * tmp67
    tmp69 = tmp51 / tmp68
    tmp70 = tl_math.exp(tmp69)
    tmp71 = tmp66 + tmp70
    tmp72 = 8.0
    tmp73 = tmp55 * tmp72
    tmp74 = tmp51 / tmp73
    tmp75 = tl_math.exp(tmp74)
    tmp76 = tmp71 + tmp75
    tmp77 = 16.0
    tmp78 = tmp55 * tmp77
    tmp79 = tmp51 / tmp78
    tmp80 = tl_math.exp(tmp79)
    tmp81 = tmp76 + tmp80
    tl.store(out_ptr2 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp81, None)
@triton.jit
def triton_per_fused_add_div_mul_sub_sum_triu_1(in_out_ptr0, in_ptr0,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    # Reduce the 8x8 kernel matrix k (in_ptr0) to the scalar DAN loss:
    #   (k[:4, :4].triu(1).sum() + k[4:, 4:].triu(1).sum()) / 6
    #     - 2 * k[:4, 4:].sum() / 16
    # in_out_ptr0: 0-d output buffer receiving the loss.
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 4  # column within a 4x4 block
    r1 = rindex // 4  # row within a 4x4 block
    tmp3 = tl.load(in_ptr0 + (r0 + 8 * r1), None)  # k[:4, :4]
    tmp9 = tl.load(in_ptr0 + (36 + r0 + 8 * r1), None)  # k[4:, 4:] (offset 4*8+4)
    tmp14 = tl.load(in_ptr0 + (4 + r0 + 8 * r1), None)  # k[:4, 4:]
    tmp0 = r0 + -1 * r1
    tmp1 = tl.full([1, 1], 1, tl.int64)
    tmp2 = tmp0 >= tmp1  # strictly-upper-triangular mask, i.e. triu(1)
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
    tmp8 = tl.sum(tmp6, 1)[:, None]  # triu sum of source-source block
    tmp10 = tl.where(tmp2, tmp9, tmp4)
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.sum(tmp11, 1)[:, None]  # triu sum of target-target block
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.sum(tmp15, 1)[:, None]  # full sum of cross block
    tmp18 = tmp8 + tmp13
    tmp19 = 0.16666666666666666  # 1 / (n*(n-1)/2) with n = 4
    tmp20 = tmp18 * tmp19
    tmp21 = 2.0
    tmp22 = tmp17 * tmp21
    tmp23 = 0.0625  # 1 / (n*n) with n = 4
    tmp24 = tmp22 * tmp23
    tmp25 = tmp20 - tmp24
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None)
def call(args):
    """Compiled MMD/DAN loss for two (4, 4) inputs.

    Kernel 0 builds the summed 5-bandwidth Gaussian kernel matrix (8 x 8);
    kernel 1 reduces it to the scalar loss. Returns a 1-tuple of a 0-d tensor.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((8, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_div_exp_mul_neg_pow_sub_sum_0[grid(1)](arg0_1,
            arg1_1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf6 = buf3
        del buf3
        # Scalar reduction of the kernel matrix into the loss.
        triton_per_fused_add_div_mul_sub_sum_triu_1[grid(1)](buf6, buf2, 1,
            16, XBLOCK=1, num_warps=2, num_stages=1)
        del buf2
    return buf6,
def gaussian_kernel(source, target, kernel_mul=2.0, kernel_num=5,
                    fix_sigma=None):
    """Multi-bandwidth RBF kernel matrix over the stacked source/target rows.

    Returns the elementwise sum of ``kernel_num`` Gaussian kernels whose
    bandwidths form a geometric ladder (ratio ``kernel_mul``) centred on a
    data-driven sigma, unless ``fix_sigma`` is given.
    """
    n_samples = int(source.size()[0]) + int(target.size()[0])
    total = torch.cat([source, target], dim=0)
    # Pairwise squared euclidean distances via broadcasting.
    diff = total.unsqueeze(0) - total.unsqueeze(1)
    L2_distance = (diff ** 2).sum(2)
    if fix_sigma:
        bandwidth = fix_sigma
    else:
        # Mean off-diagonal squared distance.
        bandwidth = torch.sum(L2_distance.data) / (n_samples ** 2 - n_samples)
    bandwidth /= kernel_mul ** (kernel_num // 2)
    bandwidths = [bandwidth * kernel_mul ** i for i in range(kernel_num)]
    return sum(torch.exp(-L2_distance / bw) for bw in bandwidths)
def DAN(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    """DAN/MMD loss: mean within-domain kernel similarity minus cross-domain."""
    n = int(source.size()[0])
    k = gaussian_kernel(source, target, kernel_mul=kernel_mul,
                        kernel_num=kernel_num, fix_sigma=fix_sigma)
    # Strict upper triangles of the two within-domain blocks.
    within = k[:n, :n].triu(1).sum() + k[n:, n:].triu(1).sum()
    cross = torch.sum(k[:n, n:])
    return within / (n * (n - 1) / 2) - cross * 2 / (n * n)
class MMDLossNew(nn.Module):
    """MMD loss whose forward runs the precompiled Triton graph."""

    def __init__(self, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
        super().__init__()
        self.kernel_mul = kernel_mul
        self.kernel_num = kernel_num
        self.fix_sigma = fix_sigma

    def forward(self, input_0, input_1):
        # The compiled graph bakes in the default kernel configuration.
        return call([input_0, input_1])[0]
|
DA4EVENT/home
|
MMDLoss
| false
| 17,184
|
[
"MIT"
] | 5
|
18cc93a795ce132e05b886aa34565a102915b1c6
|
https://github.com/DA4EVENT/home/tree/18cc93a795ce132e05b886aa34565a102915b1c6
|
TransposedConvLayer
|
import torch
from torch import nn
import torch.nn.parallel
class TransposedConvLayer(nn.Module):
    """ConvTranspose2d (fixed stride 2, output_padding 1) + optional norm + activation.

    Args:
        in_channels, out_channels, kernel_size, padding: passed to
            nn.ConvTranspose2d. ``stride`` is accepted for interface
            compatibility but the layer always uses stride=2, as before.
        activation: name of a function in the ``torch`` namespace
            (e.g. 'relu'), or None to disable.
        norm: 'BN' for BatchNorm2d, 'IN' for InstanceNorm2d, or None.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, activation='relu', norm=None):
        super(TransposedConvLayer, self).__init__()
        # BatchNorm supplies its own shift, so the conv bias is redundant there.
        bias = False if norm == 'BN' else True
        self.transposed_conv2d = nn.ConvTranspose2d(in_channels,
            out_channels, kernel_size, stride=2, padding=padding,
            output_padding=1, bias=bias)
        if activation is not None:
            # Bug fix: the getattr fallback used to be the *string* 'relu',
            # which is not callable and crashed in forward() for unknown
            # activation names. Fall back to the callable torch.relu instead.
            self.activation = getattr(torch, activation, torch.relu)
        else:
            self.activation = None
        self.norm = norm
        if norm == 'BN':
            self.norm_layer = nn.BatchNorm2d(out_channels)
        elif norm == 'IN':
            self.norm_layer = nn.InstanceNorm2d(out_channels,
                track_running_stats=True)

    def forward(self, x):
        """Apply transposed conv, then norm (if configured), then activation."""
        out = self.transposed_conv2d(x)
        if self.norm in ['BN', 'IN']:
            out = self.norm_layer(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
def get_inputs():
    """Sample forward-pass inputs for the benchmark harness."""
    sample = torch.rand([4, 4, 4, 4])
    return [sample]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness."""
    kwargs = {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
    in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused bias-add + ReLU over the (4, 4, 11, 11) transposed-conv output.
    # in_out_ptr0: conv result, overwritten in place with relu(x + bias).
    # in_ptr0: per-channel bias (4,), broadcast over the 11x11 spatial plane.
    # out_ptr0: boolean mask (activation <= 0) saved for the backward pass.
    xnumel = 1936
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 121 % 4  # channel index -> bias element
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # bias add
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # which activations were clamped to zero
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
    """Compiled forward for TransposedConvLayer: conv_transpose2d + bias + ReLU.

    args: [weight (4,4,4,4), bias (4,), input (4,4,4,4)].
    Returns the activated output plus tensors retained for the backward pass.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Transposed convolution (stride 2, output_padding 1) without bias;
        # the bias is fused into the Triton kernel below.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
            2), padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(1, 1), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 11, 11), (484, 121, 11, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 11, 11), (484, 121, 11, 1), torch.bool
            )
        get_raw_stream(0)
        # In-place bias + ReLU; buf2 records the <=0 mask for backward.
        triton_poi_fused_convolution_relu_threshold_backward_0[grid(1936)](buf1
            , primals_2, buf2, 1936, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return buf1, primals_1, primals_3, buf2
class TransposedConvLayerNew(nn.Module):
    """Compiled-graph variant of TransposedConvLayer.

    The forward pass runs the precompiled Triton graph, which bakes in the
    conv + bias + ReLU pipeline; ``norm``/``activation`` attributes are kept
    for interface compatibility with the eager version.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, activation='relu', norm=None):
        super(TransposedConvLayerNew, self).__init__()
        # BatchNorm supplies its own shift, so the conv bias is redundant there.
        bias = False if norm == 'BN' else True
        self.transposed_conv2d = nn.ConvTranspose2d(in_channels,
            out_channels, kernel_size, stride=2, padding=padding,
            output_padding=1, bias=bias)
        if activation is not None:
            # Bug fix: the getattr fallback used to be the *string* 'relu',
            # which is not callable. Fall back to the callable torch.relu.
            self.activation = getattr(torch, activation, torch.relu)
        else:
            self.activation = None
        self.norm = norm
        if norm == 'BN':
            self.norm_layer = nn.BatchNorm2d(out_channels)
        elif norm == 'IN':
            self.norm_layer = nn.InstanceNorm2d(out_channels,
                track_running_stats=True)

    def forward(self, input_0):
        """Run the compiled graph: weight, bias, then the input tensor."""
        primals_1 = self.transposed_conv2d.weight
        primals_2 = self.transposed_conv2d.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
DA4EVENT/home
|
TransposedConvLayer
| false
| 17,185
|
[
"MIT"
] | 5
|
18cc93a795ce132e05b886aa34565a102915b1c6
|
https://github.com/DA4EVENT/home/tree/18cc93a795ce132e05b886aa34565a102915b1c6
|
ConvLayer
|
import torch
from torch import nn
import torch.nn.parallel
class ConvLayer(nn.Module):
    """Conv2d followed by optional normalization and activation.

    Args:
        in_channels, out_channels, kernel_size, stride, padding: standard
            nn.Conv2d arguments.
        activation: name of a function in the ``torch`` namespace
            (e.g. 'relu'), or None to disable.
        norm: 'BN' for BatchNorm2d, 'IN' for InstanceNorm2d, or None.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, activation='relu', norm=None):
        super(ConvLayer, self).__init__()
        # BatchNorm supplies its own shift, so the conv bias is redundant there.
        bias = False if norm == 'BN' else True
        self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding, bias=bias)
        if activation is not None:
            # Bug fix: the getattr fallback used to be the *string* 'relu',
            # which is not callable and crashed in forward() for unknown
            # activation names. Fall back to the callable torch.relu instead.
            self.activation = getattr(torch, activation, torch.relu)
        else:
            self.activation = None
        self.norm = norm
        if norm == 'BN':
            self.norm_layer = nn.BatchNorm2d(out_channels)
        elif norm == 'IN':
            self.norm_layer = nn.InstanceNorm2d(out_channels,
                track_running_stats=True)

    def forward(self, x):
        """Apply conv, then norm (if configured), then activation."""
        out = self.conv2d(x)
        if self.norm in ['BN', 'IN']:
            out = self.norm_layer(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
def get_inputs():
    """Sample forward-pass inputs for the benchmark harness."""
    sample = torch.rand([4, 4, 4, 4])
    return [sample]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness."""
    kwargs = {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
    in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused bias-add + ReLU over the (4, 4, 1, 1) conv output.
    # in_out_ptr0: conv result, overwritten in place with relu(x + bias).
    # in_ptr0: per-channel bias (4,).
    # out_ptr0: boolean mask (activation <= 0) saved for the backward pass.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # channel index -> bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # bias add
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # which activations were clamped to zero
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    """Compiled forward for ConvLayer: conv2d + fused bias + ReLU.

    args: [weight (4,4,4,4), bias (4,), input (4,4,4,4)].
    Returns the activated output plus tensors retained for the backward pass.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Convolution without bias; the bias is fused into the kernel below.
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
        get_raw_stream(0)
        # In-place bias + ReLU; buf2 records the <=0 mask for backward.
        triton_poi_fused_convolution_relu_threshold_backward_0[grid(16)](buf1,
            primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
    return buf1, primals_1, primals_3, buf2
class ConvLayerNew(nn.Module):
    """Compiled-graph variant of ConvLayer.

    The forward pass runs the precompiled Triton graph (conv + bias + ReLU);
    ``norm``/``activation`` attributes are kept for interface compatibility
    with the eager version.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, activation='relu', norm=None):
        super(ConvLayerNew, self).__init__()
        # BatchNorm supplies its own shift, so the conv bias is redundant there.
        bias = False if norm == 'BN' else True
        self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding, bias=bias)
        if activation is not None:
            # Bug fix: the getattr fallback used to be the *string* 'relu',
            # which is not callable. Fall back to the callable torch.relu.
            self.activation = getattr(torch, activation, torch.relu)
        else:
            self.activation = None
        self.norm = norm
        if norm == 'BN':
            self.norm_layer = nn.BatchNorm2d(out_channels)
        elif norm == 'IN':
            self.norm_layer = nn.InstanceNorm2d(out_channels,
                track_running_stats=True)

    def forward(self, input_0):
        """Run the compiled graph: weight, bias, then the input tensor."""
        primals_1 = self.conv2d.weight
        primals_2 = self.conv2d.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
|
DA4EVENT/home
|
ConvLayer
| false
| 17,186
|
[
"MIT"
] | 5
|
18cc93a795ce132e05b886aa34565a102915b1c6
|
https://github.com/DA4EVENT/home/tree/18cc93a795ce132e05b886aa34565a102915b1c6
|
DQN_xy4
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class DQN_xy4(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation
    """

    def __init__(self):
        super(DQN_xy4, self).__init__()
        self.fc1 = nn.Linear(4, 100)
        self.fc2 = nn.Linear(100, 25)
        self.fc3 = nn.Linear(25, 1)

    def forward(self, x):
        # Flatten per sample, then fc1 -> ReLU -> fc2 -> ReLU -> fc3.
        flat = x.view(x.size(0), -1)
        hidden = self.fc2(self.fc1(flat).relu()).relu()
        return self.fc3(hidden)
def get_inputs():
    """Sample forward-pass inputs for the benchmark harness."""
    sample = torch.rand([4, 4])
    return [sample]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness."""
    init_args, init_kwargs = [], {}
    return [init_args, init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias-add + ReLU over the (4, 100) fc1 output.
    # in_out_ptr0: linear output, overwritten with relu(x + bias).
    # in_ptr0: per-feature bias (100,), broadcast over rows.
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 100  # feature index -> bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # bias add
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias-add + ReLU over the (4, 25) fc2 output.
    # in_out_ptr0: linear output, overwritten with relu(x + bias).
    # in_ptr0: per-feature bias (25,), broadcast over rows.
    xnumel = 100
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 25  # feature index -> bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # bias add
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
    """Compiled forward for DQN_xy4: three linear layers with fused ReLUs.

    args: [input (4,4), fc1.weight (100,4), fc1.bias (100,),
           fc2.weight (25,100), fc2.bias (25,), fc3.weight (1,25),
           fc3.bias (1,)].
    Returns the scalar-per-sample output plus tensors kept for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (100, 4), (4, 1))
    assert_size_stride(primals_3, (100,), (1,))
    assert_size_stride(primals_4, (25, 100), (100, 1))
    assert_size_stride(primals_5, (25,), (1,))
    assert_size_stride(primals_6, (1, 25), (25, 1))
    assert_size_stride(primals_7, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
        # fc1 matmul; bias + ReLU are fused into the kernel below.
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 100),
            (1, 4), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(400)](buf1, primals_3, 400, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 25), (25, 1), torch.float32)
        # fc2 matmul + fused bias/ReLU.
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (100, 25), (1,
            100), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(100)](buf3, primals_5, 100, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        # fc3: bias + hidden @ fc3.weight^T in a single addmm.
        extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
            (25, 1), (1, 25), 0), alpha=1, beta=1, out=buf5)
        del primals_7
    return buf5, primals_1, buf1, buf3, primals_6, primals_4
class DQN_xy4New(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation
    """

    def __init__(self):
        super(DQN_xy4New, self).__init__()
        self.fc1 = nn.Linear(4, 100)
        self.fc2 = nn.Linear(100, 25)
        self.fc3 = nn.Linear(25, 1)

    def forward(self, input_0):
        # Positional order expected by the compiled graph:
        # input, fc1.w, fc1.b, fc2.w, fc2.b, fc3.w, fc3.b.
        return call([
            input_0,
            self.fc1.weight, self.fc1.bias,
            self.fc2.weight, self.fc2.bias,
            self.fc3.weight, self.fc3.bias,
        ])[0]
|
CoAxLab/azad
|
DQN_xy4
| false
| 17,187
|
[
"MIT"
] | 6
|
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
DQN_hot5
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class DQN_hot5(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation

    Params
    ------
    m, n: int
        Board size
    num_actions: int
        Number of action-value to output, one-to-one correspondence
        to action in game.
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot5, self).__init__()
        self.fc1 = nn.Linear(m * n, 1000)
        self.fc2 = nn.Linear(1000, 2000)
        self.fc3 = nn.Linear(2000, num_actions)

    def forward(self, x):
        # Flatten the board, then fc1 -> ReLU -> fc2 -> ReLU -> fc3.
        flat = x.view(x.size(0), -1)
        hidden = self.fc2(self.fc1(flat).relu()).relu()
        return self.fc3(hidden)
def get_inputs():
    """Sample forward-pass inputs for the benchmark harness."""
    sample = torch.rand([4, 4, 4])
    return [sample]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness."""
    kwargs = {'m': 4, 'n': 4, 'num_actions': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias-add + ReLU over the (4, 1000) fc1 output.
    # in_out_ptr0: linear output, overwritten with relu(x + bias).
    # in_ptr0: per-feature bias (1000,), broadcast over rows.
    xnumel = 4000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 1000  # feature index -> bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # bias add
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias-add + ReLU over the (4, 2000) fc2 output.
    # in_out_ptr0: linear output stored with a padded row stride of 2016
    #   (a layout chosen by the compiler), overwritten with relu(x + bias).
    # in_ptr0: per-feature bias (2000,), broadcast over rows.
    xnumel = 8000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2000  # feature index -> bias element
    x1 = xindex // 2000  # row index into the padded buffer
    tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # bias add
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask)
def call(args):
    """Compiled forward for DQN_hot5: three linear layers with fused ReLUs.

    args: [input (4,4,4), fc1.weight (1000,16), fc1.bias (1000,),
           fc2.weight (2000,1000), fc2.bias (2000,), fc3.weight (4,2000),
           fc3.bias (4,)].
    Returns the action-value output plus tensors kept for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (1000, 16), (16, 1))
    assert_size_stride(primals_3, (1000,), (1,))
    assert_size_stride(primals_4, (2000, 1000), (1000, 1))
    assert_size_stride(primals_5, (2000,), (1,))
    assert_size_stride(primals_6, (4, 2000), (2000, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32)
        # Flatten input to (4, 16); fc1 matmul, bias/ReLU fused below.
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
            ), reinterpret_tensor(primals_2, (16, 1000), (1, 16), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(4000)](buf1, primals_3, 4000, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_3
        # Note the padded row stride (2016) of the fc2 output buffer.
        buf2 = empty_strided_cuda((4, 2000), (2016, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1000, 2000),
            (1, 1000), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(8000)](buf3, primals_5, 8000, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # fc3: bias + hidden @ fc3.weight^T in a single addmm.
        extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
            (2000, 4), (1, 2000), 0), alpha=1, beta=1, out=buf4)
        del primals_7
    return buf4, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
        ), buf1, buf3, primals_6, primals_4
class DQN_hot5New(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation

    Params
    ------
    m, n: int
        Board size
    num_actions: int
        Number of action-value to output, one-to-one correspondence
        to action in game.
    """

    def __init__(self, m, n, num_actions):
        super(DQN_hot5New, self).__init__()
        self.fc1 = nn.Linear(m * n, 1000)
        self.fc2 = nn.Linear(1000, 2000)
        self.fc3 = nn.Linear(2000, num_actions)

    def forward(self, input_0):
        # Positional order expected by the compiled graph:
        # input, fc1.w, fc1.b, fc2.w, fc2.b, fc3.w, fc3.b.
        return call([
            input_0,
            self.fc1.weight, self.fc1.bias,
            self.fc2.weight, self.fc2.bias,
            self.fc3.weight, self.fc3.bias,
        ])[0]
|
CoAxLab/azad
|
DQN_hot5
| false
| 17,188
|
[
"MIT"
] | 6
|
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
DQN_xy5
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class DQN_xy5(nn.Module):
    """
    A MLP for DQN learning.

    Note: Uses a one hot board representation

    Params
    ------
    m, n: int
        Board size
    num_actions: int
        Number of action-value to output, one-to-one
        correspondence to action in game.
    """

    def __init__(self):
        super(DQN_xy5, self).__init__()
        self.fc1 = nn.Linear(4, 1000)
        self.fc2 = nn.Linear(1000, 2000)
        self.fc3 = nn.Linear(2000, 1)

    def forward(self, x):
        # Flatten per sample, then fc1 -> ReLU -> fc2 -> ReLU -> fc3.
        flat = x.view(x.size(0), -1)
        hidden = self.fc2(self.fc1(flat).relu()).relu()
        return self.fc3(hidden)
def get_inputs():
    """Sample forward-pass inputs for the benchmark harness."""
    sample = torch.rand([4, 4])
    return [sample]
def get_init_inputs():
    """Constructor (args, kwargs) for the benchmark harness."""
    init_args, init_kwargs = [], {}
    return [init_args, init_kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias-add + ReLU over the (4, 1000) fc1 output.
    # in_out_ptr0: linear output, overwritten with relu(x + bias).
    # in_ptr0: per-feature bias (1000,), broadcast over rows.
    xnumel = 4000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 1000  # feature index -> bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # bias add
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place bias-add + ReLU over the (4, 2000) fc2 output.
    # in_out_ptr0: linear output stored with a padded row stride of 2016
    #   (a layout chosen by the compiler), overwritten with relu(x + bias).
    # in_ptr0: per-feature bias (2000,), broadcast over rows.
    xnumel = 8000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2000  # feature index -> bias element
    x1 = xindex // 2000  # row index into the padded buffer
    tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # bias add
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask)
def call(args):
    """Compiled forward for DQN_xy5: three linear layers with fused ReLUs.

    args: [input (4,4), fc1.weight (1000,4), fc1.bias (1000,),
           fc2.weight (2000,1000), fc2.bias (2000,), fc3.weight (1,2000),
           fc3.bias (1,)].
    Returns the scalar-per-sample output plus tensors kept for backward.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (1000, 4), (4, 1))
    assert_size_stride(primals_3, (1000,), (1,))
    assert_size_stride(primals_4, (2000, 1000), (1000, 1))
    assert_size_stride(primals_5, (2000,), (1,))
    assert_size_stride(primals_6, (1, 2000), (2000, 1))
    assert_size_stride(primals_7, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32)
        # fc1 matmul; bias + ReLU are fused into the kernel below.
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1000
            ), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(4000)](buf1, primals_3, 4000, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_3
        # Note the padded row stride (2016) of the fc2 output buffer.
        buf2 = empty_strided_cuda((4, 2000), (2016, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1000, 2000),
            (1, 1000), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(8000)](buf3, primals_5, 8000, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        # fc3: bias + hidden @ fc3.weight^T in a single addmm.
        extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
            (2000, 1), (1, 2000), 0), alpha=1, beta=1, out=buf5)
        del primals_7
    return buf5, primals_1, buf1, buf3, primals_6, primals_4
class DQN_xy5New(nn.Module):
    """
    A MLP for DQN learning, driven by the compiled inductor graph.

    Note: Uses a one hot board representation. The layer sizes
    (4 -> 1000 -> 2000 -> 1) are fixed by the generated `call` graph.
    """

    def __init__(self):
        super(DQN_xy5New, self).__init__()
        self.fc1 = nn.Linear(4, 1000)
        self.fc2 = nn.Linear(1000, 2000)
        self.fc3 = nn.Linear(2000, 1)

    def forward(self, input_0):
        # Pack [input, fc1.w, fc1.b, fc2.w, fc2.b, fc3.w, fc3.b] — exactly
        # the order the generated call() unpacks.
        packed = [input_0]
        for layer in (self.fc1, self.fc2, self.fc3):
            packed.extend([layer.weight, layer.bias])
        return call(packed)[0]
|
CoAxLab/azad
|
DQN_xy5
| false
| 17,189
|
[
"MIT"
] | 6
|
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
DQN_xy1
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class DQN_xy1(nn.Module):
    """
    A MLP for DQN learning.
    Note: Uses a (x,y) coordinate board/action representation.
    """

    def __init__(self):
        super(DQN_xy1, self).__init__()
        self.fc1 = nn.Linear(4, 15)
        self.fc2 = nn.Linear(15, 1)

    def forward(self, x):
        # One ReLU hidden layer followed by a linear scalar head.
        return self.fc2(self.fc1(x).relu())
def get_inputs():
    # One random batch shaped (4, 4, 4, 4) for smoke-testing the module.
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    # The constructor takes no positional or keyword arguments.
    return [[], dict()]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU over 960 contiguous elements (64 x 15);
    # also writes the boolean (out <= 0) mask used by ReLU's backward.
    xnumel = 960
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 15  # feature index selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # mask for backward
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    # Inductor-generated forward for DQN_xy1 on a (4, 4, 4, 4) input,
    # flattened to (64, 4): fc1 (mm) -> fused bias+ReLU (+ backward mask)
    # -> fc2 (addmm). args = [fc1.w, fc1.b, input, fc2.w, fc2.b].
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()  # drop caller references so buffers can be freed early
    assert_size_stride(primals_1, (15, 4), (4, 1))
    assert_size_stride(primals_2, (15,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1, 15), (15, 1))
    assert_size_stride(primals_5, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 15), (15, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 15), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 15), (240, 60, 15, 1), 0)
        del buf0
        buf4 = empty_strided_cuda((4, 4, 4, 15), (240, 60, 15, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(960)](buf1,
            primals_2, buf4, 960, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 15),
            (15, 1), 0), reinterpret_tensor(primals_4, (15, 1), (1, 15), 0),
            alpha=1, beta=1, out=buf3)
        del primals_5
    # First item is the network output reshaped back to (4, 4, 4, 1);
    # the rest are saved for backward.
    return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 15), (15, 1), 0), primals_4, buf4
class DQN_xy1New(nn.Module):
    """
    A MLP for DQN learning, driven by the compiled inductor graph.
    Note: Uses a (x,y) coordinate board/action representation.
    """

    def __init__(self):
        super(DQN_xy1New, self).__init__()
        self.fc1 = nn.Linear(4, 15)
        self.fc2 = nn.Linear(15, 1)

    def forward(self, input_0):
        # Argument order must match what the generated call() unpacks:
        # [fc1.w, fc1.b, input, fc2.w, fc2.b].
        fc1, fc2 = self.fc1, self.fc2
        outputs = call([fc1.weight, fc1.bias, input_0, fc2.weight, fc2.bias])
        return outputs[0]
|
CoAxLab/azad
|
DQN_xy1
| false
| 17,190
|
[
"MIT"
] | 6
|
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
down
|
import torch
from torch import nn
from torch.nn import functional as F
import torch.nn.parallel
class down(nn.Module):
    """
    UNet-style encoder block:
    Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU

    Methods
    -------
    forward(x)
        Returns output tensor after passing input `x` through the block.
    """

    def __init__(self, inChannels, outChannels, filterSize):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the first convolutional layer.
        outChannels : int
            number of output channels for the first convolutional layer;
            also the in/out channel count of the second one.
        filterSize : int
            side N of the N x N convolution filter.
        """
        super(down, self).__init__()
        pad = int((filterSize - 1) / 2)
        self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize,
            stride=1, padding=pad)
        self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize,
            stride=1, padding=pad)

    def forward(self, x):
        """
        Run the block: 2x2 average pool, then two conv + leaky-ReLU stages.

        Parameters
        ----------
        x : tensor
            input to the block.

        Returns
        -------
        tensor
            output of the block.
        """
        pooled = F.avg_pool2d(x, 2)
        hidden = F.leaky_relu(self.conv1(pooled), negative_slope=0.1)
        return F.leaky_relu(self.conv2(hidden), negative_slope=0.1)
def get_inputs():
    # One random image batch shaped (4, 4, 64, 64) for smoke-testing.
    return [torch.rand(4, 4, 64, 64)]
def get_init_inputs():
    # Constructor kwargs mirror down.__init__'s signature.
    return [[], dict(inChannels=4, outChannels=4, filterSize=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # 2x2 average pooling on 64-wide rows (input row stride 128 covers two
    # source rows): each output element averages a 2x2 window.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32  # output column
    x1 = xindex // 32  # output row (across batch/channel planes)
    x2 = xindex
    # The four corners of the 2x2 input window:
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
        ='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25  # 1 / window size
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Bias-add + leaky ReLU (slope 0.1) over the (4, 4, 31, 31) conv1 output.
    # out_ptr0 receives the boolean (x > 0) mask (saved for backward),
    # out_ptr1 the activation. in_ptr1 is the per-channel bias.
    xnumel = 15376
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 961 % 4  # channel index (31 * 31 = 961 per plane)
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add bias
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)  # leaky ReLU
    tl.store(out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Same fused bias-add + leaky ReLU (slope 0.1) as kernel 1, for the
    # (4, 4, 30, 30) conv2 output; the boolean mask is written into a
    # padded layout (per-sample stride 3712 instead of 3600).
    xnumel = 14400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 900 % 4  # channel index (30 * 30 = 900 per plane)
    x2 = xindex // 3600  # sample index
    x4 = xindex % 3600  # offset within a sample
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add bias
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)  # leaky ReLU
    tl.store(out_ptr0 + (x4 + 3712 * x2), tmp4, xmask)
    tl.store(out_ptr1 + x3, tmp7, xmask)
def call(args):
    # Inductor-generated forward for `down`:
    # avg_pool2d(2) -> conv1 -> fused bias+leaky_relu(0.1)
    #              -> conv2 -> fused bias+leaky_relu(0.1).
    # args = [input, conv1.w, conv1.b, conv2.w, conv2.b]; buf2/buf5 are the
    # boolean masks saved for the leaky-ReLU backward.
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()  # drop caller references so buffers can be freed early
    assert_size_stride(primals_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_avg_pool2d_0[grid(16384)](primals_1, buf0, 16384,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 31, 31), (3844, 961, 31, 1))
        buf2 = empty_strided_cuda((4, 4, 31, 31), (3844, 961, 31, 1), torch
            .bool)
        buf3 = empty_strided_cuda((4, 4, 31, 31), (3844, 961, 31, 1), torch
            .float32)
        triton_poi_fused_convolution_leaky_relu_1[grid(15376)](buf1,
            primals_3, buf2, buf3, 15376, XBLOCK=256, num_warps=4, num_stages=1
            )
        del buf1
        del primals_3
        buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 4, 30, 30), (3600, 900, 30, 1))
        buf5 = empty_strided_cuda((4, 4, 30, 30), (3712, 900, 30, 1), torch
            .bool)
        buf6 = empty_strided_cuda((4, 4, 30, 30), (3600, 900, 30, 1), torch
            .float32)
        triton_poi_fused_convolution_leaky_relu_2[grid(14400)](buf4,
            primals_5, buf5, buf6, 14400, XBLOCK=256, num_warps=4, num_stages=1
            )
        del buf4
        del primals_5
    return buf6, primals_2, primals_4, buf0, buf2, buf3, buf5
class downNew(nn.Module):
    """
    UNet-style encoder block driven by the compiled inductor graph:
    Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU

    Methods
    -------
    forward(input_0)
        Returns the block's output tensor for `input_0`.
    """

    def __init__(self, inChannels, outChannels, filterSize):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the first convolutional layer.
        outChannels : int
            number of output channels for the first convolutional layer;
            also the in/out channel count of the second one.
        filterSize : int
            side N of the N x N convolution filter.
        """
        super(downNew, self).__init__()
        pad = int((filterSize - 1) / 2)
        self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize,
            stride=1, padding=pad)
        self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize,
            stride=1, padding=pad)

    def forward(self, input_0):
        # Argument order must match what the generated call() unpacks.
        packed = [input_0, self.conv1.weight, self.conv1.bias,
            self.conv2.weight, self.conv2.bias]
        return call(packed)[0]
|
DA4EVENT/home
|
down
| false
| 17,191
|
[
"MIT"
] | 5
|
18cc93a795ce132e05b886aa34565a102915b1c6
|
https://github.com/DA4EVENT/home/tree/18cc93a795ce132e05b886aa34565a102915b1c6
|
MaskUpdate
|
import torch
from torch import nn
from torch.nn.parameter import Parameter
class MaskUpdate(nn.Module):
    """Mask update: pow(relu(mask), alpha) with alpha kept in [0.6, 0.8]."""

    def __init__(self, alpha):
        super(MaskUpdate, self).__init__()
        self.updateFunc = nn.ReLU(False)
        self.alpha = Parameter(torch.tensor(alpha, dtype=torch.float32))

    def forward(self, inputMaskMap):
        # Clamp the learned exponent in place before applying it.
        self.alpha.data = torch.clamp(self.alpha.data, 0.6, 0.8)
        rectified = self.updateFunc(inputMaskMap)
        return torch.pow(rectified, self.alpha)
def get_inputs():
    # One random mask batch shaped (4, 4, 4, 4) for smoke-testing.
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    # Constructor kwargs mirror MaskUpdate.__init__'s signature.
    return [[], dict(alpha=4)]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_eq_ge_log_logical_and_mul_pow_relu_where_zeros_0(
    in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # out0 = relu(x) ** clamp(alpha, 0.6, 0.8); out1 = out0 * log(relu(x)),
    # zeroed where relu(x) == 0 and alpha >= 0 (the 0**a corner case).
    # out1 is saved alongside the output — presumably the d/d-alpha factor
    # for backward; confirm against the autograd graph.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr1 + 0)  # scalar alpha
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)  # relu(x)
    tmp5 = 0.6
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp7 = 0.8
    tmp8 = triton_helpers.minimum(tmp6, tmp7)  # clamp(alpha, 0.6, 0.8)
    tmp9 = libdevice.pow(tmp2, tmp8)
    tmp10 = 0.0
    tmp11 = tmp2 == tmp10
    tmp12 = tmp8 >= tmp10
    tmp13 = tmp11 & tmp12
    tmp14 = tl_math.log(tmp2)
    tmp15 = tmp9 * tmp14
    tmp16 = tl.where(tmp13, tmp10, tmp15)  # avoid log(0) poisoning
    tl.store(out_ptr0 + x0, tmp9, xmask)
    tl.store(out_ptr1 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_clamp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Clamp the scalar alpha parameter into [0.6, 0.8] and write it to a
    # 0-d output buffer.
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = 0.6
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp4 = 0.8
    tmp5 = triton_helpers.minimum(tmp3, tmp4)
    tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None)
def call(args):
    # Inductor-generated forward for MaskUpdate.
    # args = [alpha (0-d parameter), mask]. Returns (pow(relu(mask),
    # clamped_alpha), the guarded pow*log term); also writes the clamped
    # alpha back into the parameter via aten.set_ (matching the original
    # `self.alpha.data = torch.clamp(...)`).
    primals_1, primals_2 = args
    args.clear()  # drop caller references so buffers can be freed early
    assert_size_stride(primals_1, (), ())
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_eq_ge_log_logical_and_mul_pow_relu_where_zeros_0[
            grid(256)](primals_2, primals_1, buf0, buf1, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((), (), torch.float32)
        triton_poi_fused_clamp_1[grid(1)](primals_1, buf2, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
        buf3 = torch.ops.aten.set_.source_Tensor(primals_1, buf2)
        assert_size_stride(buf3, (), ())
        del primals_1
    return buf0, buf1
class MaskUpdateNew(nn.Module):
    """Mask update via the compiled inductor graph: pow(relu(mask), alpha)
    with alpha clamped into [0.6, 0.8] and written back to the parameter."""

    def __init__(self, alpha):
        super(MaskUpdateNew, self).__init__()
        self.updateFunc = nn.ReLU(False)
        self.alpha = Parameter(torch.tensor(alpha, dtype=torch.float32))

    def forward(self, input_0):
        # call() expects [alpha, mask] and returns (output, pow*log term).
        return call([self.alpha, input_0])[0]
|
DLwbm123/LBAM_inpainting
|
MaskUpdate
| false
| 17,192
|
[
"MIT"
] | 7
|
c809c3cedf09cda7c175e930c7834ac39d8f526f
|
https://github.com/DLwbm123/LBAM_inpainting/tree/c809c3cedf09cda7c175e930c7834ac39d8f526f
|
ResidualBlock
|
import torch
from torch import nn
import torch.nn.parallel
class ResidualBlock(nn.Module):
    """Two 3x3 convolutions with optional BN/IN normalisation and a skip
    connection; `downsample` (if given) adapts the identity branch."""

    def __init__(self, in_channels, out_channels, stride=1, downsample=None,
        norm=None):
        super(ResidualBlock, self).__init__()
        # BN supplies its own shift, so the convs drop their bias then.
        bias = norm != 'BN'
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
            stride=stride, padding=1, bias=bias)
        self.norm = norm
        if norm == 'BN':
            self.bn1 = nn.BatchNorm2d(out_channels)
            self.bn2 = nn.BatchNorm2d(out_channels)
        elif norm == 'IN':
            self.bn1 = nn.InstanceNorm2d(out_channels)
            self.bn2 = nn.InstanceNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
            stride=1, padding=1, bias=bias)
        self.downsample = downsample

    def forward(self, x):
        # Identity branch, optionally reshaped by `downsample`.
        identity = self.downsample(x) if self.downsample else x
        out = self.conv1(x)
        if self.norm in ('BN', 'IN'):
            out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        if self.norm in ('BN', 'IN'):
            out = self.bn2(out)
        out += identity
        return self.relu(out)
def get_inputs():
    # One random feature-map batch shaped (4, 4, 4, 4) for smoke-testing.
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    # Constructor kwargs mirror ResidualBlock.__init__'s required params.
    return [[], dict(in_channels=4, out_channels=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place bias-add + ReLU over a contiguous (4, 4, 4, 4) conv output;
    # in_ptr0 is the per-channel bias.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4  # channel index (4 * 4 = 16 per plane)
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0,
    in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place bias-add + residual-add + ReLU; also emits the (out <= 0)
    # mask used by ReLU's backward. in_ptr0 = per-channel bias,
    # in_ptr1 = residual (skip-connection) tensor.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4  # channel index
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x3, xmask)
    tmp2 = tmp0 + tmp1  # add bias
    tmp4 = tmp2 + tmp3  # add residual
    tmp5 = tl.full([1], 0, tl.int32)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)  # ReLU
    tmp7 = 0.0
    tmp8 = tmp6 <= tmp7  # mask for backward
    tl.store(in_out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
    # Inductor-generated forward for ResidualBlock (norm=None path):
    # conv1 -> fused bias+ReLU -> conv2 -> fused bias+residual+ReLU.
    # args = [input, conv1.w, conv1.b, conv2.w, conv2.b].
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()  # drop caller references so buffers can be freed early
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_3, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
        buf3 = buf2
        del buf2
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
            buf3, primals_5, primals_1, buf4, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_5
    # buf3 is the block output; buf4 and the rest are saved for backward.
    return buf3, primals_1, primals_2, primals_4, buf1, buf4
class ResidualBlockNew(nn.Module):
    """Residual block driven by the compiled inductor graph (the generated
    call() covers the norm=None, downsample=None path)."""

    def __init__(self, in_channels, out_channels, stride=1, downsample=None,
        norm=None):
        super(ResidualBlockNew, self).__init__()
        # BN supplies its own shift, so the convs drop their bias then.
        bias = norm != 'BN'
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
            stride=stride, padding=1, bias=bias)
        self.norm = norm
        if norm == 'BN':
            self.bn1 = nn.BatchNorm2d(out_channels)
            self.bn2 = nn.BatchNorm2d(out_channels)
        elif norm == 'IN':
            self.bn1 = nn.InstanceNorm2d(out_channels)
            self.bn2 = nn.InstanceNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
            stride=1, padding=1, bias=bias)
        self.downsample = downsample

    def forward(self, input_0):
        # Argument order must match what the generated call() unpacks.
        packed = [input_0, self.conv1.weight, self.conv1.bias,
            self.conv2.weight, self.conv2.bias]
        return call(packed)[0]
|
DA4EVENT/home
|
ResidualBlock
| false
| 17,193
|
[
"MIT"
] | 5
|
18cc93a795ce132e05b886aa34565a102915b1c6
|
https://github.com/DA4EVENT/home/tree/18cc93a795ce132e05b886aa34565a102915b1c6
|
DQN_mlp
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class DQN_mlp(nn.Module):
    """Layers for a Deep Q Network, based on a simple MLP."""

    def __init__(self, m, n, num_actions, num_hidden1=1000, num_hidden2=2000):
        super(DQN_mlp, self).__init__()
        self.m = m
        self.n = n
        self.num_hidden1 = num_hidden1
        self.num_hidden2 = num_hidden2
        self.fc1 = nn.Linear(m * n, num_hidden1)
        self.fc2 = nn.Linear(num_hidden1, num_hidden2)
        self.fc3 = nn.Linear(num_hidden2, num_hidden2)
        self.fc4 = nn.Linear(num_hidden2, num_actions)

    def forward(self, x):
        # Flatten each board, run three ReLU layers, then the linear head.
        h = x.view(x.size(0), -1)
        for layer in (self.fc1, self.fc2, self.fc3):
            h = F.relu(layer(h))
        return self.fc4(h)
def get_inputs():
    # One random board batch shaped (4, 4, 4) for smoke-testing.
    return [torch.rand(4, 4, 4)]
def get_init_inputs():
    # Constructor kwargs mirror DQN_mlp.__init__'s required params.
    return [[], dict(m=4, n=4, num_actions=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place fused bias-add + ReLU for the contiguous (4, 1000) fc1
    # activation; in_ptr0 is the per-feature bias.
    xnumel = 4000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 1000  # feature index selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place fused bias-add + ReLU over a (4, 2000) activation whose rows
    # are stored with a padded stride of 2016 elements; in_ptr0 is the
    # per-feature bias. Reused for both fc2 and fc3 activations.
    xnumel = 8000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2000  # feature (column) index
    x1 = xindex // 2000  # row index
    tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # add bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask)
def call(args):
    # Inductor-generated forward for DQN_mlp(m=4, n=4, num_actions=4):
    # flatten -> fc1 (mm) -> fused bias+ReLU -> fc2 -> fused bias+ReLU
    # -> fc3 -> fused bias+ReLU -> fc4 (addmm).
    # args = [input, fc1.w, fc1.b, fc2.w, fc2.b, fc3.w, fc3.b, fc4.w, fc4.b].
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()  # drop caller references so buffers can be freed early
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (1000, 16), (16, 1))
    assert_size_stride(primals_3, (1000,), (1,))
    assert_size_stride(primals_4, (2000, 1000), (1000, 1))
    assert_size_stride(primals_5, (2000,), (1,))
    assert_size_stride(primals_6, (2000, 2000), (2000, 1))
    assert_size_stride(primals_7, (2000,), (1,))
    assert_size_stride(primals_8, (4, 2000), (2000, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
            ), reinterpret_tensor(primals_2, (16, 1000), (1, 16), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(4000)](buf1, primals_3, 4000, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 2000), (2016, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1000, 2000),
            (1, 1000), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(8000)](buf3, primals_5, 8000, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 2000), (2016, 1), torch.float32)
        extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (2000, 2000),
            (1, 2000), 0), out=buf4)
        buf5 = buf4
        del buf4
        triton_poi_fused_relu_1[grid(8000)](buf5, primals_7, 8000, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_7
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8,
            (2000, 4), (1, 2000), 0), alpha=1, beta=1, out=buf6)
        del primals_9
    # buf6 is the Q-value output; the rest are saved for backward.
    return buf6, reinterpret_tensor(primals_1, (4, 16), (16, 1), 0
        ), buf1, buf3, buf5, primals_8, primals_6, primals_4
class DQN_mlpNew(nn.Module):
    """Layers for a Deep Q Network (simple MLP), driven by the compiled
    inductor graph."""

    def __init__(self, m, n, num_actions, num_hidden1=1000, num_hidden2=2000):
        super(DQN_mlpNew, self).__init__()
        self.m = m
        self.n = n
        self.num_hidden1 = num_hidden1
        self.num_hidden2 = num_hidden2
        self.fc1 = nn.Linear(m * n, num_hidden1)
        self.fc2 = nn.Linear(num_hidden1, num_hidden2)
        self.fc3 = nn.Linear(num_hidden2, num_hidden2)
        self.fc4 = nn.Linear(num_hidden2, num_actions)

    def forward(self, input_0):
        # Pack [input, fc1.w, fc1.b, ..., fc4.w, fc4.b] — exactly the order
        # the generated call() unpacks.
        packed = [input_0]
        for layer in (self.fc1, self.fc2, self.fc3, self.fc4):
            packed.extend([layer.weight, layer.bias])
        return call(packed)[0]
|
CoAxLab/azad
|
DQN_mlp
| false
| 17,194
|
[
"MIT"
] | 6
|
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
Attention
|
import torch
import torch.nn as nn
class Attention(nn.Module):
    """Attention pooling: scores each node against node 0 of its row,
    softmax-weights over dim 2, then averages the pooled vectors over dim 1.
    NOTE: `.squeeze()` drops every size-1 dim, so a batch of 1 would also
    be squeezed (same as the original behaviour)."""

    def __init__(self, n_h):
        super(Attention, self).__init__()
        self.linear = nn.Linear(n_h * 2, 1)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, x):
        # Broadcast node 0's features alongside every node's own features.
        anchor = x[:, :, 0, :].unsqueeze(2).expand_as(x)
        paired = torch.cat((anchor, x), 3)
        scores = self.linear(paired).squeeze()
        weights = self.softmax(scores).unsqueeze(3)
        pooled = torch.sum(torch.mul(x, weights), dim=2)
        return torch.mean(pooled, dim=1)
def get_inputs():
    # One random feature batch shaped (4, 4, 4, 4) for smoke-testing.
    return [torch.rand(4, 4, 4, 4)]
def get_init_inputs():
    # Constructor kwargs mirror Attention.__init__'s signature.
    return [[], dict(n_h=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Builds cat((x[:, :, 0, :].expand_as(x), x), dim=3) for a (4, 4, 4, 4)
    # input: output features 0-3 come from node 0 of the row, 4-7 from the
    # current node.
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8  # output feature index (0-7)
    x2 = xindex // 32  # row index (selects node 0 of the row)
    x3 = xindex // 8  # flattened node index
    x4 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # first half of the concatenation?
    tmp5 = tl.load(in_ptr0 + (16 * x2 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr0 + (4 * x3 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax pass 1 over groups of 4 contiguous scores: subtract the group
    # max (numerical stability) and exponentiate.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # group index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)  # group max
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # Softmax pass 2: normalise each exponentiated score by its group-of-4
    # sum so every group sums to 1.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4  # group index
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6  # group sum
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mean_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Fully unrolled weighted pooling for x of shape (4, 4, 4, 4) and
    # softmax weights of shape (4, 4, 4):
    #   out[b, f] = mean over dim 1 of (sum over dim 2 of x * weights).
    # The 16 load pairs below cover the 4 x 4 (dim1 x dim2) positions;
    # in_ptr0 = x, in_ptr1 = weights.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4  # feature index
    x1 = xindex // 4  # batch index
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + 16 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
    tmp4 = tl.load(in_ptr1 + (1 + 16 * x1), xmask, eviction_policy='evict_last'
        )
    tmp7 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
    tmp8 = tl.load(in_ptr1 + (2 + 16 * x1), xmask, eviction_policy='evict_last'
        )
    tmp11 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
    tmp12 = tl.load(in_ptr1 + (3 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp15 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp16 = tl.load(in_ptr1 + (4 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp18 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
    tmp19 = tl.load(in_ptr1 + (5 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp22 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
    tmp23 = tl.load(in_ptr1 + (6 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp26 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
    tmp27 = tl.load(in_ptr1 + (7 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp31 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp32 = tl.load(in_ptr1 + (8 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp34 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
    tmp35 = tl.load(in_ptr1 + (9 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp38 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
    tmp39 = tl.load(in_ptr1 + (10 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp42 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
    tmp43 = tl.load(in_ptr1 + (11 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp47 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp48 = tl.load(in_ptr1 + (12 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp50 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
    tmp51 = tl.load(in_ptr1 + (13 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp54 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
    tmp55 = tl.load(in_ptr1 + (14 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    tmp58 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
    tmp59 = tl.load(in_ptr1 + (15 + 16 * x1), xmask, eviction_policy=
        'evict_last')
    # Weighted sums over dim 2, one accumulator per dim-1 slice:
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tmp17 = tmp15 * tmp16
    tmp20 = tmp18 * tmp19
    tmp21 = tmp17 + tmp20
    tmp24 = tmp22 * tmp23
    tmp25 = tmp21 + tmp24
    tmp28 = tmp26 * tmp27
    tmp29 = tmp25 + tmp28
    tmp30 = tmp14 + tmp29
    tmp33 = tmp31 * tmp32
    tmp36 = tmp34 * tmp35
    tmp37 = tmp33 + tmp36
    tmp40 = tmp38 * tmp39
    tmp41 = tmp37 + tmp40
    tmp44 = tmp42 * tmp43
    tmp45 = tmp41 + tmp44
    tmp46 = tmp30 + tmp45
    tmp49 = tmp47 * tmp48
    tmp52 = tmp50 * tmp51
    tmp53 = tmp49 + tmp52
    tmp56 = tmp54 * tmp55
    tmp57 = tmp53 + tmp56
    tmp60 = tmp58 * tmp59
    tmp61 = tmp57 + tmp60
    tmp62 = tmp46 + tmp61
    tmp63 = 4.0
    tmp64 = tmp62 / tmp63  # mean over the 4 dim-1 slices
    tl.store(out_ptr0 + x2, tmp64, xmask)
def call(args):
    """Inductor-generated forward for `Attention`.

    args: [primals_1 input (4,4,4,4), primals_2 linear weight (1,8),
    primals_3 linear bias (1,)]. Returns the attention-pooled output
    plus tensors saved for the backward pass.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 8), (8, 1))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0: concatenated features (last dim doubled 4 -> 8) for the linear layer
        buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(512)](primals_1, buf0, 512, XBLOCK=128,
            num_warps=4, num_stages=1)
        # buf2 = bias + buf0 @ weight^T, flattened to (64, 1) scores
        buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 8), (
            8, 1), 0), reinterpret_tensor(primals_2, (8, 1), (1, 8), 0),
            alpha=1, beta=1, out=buf2)
        del primals_2
        del primals_3
        # two-pass softmax over dim=2: exp-shift, then normalize
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf3
        # weighted mean of the input by the softmax attention weights
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_mean_mul_sum_3[grid(16)](primals_1, buf4, buf5, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del buf4
    return buf5, primals_1, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf2
class AttentionNew(nn.Module):
    """Attention module whose forward runs the fused Triton `call` path."""

    def __init__(self, n_h):
        super(AttentionNew, self).__init__()
        self.linear = nn.Linear(n_h * 2, 1)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, input_0):
        weight, bias = self.linear.weight, self.linear.bias
        return call([input_0, weight, bias])[0]
|
CrowdDynamicsLab/InfoMotif
|
Attention
| false
| 17,195
|
[
"BSD-3-Clause"
] | 7
|
cca1ffa14cc94408a5c4c50b7b1707c608e3bc9b
|
https://github.com/CrowdDynamicsLab/InfoMotif/tree/cca1ffa14cc94408a5c4c50b7b1707c608e3bc9b
|
MCDO
|
import torch
from torch import nn
class MCDO(nn.Module):
    """MLP with Monte-Carlo dropout.

    Layout: Linear -> ReLU -> Dropout, then (n_layers - 1) hidden blocks of
    Linear -> ReLU -> Dropout, then a final Linear. All Linear weights are
    drawn from N(0, 1/(4*dim)) and biases start at zero.
    """

    def __init__(self, in_dim, out_dim, n_layers=1, hid_dim=50, p=0.05):
        super().__init__()
        self.n_layers = n_layers
        self.linear_in = nn.Linear(in_dim, hid_dim)
        nn.init.normal_(self.linear_in.weight, std=1 / (4 * hid_dim) ** 0.5)
        nn.init.zeros_(self.linear_in.bias)
        self.dropout_in = nn.Dropout(p)
        if n_layers > 1:
            blocks = []
            for _ in range(n_layers - 1):
                hidden = nn.Linear(hid_dim, hid_dim)
                nn.init.normal_(hidden.weight, std=1 / (4 * hid_dim) ** 0.5)
                nn.init.zeros_(hidden.bias)
                blocks += [hidden, nn.ReLU(), nn.Dropout(p)]
            self.hid_layers = nn.Sequential(*blocks)
        self.linear_out = nn.Linear(hid_dim, out_dim)
        nn.init.normal_(self.linear_out.weight, std=1 / (4 * out_dim) ** 0.5)
        nn.init.zeros_(self.linear_out.bias)

    def forward(self, x):
        h = self.dropout_in(torch.relu(self.linear_in(x)))
        if self.n_layers > 1:
            h = self.hid_layers(h)
        return self.linear_out(h)
def get_inputs():
    """Sample forward-pass inputs for benchmarking."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for benchmarking."""
    return [[], {'in_dim': 4, 'out_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Fused bias-add + ReLU for MCDO's hidden activations; also emits the
# boolean mask (activation <= 0) that threshold_backward needs.
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # in_out_ptr0: matmul output, updated in place to relu(x + bias)
    # in_ptr0: bias vector (length 50), broadcast across rows
    # out_ptr0: bool mask of zeroed positions, saved for backward
    xnumel = 3200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 50
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
    """Inductor-generated forward for `MCDO` (single hidden layer path).

    args: [w_in (50,4), b_in (50,), input (4,4,4,4), w_out (4,50),
    b_out (4,)]. Note: dropout is elided in this compiled graph.
    Returns the output plus tensors saved for backward.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (50, 4), (4, 1))
    assert_size_stride(primals_2, (50,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 50), (50, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # hidden pre-activation: input (flattened to 64x4) @ w_in^T
        buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0)
        del buf0
        buf3 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
        get_raw_stream(0)
        # fused bias + ReLU (in place on buf1) and backward mask (buf3)
        triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf1,
            primals_2, buf3, 3200, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        # output layer: b_out + hidden @ w_out^T
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 50),
            (50, 1), 0), reinterpret_tensor(primals_4, (50, 4), (1, 50), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 50), (50, 1), 0), primals_4, buf3
class MCDONew(nn.Module):
    """MCDO variant whose forward dispatches to the fused Triton `call`."""

    def __init__(self, in_dim, out_dim, n_layers=1, hid_dim=50, p=0.05):
        super().__init__()
        self.n_layers = n_layers
        self.linear_in = nn.Linear(in_dim, hid_dim)
        nn.init.normal_(self.linear_in.weight, std=1 / (4 * hid_dim) ** 0.5)
        nn.init.zeros_(self.linear_in.bias)
        self.dropout_in = nn.Dropout(p)
        if n_layers > 1:
            blocks = []
            for _ in range(n_layers - 1):
                hidden = nn.Linear(hid_dim, hid_dim)
                nn.init.normal_(hidden.weight, std=1 / (4 * hid_dim) ** 0.5)
                nn.init.zeros_(hidden.bias)
                blocks += [hidden, nn.ReLU(), nn.Dropout(p)]
            self.hid_layers = nn.Sequential(*blocks)
        self.linear_out = nn.Linear(hid_dim, out_dim)
        nn.init.normal_(self.linear_out.weight, std=1 / (4 * out_dim) ** 0.5)
        nn.init.zeros_(self.linear_out.bias)

    def forward(self, input_0):
        # Argument order expected by the generated call(): w_in, b_in,
        # input, w_out, b_out.
        return call([self.linear_in.weight, self.linear_in.bias, input_0,
                     self.linear_out.weight, self.linear_out.bias])[0]
|
Daniil-Selikhanovych/bnn-vi
|
MCDO
| false
| 17,196
|
[
"MIT"
] | 3
|
6788edc1438c66609abca249e33a81da7a0ff1a2
|
https://github.com/Daniil-Selikhanovych/bnn-vi/tree/6788edc1438c66609abca249e33a81da7a0ff1a2
|
GaussActivation
|
import torch
from torch import nn
from torch.nn.parameter import Parameter
class GaussActivation(nn.Module):
    """Two-sided Gaussian activation with learnable amplitude and widths.

    Below ``mu`` the response is ``a * exp(-sigma1 * (x - mu)^2)``; at or
    above ``mu`` it is ``1 + (a - 1) * exp(-sigma2 * (x - mu)^2)``. Each
    parameter is clamped to its valid range at every forward call.
    """

    def __init__(self, a, mu, sigma1, sigma2):
        super(GaussActivation, self).__init__()
        self.a = Parameter(torch.tensor(a, dtype=torch.float32))
        self.mu = Parameter(torch.tensor(mu, dtype=torch.float32))
        self.sigma1 = Parameter(torch.tensor(sigma1, dtype=torch.float32))
        self.sigma2 = Parameter(torch.tensor(sigma2, dtype=torch.float32))

    def forward(self, inputFeatures):
        # Keep parameters inside their admissible intervals (in-place on .data).
        for param, lo, hi in ((self.a, 1.01, 6.0), (self.mu, 0.1, 3.0),
                              (self.sigma1, 1.0, 2.0), (self.sigma2, 1.0, 2.0)):
            param.data = torch.clamp(param.data, lo, hi)
        below = inputFeatures < self.mu
        at_or_above = inputFeatures >= self.mu
        sq_dist = (inputFeatures - self.mu) ** 2
        left = self.a * torch.exp(-self.sigma1 * sq_dist)
        left.masked_fill_(at_or_above, 0.0)
        right = 1 + (self.a - 1) * torch.exp(-self.sigma2 * sq_dist)
        right.masked_fill_(below, 0.0)
        return left + right
def get_inputs():
    """Sample forward-pass inputs for benchmarking."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for benchmarking."""
    return [[], {'a': 4, 'mu': 4, 'sigma1': 4, 'sigma2': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Clamp the scalar parameter `a` to [1.01, 6.0] (single-element kernel).
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    # The next two expressions are dead code the generator emits for
    # masking; xnumel == 1 so no mask is needed.
    xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = 1.01
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp4 = 6.0
    tmp5 = triton_helpers.minimum(tmp3, tmp4)
    tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None)
# Clamp the scalar parameter `mu` to [0.1, 3.0] (single-element kernel).
@triton.jit
def triton_poi_fused_clamp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    # Dead masking expressions emitted by the generator (xnumel == 1).
    xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = 0.1
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp4 = 3.0
    tmp5 = triton_helpers.minimum(tmp3, tmp4)
    tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None)
# Clamp a scalar sigma parameter to [1.0, 2.0]; reused for both sigma1
# and sigma2 since they share the same bounds.
@triton.jit
def triton_poi_fused_clamp_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    # Dead masking expressions emitted by the generator (xnumel == 1).
    xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = 1.0
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp4 = 2.0
    tmp5 = triton_helpers.minimum(tmp3, tmp4)
    tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None)
# Fused elementwise body of GaussActivation: computes both Gaussian
# branches, masks each on the wrong side of mu, and sums them. Also
# stores the x<mu / x>=mu masks for the backward pass.
@triton.jit
def triton_poi_fused_add_exp_ge_lt_masked_fill_mul_neg_pow_sub_3(in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2,
    xnumel, XBLOCK: tl.constexpr):
    # in_ptr0: input tensor; in_ptr1: mu; in_ptr2: a; in_ptr3: sigma1;
    # in_ptr4: sigma2 (the scalars are pre-clamped by the clamp kernels)
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp5 = tl.load(in_ptr2 + 0)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp7 = tl.load(in_ptr3 + 0)
    tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
    tmp19 = tl.load(in_ptr4 + 0)
    tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
    tmp3 = tmp0 < tmp2
    tmp4 = tmp0 >= tmp2
    # left branch: a * exp(-sigma1 * (x - mu)^2), zeroed where x >= mu
    tmp9 = -tmp8
    tmp10 = tmp0 - tmp2
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 * tmp11
    tmp13 = tl_math.exp(tmp12)
    tmp14 = tmp6 * tmp13
    tmp15 = 0.0
    tmp16 = tl.where(tmp4, tmp15, tmp14)
    # right branch: 1 + (a - 1) * exp(-sigma2 * (x - mu)^2), zeroed where x < mu
    tmp17 = 1.0
    tmp18 = tmp6 - tmp17
    tmp21 = -tmp20
    tmp22 = tmp21 * tmp11
    tmp23 = tl_math.exp(tmp22)
    tmp24 = tmp18 * tmp23
    tmp25 = tmp24 + tmp17
    tmp26 = tl.where(tmp3, tmp15, tmp25)
    tmp27 = tmp16 + tmp26
    tl.store(out_ptr0 + x0, tmp3, xmask)
    tl.store(out_ptr1 + x0, tmp4, xmask)
    tl.store(out_ptr2 + x0, tmp27, xmask)
def call(args):
    """Inductor-generated forward for `GaussActivation`.

    args: [a, mu, sigma1, sigma2 (0-d scalars), input (4,4,4,4)].
    Clamps each scalar parameter, evaluates the fused activation, then
    writes the clamped values back into the parameter storage (mirroring
    the eager `.data =` assignments).
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (), ())
    assert_size_stride(primals_2, (), ())
    assert_size_stride(primals_3, (), ())
    assert_size_stride(primals_4, (), ())
    assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # clamp a, mu, sigma1, sigma2 into their valid ranges
        buf0 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_0[grid(1)](primals_1, buf0, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((), (), torch.float32)
        triton_poi_fused_clamp_1[grid(1)](primals_2, buf1, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((), (), torch.float32)
        triton_poi_fused_clamp_2[grid(1)](primals_3, buf2, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((), (), torch.float32)
        triton_poi_fused_clamp_2[grid(1)](primals_4, buf3, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
        # buf4/buf5: below/above-mu masks; buf6: activation output
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_exp_ge_lt_masked_fill_mul_neg_pow_sub_3[grid(256)
            ](primals_5, buf1, buf0, buf2, buf3, buf4, buf5, buf6, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        # write the clamped values back into the original parameters
        buf7 = torch.ops.aten.set_.source_Tensor(primals_1, buf0)
        assert_size_stride(buf7, (), ())
        del primals_1
        buf17 = torch.ops.aten.set_.source_Tensor(primals_2, buf1)
        assert_size_stride(buf17, (), ())
        del primals_2
        buf27 = torch.ops.aten.set_.source_Tensor(primals_3, buf2)
        assert_size_stride(buf27, (), ())
        del primals_3
        buf32 = torch.ops.aten.set_.source_Tensor(primals_4, buf3)
        assert_size_stride(buf32, (), ())
        del primals_4
    return buf6, primals_5, buf0, buf1, buf2, buf3, buf4, buf5
class GaussActivationNew(nn.Module):
    """GaussActivation variant running the fused Triton `call` path."""

    def __init__(self, a, mu, sigma1, sigma2):
        super(GaussActivationNew, self).__init__()
        self.a = Parameter(torch.tensor(a, dtype=torch.float32))
        self.mu = Parameter(torch.tensor(mu, dtype=torch.float32))
        self.sigma1 = Parameter(torch.tensor(sigma1, dtype=torch.float32))
        self.sigma2 = Parameter(torch.tensor(sigma2, dtype=torch.float32))

    def forward(self, input_0):
        # call() expects [a, mu, sigma1, sigma2, input] in this order.
        return call([self.a, self.mu, self.sigma1, self.sigma2, input_0])[0]
|
DLwbm123/LBAM_inpainting
|
GaussActivation
| false
| 17,197
|
[
"MIT"
] | 7
|
c809c3cedf09cda7c175e930c7834ac39d8f526f
|
https://github.com/DLwbm123/LBAM_inpainting/tree/c809c3cedf09cda7c175e930c7834ac39d8f526f
|
FactorizationMachine
|
from torch.nn import Module
import torch
from torch import FloatTensor
from torch.nn import Parameter
class SecondOrderInteraction(Module):
    """
    Factorized parameters for the Second Order Interactions

    Parameters
    ----------
    n_features: int
        Length of the input vector.
    n_factors: int, optional
        Number of factors of the factorized parameters
    """

    def __init__(self, n_features, n_factors):
        super(SecondOrderInteraction, self).__init__()
        self.batch_size = None
        self.n_features = n_features
        self.n_factors = n_factors
        self.v = Parameter(torch.Tensor(self.n_features, self.n_factors))
        self.v.data.uniform_(-0.01, 0.01)

    def forward(self, x):
        # Record the batch size as a side effect, matching the original API.
        self.batch_size = x.size()[0]
        # Pairwise-interaction identity: (xV)^2 - (x^2)(V^2), summed over factors.
        square_of_sum = torch.mm(x, self.v) ** 2
        sum_of_square = torch.mm(x ** 2, self.v ** 2)
        out = 0.5 * (square_of_sum - sum_of_square).sum(1)
        return out.unsqueeze(-1)
class FactorizationMachine(Module):
    """
    Pointwise Factorization Machine Model

    Parameters
    ----------
    n_features: int
        Length of the input vector.
    n_factors: int, optional
        Number of factors of the factorized parameters
    """

    def __init__(self, n_features, n_factors=10):
        super(FactorizationMachine, self).__init__()
        self.n_features, self.factors = n_features, n_factors
        self.global_bias = Parameter(FloatTensor(1))
        self.global_bias.data.uniform_(-0.01, 0.01)
        self.linear = Parameter(FloatTensor(self.n_features))
        self.linear.data.uniform_(-0.01, 0.01)
        self.second_order = SecondOrderInteraction(self.n_features, self.
            factors)

    @property
    def v(self):
        # Expose the factorized weights of the interaction term.
        return self.second_order.v

    def forward(self, x):
        # bias + first-order (linear) term + second-order interactions
        first_order = (x * self.linear).sum(1).unsqueeze(-1)
        pairwise = self.second_order(x)
        return (self.global_bias + first_order + pairwise).squeeze(-1)
def get_inputs():
    """Sample forward-pass inputs for benchmarking."""
    return [torch.rand(4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for benchmarking."""
    return [[], {'n_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch import FloatTensor
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Elementwise square of a 4x4 tensor (x -> x*x).
@triton.jit
def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 * tmp0
    tl.store(out_ptr0 + x0, tmp1, xmask)
# Elementwise square of the 4x10 factor matrix V (same body as pow_0,
# specialized for xnumel == 40).
@triton.jit
def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 40
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 * tmp0
    tl.store(out_ptr0 + x0, tmp1, xmask)
# Per-row tail of the FactorizationMachine forward: reduces the pairwise
# interaction term over the 10 factors and adds the global bias plus the
# unrolled first-order (x . linear) term.
@triton.jit
def triton_per_fused_add_pow_sub_sum_2(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
    # in_ptr0: x @ V (4x10); in_ptr1: (x^2) @ (V^2) (4x10)
    # in_ptr2: global bias scalar; in_ptr3: x (4x4); in_ptr4: linear weights (4,)
    xnumel = 4
    rnumel = 10
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
    tmp2 = tl.load(in_ptr1 + (r1 + 10 * x0), rmask & xmask, other=0.0)
    tmp8 = tl.load(in_ptr2 + 0)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK, 1])
    tmp10 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + 0)
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, 1])
    tmp14 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp15 = tl.load(in_ptr4 + 1)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, 1])
    tmp19 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp20 = tl.load(in_ptr4 + 2)
    tmp21 = tl.broadcast_to(tmp20, [XBLOCK, 1])
    tmp24 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp25 = tl.load(in_ptr4 + 3)
    tmp26 = tl.broadcast_to(tmp25, [XBLOCK, 1])
    # interaction: sum over factors of (xV)^2 - (x^2)(V^2)
    tmp1 = tmp0 * tmp0
    tmp3 = tmp1 - tmp2
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp6 = tl.where(rmask & xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    # first-order term, unrolled over the 4 features
    tmp13 = tmp10 * tmp12
    tmp17 = tmp14 * tmp16
    tmp18 = tmp13 + tmp17
    tmp22 = tmp19 * tmp21
    tmp23 = tmp18 + tmp22
    tmp27 = tmp24 * tmp26
    tmp28 = tmp23 + tmp27
    tmp29 = tmp9 + tmp28
    tmp30 = 0.5
    tmp31 = tmp7 * tmp30
    tmp32 = tmp29 + tmp31
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp32, xmask)
def call(args):
    """Inductor-generated forward for `FactorizationMachine`.

    args: [primals_1 linear weights (4,), primals_2 input x (4,4),
    primals_3 factor matrix V (4,10), primals_4 global bias (1,)].
    Returns the per-sample FM score plus tensors saved for backward.
    """
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 10), (10, 1))
    assert_size_stride(primals_4, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0 = x^2, buf1 = x @ V, buf2 = V^2, buf3 = x^2 @ V^2
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_pow_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.mm(primals_2, primals_3, out=buf1)
        buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        triton_poi_fused_pow_1[grid(40)](primals_3, buf2, 40, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.mm(buf0, buf2, out=buf3)
        del buf2
        # fused reduction combining bias, linear term, and interaction term
        buf4 = empty_strided_cuda((4,), (1,), torch.float32)
        buf6 = reinterpret_tensor(buf4, (4, 1), (1, 1), 0)
        del buf4
        triton_per_fused_add_pow_sub_sum_2[grid(4)](buf6, buf1, buf3,
            primals_4, primals_2, primals_1, 4, 10, XBLOCK=1, num_warps=2,
            num_stages=1)
        del buf3
        del primals_1
        del primals_4
    return reinterpret_tensor(buf6, (4,), (1,), 0
        ), primals_2, primals_3, buf1, reinterpret_tensor(buf0, (4, 4), (1,
        4), 0)
class SecondOrderInteraction(Module):
    """
    Factorized parameters for the Second Order Interactions

    Parameters
    ----------
    n_features: int
        Length of the input vector.
    n_factors: int, optional
        Number of factors of the factorized parameters
    """

    def __init__(self, n_features, n_factors):
        super(SecondOrderInteraction, self).__init__()
        self.batch_size = None
        self.n_features = n_features
        self.n_factors = n_factors
        self.v = Parameter(torch.Tensor(self.n_features, self.n_factors))
        self.v.data.uniform_(-0.01, 0.01)

    def forward(self, x):
        # Record the batch size as a side effect, matching the original API.
        self.batch_size = x.size()[0]
        # Pairwise-interaction identity: (xV)^2 - (x^2)(V^2), summed over factors.
        square_of_sum = torch.mm(x, self.v) ** 2
        sum_of_square = torch.mm(x ** 2, self.v ** 2)
        out = 0.5 * (square_of_sum - sum_of_square).sum(1)
        return out.unsqueeze(-1)
class FactorizationMachineNew(Module):
    """
    Pointwise Factorization Machine whose forward runs the fused Triton
    `call` path.

    Parameters
    ----------
    n_features: int
        Length of the input vector.
    n_factors: int, optional
        Number of factors of the factorized parameters
    """

    def __init__(self, n_features, n_factors=10):
        super(FactorizationMachineNew, self).__init__()
        self.n_features, self.factors = n_features, n_factors
        self.global_bias = Parameter(FloatTensor(1))
        self.global_bias.data.uniform_(-0.01, 0.01)
        self.linear = Parameter(FloatTensor(self.n_features))
        self.linear.data.uniform_(-0.01, 0.01)
        self.second_order = SecondOrderInteraction(self.n_features, self.
            factors)

    @property
    def v(self):
        # Expose the factorized weights of the interaction term.
        return self.second_order.v

    def forward(self, input_0):
        # call() expects [linear, x, V, global_bias] in this order.
        return call([self.linear, input_0, self.second_order.v,
                     self.global_bias])[0]
|
DanielMorales9/FactorizationPyTorch
|
FactorizationMachine
| false
| 17,198
|
[
"MIT"
] | 4
|
50f0644fdb4a903550fb3f1ba78fb9fb8649ceb1
|
https://github.com/DanielMorales9/FactorizationPyTorch/tree/50f0644fdb4a903550fb3f1ba78fb9fb8649ceb1
|
GEGLU
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class GEGLU(nn.Module):
    """GELU-gated linear unit: splits the last dim in half and uses the
    second half (through GELU) to gate the first half."""

    def forward(self, x):
        value, gate = torch.chunk(x, 2, dim=-1)
        return F.gelu(gate) * value
def get_inputs():
    """Sample forward-pass inputs for benchmarking."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for benchmarking."""
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Fused GEGLU: for each output element, reads the gate from the upper
# half of the last dim (offset +2) and the value from the lower half,
# then computes gelu(gate) * value using the exact-erf GELU formula.
@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = xindex // 2
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
    tmp9 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
    # gelu(g) = 0.5 * g * (1 + erf(g / sqrt(2)))
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865476
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tmp10 = tmp8 * tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
    """Inductor-generated forward for `GEGLU`.

    args: [input (4,4,4,4)]. Returns a single (4,4,4,2) tensor — the
    last dim halved by the chunk-and-gate operation.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_gelu_mul_0[grid(128)](arg0_1, buf0, 128, XBLOCK=
            128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class GEGLUNew(nn.Module):
    """GEGLU variant running the fused Triton `call` path."""

    def forward(self, input_0):
        return call([input_0])[0]
|
DannielSilva/MMBERT
|
GEGLU
| false
| 17,199
|
[
"MIT"
] | 4
|
2c9069b59b66b8f3fec6de2e68ec42b489a3a437
|
https://github.com/DannielSilva/MMBERT/tree/2c9069b59b66b8f3fec6de2e68ec42b489a3a437
|
SecondOrderInteraction
|
from torch.nn import Module
import torch
from torch.nn import Parameter
class SecondOrderInteraction(Module):
    """
    Factorized parameters for the Second Order Interactions

    Parameters
    ----------
    n_features: int
        Length of the input vector.
    n_factors: int, optional
        Number of factors of the factorized parameters
    """

    def __init__(self, n_features, n_factors):
        super(SecondOrderInteraction, self).__init__()
        self.batch_size = None
        self.n_features = n_features
        self.n_factors = n_factors
        self.v = Parameter(torch.Tensor(self.n_features, self.n_factors))
        self.v.data.uniform_(-0.01, 0.01)

    def forward(self, x):
        # Record the batch size as a side effect, matching the original API.
        self.batch_size = x.size()[0]
        # Pairwise-interaction identity: (xV)^2 - (x^2)(V^2), summed over factors.
        square_of_sum = torch.mm(x, self.v) ** 2
        sum_of_square = torch.mm(x ** 2, self.v ** 2)
        out = 0.5 * (square_of_sum - sum_of_square).sum(1)
        return out.unsqueeze(-1)
def get_inputs():
    """Sample forward-pass inputs for benchmarking."""
    return [torch.rand(4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for benchmarking."""
    return [[], {'n_features': 4, 'n_factors': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Elementwise square of a 4x4 tensor; reused for both x^2 and V^2.
@triton.jit
def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 * tmp0
    tl.store(out_ptr0 + x0, tmp1, xmask)
# Per-row reduction for SecondOrderInteraction: computes
# 0.5 * sum_f((xV)_f^2 - (x^2 V^2)_f) with the 4-factor sum fully unrolled.
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # in_ptr0: x @ V (4x4); in_ptr1: (x^2) @ (V^2) (4x4)
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        )
    tmp1 = tmp0 * tmp0
    tmp3 = tmp1 - tmp2
    tmp5 = tmp4 * tmp4
    tmp7 = tmp5 - tmp6
    tmp8 = tmp3 + tmp7
    tmp10 = tmp9 * tmp9
    tmp12 = tmp10 - tmp11
    tmp13 = tmp8 + tmp12
    tmp15 = tmp14 * tmp14
    tmp17 = tmp15 - tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = 0.5
    tmp20 = tmp18 * tmp19
    tl.store(out_ptr0 + x0, tmp20, xmask)
def call(args):
    """Inductor-generated forward for `SecondOrderInteraction`.

    args: [primals_1 factor matrix V (4,4), primals_2 input x (4,4)].
    NOTE: primals_1 is squared first, then mm'd as x @ V with primals_2;
    the generator's argument naming swaps the eager roles but the math
    matches the traced graph. Returns the (4,1) interaction term plus
    tensors saved for backward.
    """
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # buf0 = primals_1^2, buf1 = primals_1 @ primals_2, buf2 = primals_2^2
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_pow_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, primals_2, out=buf1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_pow_0[grid(16)](primals_2, buf2, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, buf2, out=buf3)
        del buf2
        # fused 0.5 * sum((buf1)^2 - buf3, dim=1)
        buf4 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused_mul_pow_sub_sum_1[grid(4)](buf1, buf3, buf4, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        del buf3
    return reinterpret_tensor(buf4, (4, 1), (1, 1), 0
        ), primals_2, buf1, reinterpret_tensor(buf0, (4, 4), (1, 4), 0
        ), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0)
class SecondOrderInteractionNew(Module):
    """
    Factorized second-order interaction whose forward runs the fused
    Triton `call` path.

    Parameters
    ----------
    n_features: int
        Length of the input vector.
    n_factors: int, optional
        Number of factors of the factorized parameters
    """

    def __init__(self, n_features, n_factors):
        super(SecondOrderInteractionNew, self).__init__()
        self.batch_size = None
        self.n_features = n_features
        self.n_factors = n_factors
        self.v = Parameter(torch.Tensor(self.n_features, self.n_factors))
        self.v.data.uniform_(-0.01, 0.01)

    def forward(self, input_0):
        # call() expects [v, x] in this order.
        return call([self.v, input_0])[0]
|
DanielMorales9/FactorizationPyTorch
|
SecondOrderInteraction
| false
| 17,200
|
[
"MIT"
] | 4
|
50f0644fdb4a903550fb3f1ba78fb9fb8649ceb1
|
https://github.com/DanielMorales9/FactorizationPyTorch/tree/50f0644fdb4a903550fb3f1ba78fb9fb8649ceb1
|
GatedConv2d
|
import torch
import torch.nn as nn
import torch.utils.data
class GatedConv2d(nn.Module):
    """Gated 2-D convolution: features from one conv, gated elementwise by
    the sigmoid of a second, identically-shaped conv."""

    def __init__(self, input_channels, output_channels, kernel_size, stride,
        padding, dilation=1, activation=None):
        super(GatedConv2d, self).__init__()
        self.activation = activation
        self.sigmoid = nn.Sigmoid()
        self.h = nn.Conv2d(input_channels, output_channels, kernel_size,
            stride, padding, dilation)
        self.g = nn.Conv2d(input_channels, output_channels, kernel_size,
            stride, padding, dilation)

    def forward(self, x):
        features = self.h(x)
        if self.activation is not None:
            features = self.activation(features)
        gate = self.sigmoid(self.g(x))
        return features * gate
def get_inputs():
    """Sample forward-pass inputs for benchmarking."""
    return [torch.rand(4, 4, 4, 4)]


def get_init_inputs():
    """Constructor (args, kwargs) for benchmarking."""
    kwargs = {'input_channels': 4, 'output_channels': 4,
              'kernel_size': 4, 'stride': 1, 'padding': 4}
    return [[], kwargs]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
# Post-convolution fusion for GatedConv2d: adds both conv biases in place,
# applies sigmoid to the gate branch, and writes the gated product.
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1,
    in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # in_out_ptr0/in_ptr0: feature conv output and its per-channel bias
    # in_out_ptr1/in_ptr1: gate conv output and its per-channel bias
    # out_ptr0: (features + bias) * sigmoid(gate + bias)
    xnumel = 1296
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 81 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_out_ptr1 + x3, xmask)
    tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tl.sigmoid(tmp5)
    tmp7 = tmp2 * tmp6
    tl.store(in_out_ptr0 + x3, tmp2, xmask)
    tl.store(in_out_ptr1 + x3, tmp5, xmask)
    tl.store(out_ptr0 + x3, tmp7, xmask)
def call(args):
    """Inductor-generated forward for `GatedConv2d`.

    args: five tensors; primals_3 is used as the convolution *input*
    here while primals_1 and primals_4 are the two conv kernels, and
    primals_2/primals_5 the biases. NOTE(review): this ordering differs
    from the eager module's attribute names — verify against the caller.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # two bias-free convolutions; biases are fused in afterwards
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(4, 4), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
        buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
            1), padding=(4, 4), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 9, 9), (324, 81, 9, 1))
        buf1 = buf0
        del buf0
        buf3 = buf2
        del buf2
        buf4 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
        get_raw_stream(0)
        # fused bias-add + sigmoid gate + multiply
        triton_poi_fused_convolution_mul_sigmoid_0[grid(1296)](buf1, buf3,
            primals_2, primals_5, buf4, 1296, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_2
        del primals_5
    return buf4, primals_1, primals_3, primals_4, buf1, buf3
class GatedConv2dNew(nn.Module):
    """GatedConv2d variant running the fused Triton `call` path."""

    def __init__(self, input_channels, output_channels, kernel_size, stride,
        padding, dilation=1, activation=None):
        super(GatedConv2dNew, self).__init__()
        self.activation = activation
        self.sigmoid = nn.Sigmoid()
        self.h = nn.Conv2d(input_channels, output_channels, kernel_size,
            stride, padding, dilation)
        self.g = nn.Conv2d(input_channels, output_channels, kernel_size,
            stride, padding, dilation)

    def forward(self, input_0):
        # NOTE(review): positionally, call() receives [h.weight, h.bias,
        # g.weight, input_0, g.bias], and inside call() the 3rd element is
        # used as the convolution *input* — this mapping looks swapped
        # relative to the eager module; confirm against the generator.
        return call([self.h.weight, self.h.bias, self.g.weight, input_0,
                     self.g.bias])[0]
|
Daulbaev/IRDM
|
GatedConv2d
| false
| 17,201
|
[
"MIT"
] | 10
|
4bb60191ac0072e4349ca47092675d06b39a979a
|
https://github.com/Daulbaev/IRDM/tree/4bb60191ac0072e4349ca47092675d06b39a979a
|
SERF
|
import torch
import torch.nn as nn
class SERF(nn.Module):
    """SERF activation: ``x * erf(log(1 + exp(x)))``.

    ``forward`` uses the log1p/clamp formulation, which clamps the input at
    ``thresh`` before ``exp`` so the exponential cannot overflow.
    """

    def __init__(self, thresh=50):
        super().__init__()
        # Upper clamp applied before exp() in the stable formulation.
        self.thresh = thresh

    def forward(self, x):
        return self.serf_log1pexp(x)

    def serf(self, x):
        # Reference formulation; exp(x) overflows for large x.
        return x * torch.erf(torch.log(1 + torch.exp(x)))

    def serf_log1pexp(self, x):
        # Numerically safer: clamp, exponentiate, then log1p.
        clamped = torch.clamp(x, max=self.thresh)
        softplus = torch.log1p(torch.exp(clamped))
        return x * torch.erf(softplus)
def get_inputs():
    # Single random 4-D activation tensor for the benchmark harness.
    return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    # (positional args, keyword args) for constructing the module.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_erf_exp_log1p_mul_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Elementwise SERF: out = x * erf(log1p(exp(min(x, 50.0)))).

    Flat 1-D launch over 256 elements; each program handles XBLOCK lanes.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel  # guard for the final partial block
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 50.0  # clamp threshold (SERF `thresh` default baked in)
    tmp2 = triton_helpers.minimum(tmp0, tmp1)  # clamp(x, max=50)
    tmp3 = tl_math.exp(tmp2)
    tmp4 = libdevice.log1p(tmp3)  # stable softplus: log1p(exp(.))
    tmp5 = libdevice.erf(tmp4)
    tmp6 = tmp0 * tmp5  # x * erf(softplus(x))
    tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
    """Run the fused SERF kernel on a (4, 4, 4, 4) contiguous CUDA tensor.

    Takes ownership of `args` (the list is cleared) and returns a 1-tuple
    containing the activated output buffer.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        # One element per lane: 256 total elements, XBLOCK=128 -> 2 programs.
        triton_poi_fused_clamp_erf_exp_log1p_mul_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1  # input no longer needed after the kernel
    return buf0,
class SERFNew(nn.Module):
    """SERF activation backed by the fused Triton kernel above."""

    def __init__(self, thresh=50):
        super().__init__()
        self.thresh = thresh
        None

    def serf(self, x):
        # Reference (overflow-prone) formulation, kept for parity.
        return x * torch.erf(torch.log(1 + torch.exp(x)))

    def serf_log1pexp(self, x):
        # Numerically stable reference formulation.
        clamped = torch.clamp(x, max=self.thresh)
        return x * torch.erf(torch.log1p(torch.exp(clamped)))

    def forward(self, input_0):
        # The compiled graph takes the single input positionally.
        return call([input_0])[0]
|
DannielSilva/MMBERT
|
SERF
| false
| 17,202
|
[
"MIT"
] | 4
|
2c9069b59b66b8f3fec6de2e68ec42b489a3a437
|
https://github.com/DannielSilva/MMBERT/tree/2c9069b59b66b8f3fec6de2e68ec42b489a3a437
|
BCEWithLogitsWithClassWeightLoss
|
import torch
from torch import Tensor
from typing import NoReturn
from torch import nn
class BCEWithLogitsWithClassWeightLoss(nn.BCEWithLogitsLoss):
    """BCE-with-logits loss with per-class weighting.

    The elementwise (``reduction='none'``) BCE loss is multiplied by
    ``class_weight`` (broadcast over the batch) and then averaged to a
    scalar.
    """
    __name__ = 'BCEWithLogitsWithClassWeightsLoss'

    # FIX: was annotated `-> NoReturn`, which (per typing semantics) claims
    # the function never returns; `__init__` returns None like any other.
    def __init__(self, class_weight: 'Tensor') -> None:
        """
        Parameters
        ----------
        class_weight: Tensor,
            class weight, of shape (1, n_classes)
        """
        # Keep the elementwise loss so it can be reweighted before the mean.
        super().__init__(reduction='none')
        self.class_weight = class_weight

    def forward(self, input: 'Tensor', target: 'Tensor') -> Tensor:
        """
        Parameters
        ----------
        input: Tensor,
            the prediction tensor, of shape (batch_size, ...)
        target: Tensor,
            the target tensor, of shape (batch_size, ...)

        Returns
        -------
        loss: Tensor,
            the loss (scalar tensor) w.r.t. `input` and `target`
        """
        loss = super().forward(input, target)
        loss = torch.mean(loss * self.class_weight)
        return loss
def get_inputs():
    # Random (prediction, target) pair for the benchmark harness.
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    # Constructor (args, kwargs); the harness passes a scalar class weight.
    return [[], {'class_weight': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import Tensor
from typing import NoReturn
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    """Fused BCE-with-logits * class_weight, reduced to a scalar mean.

    Single-program reduction over all 256 elements. in_ptr0 holds the
    targets, in_ptr1 the logits; the scalar class weight (4.0) and the
    element count (256) are baked into the kernel.
    """
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)  # target t
    tmp3 = tl.load(in_ptr1 + r0, None)  # logit x
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3  # (1 - t) * x
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)  # min(0, x)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    # Stable log-sigmoid term: min(0, x) - log1p(exp(-|x|)).
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11  # elementwise BCE-with-logits loss
    tmp13 = 4.0  # fused scalar class weight
    tmp14 = tmp12 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
    tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
    tmp18 = 256.0  # divisor for the mean
    tmp19 = tmp17 / tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
    """Compute the weighted BCE loss for two (4, 4, 4, 4) CUDA tensors.

    Takes ownership of `args` (clears the list); returns a 1-tuple with a
    scalar float32 loss buffer.
    """
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Scalar accumulator written in-place by the reduction kernel.
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0[grid(1)](
            buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,
class BCEWithLogitsWithClassWeightLossNew(nn.BCEWithLogitsLoss):
    """Class-weighted BCE-with-logits loss backed by the fused Triton
    kernel above (the weight 4.0 and the mean are baked into `call`).
    """
    __name__ = 'BCEWithLogitsWithClassWeightsLoss'

    # FIX: was annotated `-> NoReturn`, which claims the function never
    # returns; `__init__` returns None.
    def __init__(self, class_weight: 'Tensor') -> None:
        """
        Parameters
        ----------
        class_weight: Tensor,
            class weight, of shape (1, n_classes)
        """
        super().__init__(reduction='none')
        self.class_weight = class_weight

    def forward(self, input_0, input_1):
        # Positional order matches the original tracing: (input, target).
        output = call([input_0, input_1])
        return output[0]
|
DeepPSP/torch_ecg
|
BCEWithLogitsWithClassWeightLoss
| false
| 17,203
|
[
"MIT"
] | 9
|
6db5ffb063d0e8fb4ce97029a0d184a658f43a37
|
https://github.com/DeepPSP/torch_ecg/tree/6db5ffb063d0e8fb4ce97029a0d184a658f43a37
|
MLP
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
def create_all_possible_moves(m, n):
    """Create all moves on a (m,n) board."""
    # Build the (row, col) pairs directly into a set, then materialize;
    # identical to appending to a list first and deduplicating.
    return list({(row, col) for row in range(m) for col in range(n)})
class MLP(nn.Module):
    """3-layer MLP for AlphaZero.

    Produces a policy distribution over all board squares (via
    log-softmax + exp, i.e. softmax) and a tanh-bounded scalar value.
    """

    def __init__(self, board_size=15, num_hidden1=2000, num_hidden2=1000):
        super(MLP, self).__init__()
        self.board_size = board_size
        self.all_moves = create_all_possible_moves(board_size, board_size)
        self.num_hidden1 = num_hidden1
        self.num_hidden2 = num_hidden2
        # Sanity check: one move per square.
        if len(self.all_moves) != self.board_size ** 2:
            raise ValueError("moves and board don't match")
        num_squares = self.board_size ** 2
        self.fc1 = nn.Linear(num_squares, self.num_hidden1)
        self.fc2 = nn.Linear(self.num_hidden1, self.num_hidden2)
        self.logsoftmax = nn.LogSoftmax(dim=1)
        self.fc3 = nn.Linear(self.num_hidden2, num_squares)  # policy head
        self.fc4 = nn.Linear(self.num_hidden2, 1)  # value head

    def forward(self, x):
        flat = x.view(-1, self.board_size ** 2)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        # Policy: relu'd logits pushed through softmax (logsoftmax + exp).
        policy = self.logsoftmax(F.relu(self.fc3(hidden))).exp()
        # Value: scalar in (-1, 1).
        value = torch.tanh(self.fc4(hidden))
        return policy, value
def get_inputs():
    # One flattened random board batch (4 boards of 15*15 = 225 squares).
    return [torch.rand([4, 225])]
def get_init_inputs():
    # Default constructor: no positional or keyword arguments.
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place bias-add + ReLU for the fc1 output (4 x 2000).

    The buffer is over-allocated with row stride 2016, hence the explicit
    (x0 + 2016 * x1) addressing rather than a flat index.
    """
    xnumel = 8000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2000  # feature index within a row
    x1 = xindex // 2000  # batch row
    tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')  # bias
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # relu
    tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place bias-add + ReLU for the fc2 output (4 x 1000, contiguous)."""
    xnumel = 4000
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 1000  # feature index, used to look up the bias
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')  # bias
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # relu
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_exp_relu_2(in_out_ptr0, in_ptr0, out_ptr0,
    out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Fused relu -> log-softmax -> exp (i.e. softmax of relu'd logits).

    One row (x) of 225 policy logits is reduced per program. Writes the
    per-row max to out_ptr0, log(sum(exp(x - max))) to in_out_ptr0, and the
    softmax probabilities to out_ptr1.
    """
    xnumel = 4
    rnumel = 225
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel  # 225 valid columns of the 256-wide block
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 225 * x0), rmask & xmask, other=0.0)
    tmp1 = tl.full([1, 1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)  # relu
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    # Mask invalid lanes with -inf so they cannot win the row max.
    tmp5 = tl.where(rmask & xmask, tmp3, float('-inf'))
    tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
    tmp7 = tmp2 - tmp6  # shift by the row max for stability
    tmp8 = tl_math.exp(tmp7)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp11 = tl.where(rmask & xmask, tmp9, 0)
    tmp12 = tl.sum(tmp11, 1)[:, None]
    tmp13 = tl_math.log(tmp12)  # log-sum-exp (relative to the max)
    tmp14 = tmp7 - tmp13  # log-softmax
    tmp15 = tl_math.exp(tmp14)  # softmax probability
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp13, xmask)
    tl.store(out_ptr1 + (r1 + 225 * x0), tmp15, rmask & xmask)
    tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    """In-place bias-add + tanh for the scalar value head (4 x 1)."""
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    # fc4 has a single output, so the bias is one scalar broadcast to all rows.
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = libdevice.tanh(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
    """Execute the traced MLP forward pass on CUDA.

    args layout: [input (4,225), fc1.w, fc1.b, fc2.w, fc2.b, fc3.w, fc3.b,
    fc4.w, fc4.b]. Takes ownership of `args`; returns the policy (buf8),
    the value (buf10), and intermediates retained for the backward pass.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 225), (225, 1))
    assert_size_stride(primals_2, (2000, 225), (225, 1))
    assert_size_stride(primals_3, (2000,), (1,))
    assert_size_stride(primals_4, (1000, 2000), (2000, 1))
    assert_size_stride(primals_5, (1000,), (1,))
    assert_size_stride(primals_6, (225, 1000), (1000, 1))
    assert_size_stride(primals_7, (225,), (1,))
    assert_size_stride(primals_8, (1, 1000), (1000, 1))
    assert_size_stride(primals_9, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # fc1: x @ W1^T into a row-padded (stride 2016) buffer.
        buf0 = empty_strided_cuda((4, 2000), (2016, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (225,
            2000), (1, 225), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        # bias + relu fused in place.
        triton_poi_fused_relu_0[grid(8000)](buf1, primals_3, 8000, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_3
        # fc2.
        buf2 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (2000, 1000),
            (1, 2000), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(4000)](buf3, primals_5, 4000, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_5
        # fc3 (policy logits): bias folded into the addmm.
        buf4 = empty_strided_cuda((4, 225), (225, 1), torch.float32)
        extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
            (1000, 225), (1, 1000), 0), alpha=1, beta=1, out=buf4)
        del primals_7
        # Per-row max (buf5), log-sum-exp (buf7) and softmax probs (buf8).
        buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf7 = reinterpret_tensor(buf6, (4, 1), (1, 1), 0)
        del buf6
        buf8 = empty_strided_cuda((4, 225), (225, 1), torch.float32)
        triton_per_fused__log_softmax_exp_relu_2[grid(4)](buf7, buf4, buf5,
            buf8, 4, 225, XBLOCK=1, num_warps=2, num_stages=1)
        # fc4 (value head) followed by fused bias + tanh.
        buf9 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (1000, 1), (1,
            1000), 0), out=buf9)
        buf10 = buf9
        del buf9
        triton_poi_fused_tanh_3[grid(4)](buf10, primals_9, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del primals_9
    return (buf8, buf10, primals_1, buf1, buf3, buf4, buf5, buf7, buf8,
        buf10, primals_8, primals_6, primals_4)
def create_all_possible_moves(m, n):
    """Create all moves on a (m,n) board."""
    # Enumerate every (row, col) square, deduplicate through a set,
    # and return the materialized list — identical to the append loop.
    return list({(i, j) for i in range(m) for j in range(n)})
class MLPNew(nn.Module):
    """3-layer AlphaZero MLP whose forward pass runs the compiled Triton
    graph (`call`) instead of eager PyTorch ops.
    """

    def __init__(self, board_size=15, num_hidden1=2000, num_hidden2=1000):
        super(MLPNew, self).__init__()
        self.board_size = board_size
        self.all_moves = create_all_possible_moves(board_size, board_size)
        self.num_hidden1 = num_hidden1
        self.num_hidden2 = num_hidden2
        if len(self.all_moves) != self.board_size ** 2:
            raise ValueError("moves and board don't match")
        num_squares = self.board_size ** 2
        self.fc1 = nn.Linear(num_squares, self.num_hidden1)
        self.fc2 = nn.Linear(self.num_hidden1, self.num_hidden2)
        self.logsoftmax = nn.LogSoftmax(dim=1)
        self.fc3 = nn.Linear(self.num_hidden2, num_squares)  # policy head
        self.fc4 = nn.Linear(self.num_hidden2, 1)  # value head

    def forward(self, input_0):
        # Positional layout expected by `call`: input first, then the
        # weight/bias pairs of fc1..fc4 in order.
        params = [input_0, self.fc1.weight, self.fc1.bias, self.fc2.weight,
            self.fc2.bias, self.fc3.weight, self.fc3.bias, self.fc4.weight,
            self.fc4.bias]
        out = call(params)
        return out[0], out[1]
|
CoAxLab/azad
|
MLP
| false
| 17,204
|
[
"MIT"
] | 6
|
d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
https://github.com/CoAxLab/azad/tree/d1498069dd8856e93ae077b34dd7c9f1c7ce80e6
|
PatchEmbedding
|
import torch
import torch.nn as nn
class PatchEmbedding(nn.Module):
    """Split an image into non-overlapping patches and linearly embed them.

    Raises ValueError if either image dimension is not divisible by the
    patch size. Output shape: (B, num_patches, embed_dim).
    """

    def __init__(self, image_size, patch_size, embed_dim, channels):
        super().__init__()
        self.image_size = image_size
        if image_size[0] % patch_size != 0 or image_size[1] % patch_size != 0:
            raise ValueError(
                'image dimensions must be divisible by the patch size')
        rows = image_size[0] // patch_size
        cols = image_size[1] // patch_size
        self.grid_size = rows, cols
        self.num_patches = rows * cols
        self.patch_size = patch_size
        # A stride-`patch_size` conv is equivalent to cutting the image into
        # patches and applying one shared linear projection per patch.
        self.proj = nn.Conv2d(channels, embed_dim, kernel_size=patch_size,
            stride=patch_size)

    def forward(self, im):
        _B, _C, _H, _W = im.shape
        # (B, E, H', W') -> (B, H'*W', E)
        patches = self.proj(im)
        return patches.flatten(2).transpose(1, 2)
def get_inputs():
    # Random 4x4 "image" batch with 4 channels for the harness.
    return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    # Constructor (args, kwargs) used by the benchmark harness.
    return [[], {'image_size': [4, 4], 'patch_size': 4, 'embed_dim': 4,
        'channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
    .constexpr):
    """In-place bias-add after the patch-projection convolution.

    16 elements total: 4 batches x 4 embed channels x a 1x1 spatial grid.
    """
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # embed-channel index, selects the bias element
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
    """Patch-embedding forward: conv (extern) + fused bias-add.

    args layout: [proj.weight, input, proj.bias]. Returns the embedded
    patches viewed as (B, num_patches, embed_dim) plus saved tensors.
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Stride-4 conv over a 4x4 image -> a single 1x1 patch per image.
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(4,
            4), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16)](buf1, primals_3, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    # flatten(2).transpose(1, 2) realized as a zero-copy reinterpret.
    return reinterpret_tensor(buf1, (4, 1, 4), (4, 1, 1), 0
        ), primals_1, primals_2
class PatchEmbeddingNew(nn.Module):
    """Patch embedding whose forward pass runs the compiled `call` graph."""

    def __init__(self, image_size, patch_size, embed_dim, channels):
        super().__init__()
        self.image_size = image_size
        if image_size[0] % patch_size != 0 or image_size[1] % patch_size != 0:
            raise ValueError(
                'image dimensions must be divisible by the patch size')
        rows = image_size[0] // patch_size
        cols = image_size[1] // patch_size
        self.grid_size = rows, cols
        self.num_patches = rows * cols
        self.patch_size = patch_size
        self.proj = nn.Conv2d(channels, embed_dim, kernel_size=patch_size,
            stride=patch_size)

    def forward(self, input_0):
        # `call` expects [weight, input, bias] positionally.
        outputs = call([self.proj.weight, input_0, self.proj.bias])
        return outputs[0]
|
Curli-quan/fewshot-select
|
PatchEmbedding
| false
| 17,205
|
[
"Apache-2.0"
] | 7
|
34f8ce5069ed1fbd01c1fa73a3ef264c98dadafe
|
https://github.com/Curli-quan/fewshot-select/tree/34f8ce5069ed1fbd01c1fa73a3ef264c98dadafe
|
h_swish
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class h_swish(nn.Module):
    """Hard-swish activation: ``x * relu6(x + 3) / 6`` (MobileNetV3)."""

    def __init__(self, inplace=True):
        super(h_swish, self).__init__()
        # Passed through to relu6; controls in-place evaluation of the gate.
        self.inplace = inplace

    def forward(self, x):
        gate = F.relu6(x + 3.0, inplace=self.inplace) / 6.0
        return gate * x
def get_inputs():
    # Single random 4-D activation tensor.
    return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    # No constructor arguments required.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Elementwise hard-swish: out = clamp(x + 3, 0, 6) / 6 * x."""
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 3.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)  # relu6 lower bound
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)  # relu6 upper bound
    tmp7 = 0.16666666666666666  # 1/6
    tmp8 = tmp6 * tmp7
    tmp9 = tmp8 * tmp0  # gate * x
    tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
    """Run the fused hard-swish kernel on a (4, 4, 4, 4) CUDA tensor.

    Takes ownership of `args`; returns a 1-tuple with the output buffer.
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class h_swishNew(nn.Module):
    """Hard-swish activation backed by the fused Triton kernel above."""

    def __init__(self, inplace=True):
        super(h_swishNew, self).__init__()
        # Kept for interface parity; the compiled kernel is out-of-place.
        self.inplace = inplace

    def forward(self, input_0):
        return call([input_0])[0]
|
DandelionLau/NetworkCollections
|
h_swish
| false
| 17,206
|
[
"Apache-2.0"
] | 8
|
29e5cd2091f7085b3241209ed9447f2baadbce41
|
https://github.com/DandelionLau/NetworkCollections/tree/29e5cd2091f7085b3241209ed9447f2baadbce41
|
Net1
|
import torch
import torch.nn.functional as F
from torch import nn
class Net1(nn.Module):
    """Two conv+relu+maxpool feature stages (3 -> 64 -> 64 channels)."""

    def __init__(self):
        super(Net1, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=0)
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)
        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)

    def forward(self, x):
        # Stage 1: 3x3 conv, relu, 3x3/stride-2 max pool.
        features = self.pool1(F.relu(self.conv1(x)))
        # Stage 2: same pattern on 64 channels.
        features = self.pool2(F.relu(self.conv2(features)))
        return features
def get_inputs():
    # One random RGB-like 64x64 batch of 4 images.
    return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
    # Default constructor: nothing to pass.
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Bias-add + ReLU after conv1, written into a row-padded buffer.

    Input is (4, 64, 62, 62) contiguous; the output buffer pads each
    channel plane from 3844 to 3872 elements (hence the 3872 stride).
    """
    xnumel = 984064
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 3844 % 64  # channel index, selects the bias element
    x0 = xindex % 3844  # position within a channel plane
    x4 = xindex // 3844  # combined (batch, channel) plane index
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')  # bias
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # relu
    tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    """3x3 / stride-2 max pool with argmax indices for the 62x62 planes.

    Each lane gathers a 3x3 window (the nine tmp loads; input rows have
    stride 62, half that appears as the `124 * x1` term for stride-2 rows)
    and writes the max value to out_ptr0 plus the winning window position
    (0..8, int8) to out_ptr1 for the backward pass. Ties keep the earlier
    element, matching `>` comparisons below.
    """
    xnumel = 230400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 30  # output column
    x1 = xindex // 30 % 30  # output row
    x2 = xindex // 900  # (batch, channel) plane
    x3 = xindex
    # Nine taps of the 3x3 window (row offsets 0 / 62 / 124 in the padded plane).
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x2), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
        eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (64 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
        eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (124 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
        eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (125 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
        eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (126 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
        eviction_policy='evict_last')
    # Running max over the nine taps.
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tmp12 = triton_helpers.maximum(tmp11, tmp10)
    tmp14 = triton_helpers.maximum(tmp13, tmp12)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    # Track which tap (0..8) produced the running max.
    tmp17 = tmp1 > tmp0
    tmp18 = tl.full([1], 1, tl.int8)
    tmp19 = tl.full([1], 0, tl.int8)
    tmp20 = tl.where(tmp17, tmp18, tmp19)
    tmp21 = tmp3 > tmp2
    tmp22 = tl.full([1], 2, tl.int8)
    tmp23 = tl.where(tmp21, tmp22, tmp20)
    tmp24 = tmp5 > tmp4
    tmp25 = tl.full([1], 3, tl.int8)
    tmp26 = tl.where(tmp24, tmp25, tmp23)
    tmp27 = tmp7 > tmp6
    tmp28 = tl.full([1], 4, tl.int8)
    tmp29 = tl.where(tmp27, tmp28, tmp26)
    tmp30 = tmp9 > tmp8
    tmp31 = tl.full([1], 5, tl.int8)
    tmp32 = tl.where(tmp30, tmp31, tmp29)
    tmp33 = tmp11 > tmp10
    tmp34 = tl.full([1], 6, tl.int8)
    tmp35 = tl.where(tmp33, tmp34, tmp32)
    tmp36 = tmp13 > tmp12
    tmp37 = tl.full([1], 7, tl.int8)
    tmp38 = tl.where(tmp36, tmp37, tmp35)
    tmp39 = tmp15 > tmp14
    tmp40 = tl.full([1], 8, tl.int8)
    tmp41 = tl.where(tmp39, tmp40, tmp38)
    tl.store(out_ptr0 + x3, tmp16, xmask)  # pooled value
    tl.store(out_ptr1 + x3, tmp41, xmask)  # argmax index within the window
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """In-place bias-add + ReLU after conv2 (no bounds mask: the launch
    covers exactly 200704 = 4*64*28*28 elements).
    """
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 784 % 64  # channel index (28*28 = 784 per plane)
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')  # bias
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # relu
    tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    """3x3 / stride-2 max pool with argmax indices for the 28x28 planes.

    Same structure as the first pooling kernel but for 28-wide input rows
    (row offsets 0 / 28 / 56, stride-2 rows via `56 * x1`). The index
    output is written with a padded plane stride of 10880.
    """
    xnumel = 43264
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 13  # output column
    x1 = xindex // 13 % 13  # output row
    x2 = xindex // 169  # (batch, channel) plane
    x5 = xindex
    x4 = xindex // 10816  # batch index (64 * 169 planes per batch)
    x6 = xindex % 10816  # offset within the batch for the padded index buffer
    # Nine taps of the 3x3 window.
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x1 + 784 * x2), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x1 + 784 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 2 * x0 + 56 * x1 + 784 * x2), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x1 + 784 * x2), xmask,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x1 + 784 * x2), xmask,
        eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (30 + 2 * x0 + 56 * x1 + 784 * x2), xmask,
        eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (56 + 2 * x0 + 56 * x1 + 784 * x2), xmask,
        eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (57 + 2 * x0 + 56 * x1 + 784 * x2), xmask,
        eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (58 + 2 * x0 + 56 * x1 + 784 * x2), xmask,
        eviction_policy='evict_last')
    # Running max over the nine taps.
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tmp12 = triton_helpers.maximum(tmp11, tmp10)
    tmp14 = triton_helpers.maximum(tmp13, tmp12)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    # Track which tap (0..8) produced the running max.
    tmp17 = tmp1 > tmp0
    tmp18 = tl.full([1], 1, tl.int8)
    tmp19 = tl.full([1], 0, tl.int8)
    tmp20 = tl.where(tmp17, tmp18, tmp19)
    tmp21 = tmp3 > tmp2
    tmp22 = tl.full([1], 2, tl.int8)
    tmp23 = tl.where(tmp21, tmp22, tmp20)
    tmp24 = tmp5 > tmp4
    tmp25 = tl.full([1], 3, tl.int8)
    tmp26 = tl.where(tmp24, tmp25, tmp23)
    tmp27 = tmp7 > tmp6
    tmp28 = tl.full([1], 4, tl.int8)
    tmp29 = tl.where(tmp27, tmp28, tmp26)
    tmp30 = tmp9 > tmp8
    tmp31 = tl.full([1], 5, tl.int8)
    tmp32 = tl.where(tmp30, tmp31, tmp29)
    tmp33 = tmp11 > tmp10
    tmp34 = tl.full([1], 6, tl.int8)
    tmp35 = tl.where(tmp33, tmp34, tmp32)
    tmp36 = tmp13 > tmp12
    tmp37 = tl.full([1], 7, tl.int8)
    tmp38 = tl.where(tmp36, tmp37, tmp35)
    tmp39 = tmp15 > tmp14
    tmp40 = tl.full([1], 8, tl.int8)
    tmp41 = tl.where(tmp39, tmp40, tmp38)
    tl.store(out_ptr0 + x5, tmp16, xmask)  # pooled value
    tl.store(out_ptr1 + (x6 + 10880 * x4), tmp41, xmask)  # argmax index
def call(args):
    """Run the traced Net1 forward pass on CUDA.

    args layout: [conv1.w, conv1.b, input, conv2.w, conv2.b]. Returns the
    pooled features (buf6) first, then tensors retained for backward.
    """
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # conv1 (bias applied separately by the fused kernel below).
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 64, 62, 62), (246016, 3844, 62, 1))
        # Padded-plane buffer (stride 3872) for the bias+relu output.
        buf1 = empty_strided_cuda((4, 64, 62, 62), (247808, 3872, 62, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(984064)](buf0, primals_2,
            buf1, 984064, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf0
        del primals_2
        # pool1: values (buf2) + argmax indices (buf3).
        buf2 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1),
            torch.float32)
        buf3 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(230400)](buf1, buf2,
            buf3, 230400, XBLOCK=512, num_warps=8, num_stages=1)
        # conv2 + in-place bias/relu.
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 64, 28, 28), (50176, 784, 28, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(200704)](buf5, primals_5,
            200704, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        # pool2: values (buf6) + argmax indices (buf7, padded stride).
        buf6 = empty_strided_cuda((4, 64, 13, 13), (10816, 169, 13, 1),
            torch.float32)
        buf7 = empty_strided_cuda((4, 64, 13, 13), (10880, 169, 13, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_3[grid(43264)](buf5, buf6,
            buf7, 43264, XBLOCK=256, num_warps=4, num_stages=1)
    return buf6, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf7
class Net1New(nn.Module):
    """Net1 whose forward pass runs the compiled Triton graph (`call`)."""

    def __init__(self):
        super(Net1New, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=0)
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)
        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)

    def forward(self, input_0):
        # `call` expects [conv1.w, conv1.b, input, conv2.w, conv2.b].
        params = [self.conv1.weight, self.conv1.bias, input_0,
            self.conv2.weight, self.conv2.bias]
        return call(params)[0]
|
DPBayes/DP-cross-silo-federated-learning
|
Net1
| false
| 17,207
|
[
"Apache-2.0"
] | 8
|
6707db703de5fae48c06116ae8ceee0685c9615d
|
https://github.com/DPBayes/DP-cross-silo-federated-learning/tree/6707db703de5fae48c06116ae8ceee0685c9615d
|
Upsample
|
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch._utils
class Upsample(nn.Module):
    """Nearest-neighbour upsampling by an integer `stride` via expand/view."""

    def __init__(self, stride=2):
        super(Upsample, self).__init__()
        self.stride = stride

    def forward(self, x):
        s = self.stride
        assert x.data.dim() == 4  # expects (B, C, H, W)
        b = x.data.size(0)
        c = x.data.size(1)
        h = x.data.size(2)
        w = x.data.size(3)
        # Insert singleton axes after H and W, expand each to `s`, and
        # collapse back so every pixel becomes an s x s block.
        expanded = x.view(b, c, h, 1, w, 1).expand(b, c, h, s, w, s)
        return expanded.contiguous().view(b, c, h * s, w * s)
def get_inputs():
    # Single random 4-D tensor (the module asserts dim() == 4).
    return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    # Default stride of 2 is used; nothing to pass.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch._utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Materialize the expanded (B, C, H, 2, W, 2) view contiguously.

    Each of the 1024 output elements reads its source pixel; the index
    arithmetic replicates every input pixel into a 2x2 block.
    """
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 2 % 4  # source column (output column // 2)
    x3 = xindex // 16  # source (b, c, h) row index
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x1 + 4 * x3), xmask, eviction_policy='evict_last'
        )
    tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
    """Nearest-neighbour 2x upsample of a (4, 4, 4, 4) CUDA tensor.

    Takes ownership of `args`; returns a 1-tuple holding the upsampled
    (4, 4, 8, 8) tensor (a zero-copy reinterpret of the clone buffer).
    """
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Contiguous copy of the (B, C, H, 2, W, 2) expanded view.
        buf0 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1
            ), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0),
class UpsampleNew(nn.Module):
    """Nearest-neighbour upsampling backed by the compiled `call` graph.

    Note: the compiled graph is specialized for stride 2 and (4,4,4,4)
    inputs; `stride` is kept only for interface parity.
    """

    def __init__(self, stride=2):
        super(UpsampleNew, self).__init__()
        self.stride = stride

    def forward(self, input_0):
        return call([input_0])[0]
|
DatatangAILAB/SuanFaShiXun04
|
Upsample
| false
| 17,209
|
[
"Apache-2.0"
] | 5
|
f478e40dd84240ac71cbb54e6bacf9ff556fbb3e
|
https://github.com/DatatangAILAB/SuanFaShiXun04/tree/f478e40dd84240ac71cbb54e6bacf9ff556fbb3e
|
PatchEmbed
|
import torch
import torch.nn as nn
class PatchEmbed(nn.Module):
    """Image to Patch Embedding.

    Cuts a square image into `patch_size` x `patch_size` patches and
    projects each to `embed_dim` with one strided convolution.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        per_side = img_size // patch_size
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = per_side * per_side
        # kernel == stride: non-overlapping patches, one projection each.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size,
            stride=patch_size)

    def forward(self, x):
        _B, _C, _H, _W = x.shape
        # (B, E, H', W') -> (B, H'*W', E)
        return self.proj(x).flatten(2).transpose(1, 2)
def get_inputs():
    # One random RGB-like 64x64 batch of 4 images.
    return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
    # Defaults (img_size=224, patch_size=16, ...) are used.
    return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    """Copy the (4, 3, 64, 64) NCHW input into a channel-minor buffer.

    The store offset (y0 + 3 * x2 + 12288 * y1) puts the channel index y0
    in the fastest-moving position, matching the (12288, 1, 192, 3)
    strides allocated for buf0 in `call`.
    """
    ynumel = 12  # batch * in_chans = 4 * 3
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # x axis covers 4096 exactly; no mask needed
    x2 = xindex   # spatial position (0..4095)
    y3 = yindex   # fused batch*channel index
    y0 = yindex % 3   # channel
    y1 = yindex // 3  # batch
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    """Relayout the (768, 3, 16, 16) conv weight to a channel-minor buffer.

    Same scheme as triton_poi_fused_0 but for the weight: the input-channel
    index y0 becomes innermost, matching strides (768, 1, 48, 3) in `call`.
    """
    ynumel = 2304  # out_chans * in_chans = 768 * 3
    xnumel = 256   # kernel area = 16 * 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3   # input channel
    y1 = yindex // 3  # output channel
    tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 768 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
    xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    """Add the conv bias and copy the result back to contiguous layout.

    in_ptr0 is the channel-minor conv output, in_ptr1 the per-channel bias;
    the store offset (x2 + 16 * y3) is plain contiguous (4, 768, 4, 4).
    """
    xnumel = 16  # spatial positions per (batch, channel): 4 * 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)  # y axis covers 3072 exactly
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 768   # embedding channel
    y1 = yindex // 768  # batch
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 768 * x2 + 12288 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # bias add fused into the relayout
    tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask)
def call(args):
    """Compiled PatchEmbed forward.

    Relayouts the input image and projection weight to channel-minor
    buffers, runs the strided convolution, fuses the bias add with the
    copy back to contiguous layout, and returns the (4, 16, 768) patch
    embedding view plus the relayout buffers (kept for backward).
    """
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_2, (768, 3, 16, 16), (768, 256, 16, 1))
    assert_size_stride(primals_3, (768,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Channel-minor copy of the input image.
        buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
            .float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(12, 4096)](primals_1, buf0, 12, 4096,
            XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_1
        # Channel-minor copy of the projection weight.
        buf1 = empty_strided_cuda((768, 3, 16, 16), (768, 1, 48, 3), torch.
            float32)
        triton_poi_fused_1[grid(2304, 256)](primals_2, buf1, 2304, 256,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_2
        # Patch projection: 16x16 kernel with stride 16; bias applied later.
        buf2 = extern_kernels.convolution(buf0, buf1, stride=(16, 16),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 768, 4, 4), (12288, 1, 3072, 768))
        buf3 = empty_strided_cuda((4, 768, 4, 4), (12288, 16, 4, 1), torch.
            float32)
        # Fused bias add + relayout to contiguous.
        triton_poi_fused_convolution_2[grid(3072, 16)](buf2, primals_3,
            buf3, 3072, 16, XBLOCK=16, YBLOCK=32, num_warps=4, num_stages=1)
        del buf2
        del primals_3
    # The contiguous (4, 768, 4, 4) buffer reinterpreted as (4, 16, 768)
    # realises the eager flatten(2).transpose(1, 2) without a copy.
    return reinterpret_tensor(buf3, (4, 16, 768), (12288, 1, 16), 0
        ), buf0, buf1
class PatchEmbedNew(nn.Module):
    """Patch embedding whose forward runs the compiled inductor graph."""

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        side = img_size // patch_size
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = side * side
        self.proj = nn.Conv2d(in_chans, embed_dim,
                              kernel_size=patch_size, stride=patch_size)

    def forward(self, input_0):
        # Feed the image plus the projection parameters to the compiled graph.
        weight = self.proj.weight
        bias = self.proj.bias
        outputs = call([input_0, weight, bias])
        return outputs[0]
|
Curli-quan/fewshot-select
|
PatchEmbed
| false
| 17,210
|
[
"Apache-2.0"
] | 7
|
34f8ce5069ed1fbd01c1fa73a3ef264c98dadafe
|
https://github.com/Curli-quan/fewshot-select/tree/34f8ce5069ed1fbd01c1fa73a3ef264c98dadafe
|
ExtResNetBlock
|
import torch
from torch.nn import functional as F
import torch.nn as nn
def padding(im, patch_size, fill_value=0):
    """Right/bottom-pad `im` so dims 2 and 3 become multiples of patch_size.

    Returns `im` unchanged when both dimensions already divide evenly.
    """
    height, width = im.size(2), im.size(3)
    rem_h = height % patch_size
    rem_w = width % patch_size
    pad_h = patch_size - rem_h if rem_h > 0 else 0
    pad_w = patch_size - rem_w if rem_w > 0 else 0
    if pad_h == 0 and pad_w == 0:
        return im
    # F.pad's last-dim-first convention: (left, right, top, bottom).
    return F.pad(im, (0, pad_w, 0, pad_h), value=fill_value)
def conv3d(in_channels, out_channels, kernel_size, bias, padding):
    """Thin factory for an nn.Conv3d with explicit bias and padding flags."""
    layer = nn.Conv3d(in_channels, out_channels, kernel_size,
                      padding=padding, bias=bias)
    return layer
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
    padding):
    """
    Create a list of modules with together constitute a single conv layer with non-linearity
    and optional batchnorm/groupnorm.
    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size(int or tuple): size of the convolving kernel
        order (string): order of things, e.g.
            'cr' -> conv + ReLU
            'gcr' -> groupnorm + conv + ReLU
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
            'bcr' -> batchnorm + conv + ReLU
        num_groups (int): number of groups for the GroupNorm
        padding (int or tuple): add zero-padding added to all three sides of the input
    Return:
        list of tuple (name, module)
    """
    assert 'c' in order, 'Conv layer MUST be present'
    assert order[0
        ] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
    modules = []
    for i, char in enumerate(order):
        if char == 'r':
            modules.append(('ReLU', nn.ReLU(inplace=True)))
        elif char == 'l':
            modules.append(('LeakyReLU', nn.LeakyReLU(inplace=True)))
        elif char == 'e':
            modules.append(('ELU', nn.ELU(inplace=True)))
        elif char == 'c':
            # A norm layer anywhere in `order` makes the conv bias redundant.
            bias = not ('g' in order or 'b' in order)
            modules.append(('conv', conv3d(in_channels, out_channels,
                kernel_size, bias, padding=padding)))
        elif char == 'g':
            # Norm placed before the conv normalises in_channels; after it,
            # out_channels.
            is_before_conv = i < order.index('c')
            if is_before_conv:
                num_channels = in_channels
            else:
                num_channels = out_channels
            # Fall back to a single group when there are fewer channels than
            # requested groups (NB: mutates num_groups for later iterations).
            if num_channels < num_groups:
                num_groups = 1
            assert num_channels % num_groups == 0, f'Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}'
            modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
                num_channels=num_channels)))
        elif char == 'b':
            is_before_conv = i < order.index('c')
            if is_before_conv:
                modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
            else:
                modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
        else:
            raise ValueError(
                f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
                )
    return modules
class SingleConv(nn.Sequential):
    """Conv3d bundled with its non-linearity and optional norm layer.

    The layer ordering is spelled out by `order` exactly as in create_conv,
    e.g. 'gcr' -> GroupNorm + Conv3d + ReLU, 'ce' -> Conv3d + ELU.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int or tuple): size of the convolving kernel
        order (string): layer ordering string (see create_conv)
        num_groups (int): number of groups for the GroupNorm
        padding (int or tuple): zero-padding for the convolution
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, order=
        'gcr', num_groups=8, padding=1):
        super(SingleConv, self).__init__()
        layers = create_conv(in_channels, out_channels, kernel_size, order,
                             num_groups, padding=padding)
        for name, module in layers:
            self.add_module(name, module)
class ExtResNetBlock(nn.Module):
    """SingleConv followed by a two-conv residual branch (arXiv:1706.00120).

    conv1 adapts the channel count; conv2 and conv3 form the residual body.
    conv3 is built with every non-linearity flag stripped from `order`, so
    the activation is applied exactly once — after the skip connection is
    added back in.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, order=
        'cge', num_groups=8, **kwargs):
        super(ExtResNetBlock, self).__init__()
        self.conv1 = SingleConv(in_channels, out_channels,
                                kernel_size=kernel_size, order=order,
                                num_groups=num_groups)
        self.conv2 = SingleConv(out_channels, out_channels,
                                kernel_size=kernel_size, order=order,
                                num_groups=num_groups)
        # Remove all non-linearity characters for the final conv stage.
        n_order = order
        for flag in 'rel':
            n_order = n_order.replace(flag, '')
        self.conv3 = SingleConv(out_channels, out_channels,
                                kernel_size=kernel_size, order=n_order,
                                num_groups=num_groups)
        # Pick the post-residual activation to match the one used in `order`.
        if 'l' in order:
            self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        elif 'e' in order:
            self.non_linearity = nn.ELU(inplace=True)
        else:
            self.non_linearity = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv1(x)
        residual = out
        out = self.conv3(self.conv2(out))
        out += residual
        return self.non_linearity(out)
def get_inputs():
    # A random 4-D tensor consumed by the 3-D conv stack.
    sample = torch.rand([4, 4, 4, 4])
    return [sample]


def get_init_inputs():
    # Positional args empty; channel counts supplied as keywords.
    return [[], {'in_channels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_elu_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    """Fused GroupNorm + ELU over one 64-element row per x index.

    Each x index normalises 64 values (for this config the whole sample:
    create_conv collapses num_groups to 1 when channels < groups), applies
    the per-channel scale (in_ptr1) and shift (in_ptr2) indexed by
    r3 = rindex // 16, then ELU. Stores the activation to in_out_ptr0 and
    the mean (out_ptr0) / rstd (out_ptr2) for backward.
    """
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    r3 = rindex // 16  # channel index (16 spatial elements per channel)
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9  # mean over the 64-element row
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]  # sum of squared deviations
    tmp17 = tmp0 - tmp10
    tmp18 = 64.0
    tmp19 = tmp16 / tmp18  # biased variance
    tmp20 = 1e-05
    tmp21 = tmp19 + tmp20
    tmp22 = libdevice.rsqrt(tmp21)  # rstd
    tmp23 = tmp17 * tmp22
    tmp25 = tmp23 * tmp24  # scale (gamma)
    tmp27 = tmp25 + tmp26  # shift (beta)
    tmp28 = 0.0
    tmp29 = tmp27 > tmp28
    tmp30 = 1.0
    tmp31 = tmp27 * tmp30
    tmp32 = libdevice.expm1(tmp31)
    tmp33 = tmp32 * tmp30
    tmp34 = tl.where(tmp29, tmp31, tmp33)  # ELU: x if x > 0 else expm1(x)
    tl.store(in_out_ptr0 + (r1 + 64 * x0), tmp34, xmask)
    tl.store(out_ptr2 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)


@triton.jit
def triton_per_fused_add_elu_native_group_norm_1(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    """Same fused GroupNorm as above, but adds a residual before the ELU.

    in_ptr3 carries the residual tensor (the output of the first fused
    stage, see `call`); the skip is summed into the normalised value
    (tmp29) prior to the activation.
    """
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    r3 = rindex // 16  # channel index
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr3 + (r1 + 64 * x0), xmask, other=0.0)  # residual
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9  # mean
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = tmp0 - tmp10
    tmp18 = 64.0
    tmp19 = tmp16 / tmp18  # biased variance
    tmp20 = 1e-05
    tmp21 = tmp19 + tmp20
    tmp22 = libdevice.rsqrt(tmp21)  # rstd
    tmp23 = tmp17 * tmp22
    tmp25 = tmp23 * tmp24
    tmp27 = tmp25 + tmp26
    tmp29 = tmp27 + tmp28  # residual add before activation
    tmp30 = 0.0
    tmp31 = tmp29 > tmp30
    tmp32 = 1.0
    tmp33 = tmp29 * tmp32
    tmp34 = libdevice.expm1(tmp33)
    tmp35 = tmp34 * tmp32
    tmp36 = tl.where(tmp31, tmp33, tmp35)  # ELU
    tl.store(in_out_ptr0 + (r1 + 64 * x0), tmp36, xmask)
    tl.store(out_ptr2 + x0, tmp22, xmask)
    tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
    """Compiled ExtResNetBlock forward.

    primals_1/5/8 are the three Conv3d weights, primals_3-4, 6-7, 9-10 the
    GroupNorm scale/shift pairs, primals_2 the (4, 4, 4, 4) input
    (reinterpreted as a batch-1 5-D tensor for Conv3d). Each convolution
    is followed by a fused GroupNorm+ELU kernel; the last kernel also adds
    the residual (buf6, the output of the first stage) before the ELU.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # conv1 on the batch-1 view of the input.
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1,
            4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
            1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
        buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf6 = buf4
        del buf4
        buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        get_raw_stream(0)
        # GroupNorm + ELU after conv1; buf1/buf5 keep mean/rstd for backward.
        triton_per_fused_elu_native_group_norm_0[grid(4)](buf6, buf0,
            primals_3, primals_4, buf1, buf5, 4, 64, XBLOCK=1, num_warps=2,
            num_stages=1)
        del primals_4
        # conv2 on the activated features.
        buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4,
            4, 4), (256, 64, 16, 4, 1), 0), primals_5, stride=(1, 1, 1),
            padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
        buf8 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf13 = buf11
        del buf11
        buf12 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        triton_per_fused_elu_native_group_norm_0[grid(4)](buf13, buf7,
            primals_6, primals_7, buf8, buf12, 4, 64, XBLOCK=1, num_warps=2,
            num_stages=1)
        del primals_7
        # conv3 (no activation in its order string).
        buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 4,
            4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1),
            padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
        buf15 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf20 = buf19
        del buf19
        buf18 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        # Final GroupNorm + residual add (buf6) + ELU.
        triton_per_fused_add_elu_native_group_norm_1[grid(4)](buf20, buf14,
            primals_9, primals_10, buf6, buf15, buf18, 4, 64, XBLOCK=1,
            num_warps=2, num_stages=1)
        del primals_10
    return (buf20, primals_1, primals_3, primals_5, primals_6, primals_8,
        primals_9, reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64,
        16, 4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0),
        reinterpret_tensor(buf5, (4, 1), (1, 1), 0), buf6, buf7,
        reinterpret_tensor(buf8, (4, 1), (1, 1), 0), reinterpret_tensor(
        buf12, (4, 1), (1, 1), 0), buf13, buf14, reinterpret_tensor(buf15,
        (4, 1), (1, 1), 0), reinterpret_tensor(buf18, (4, 1), (1, 1), 0), buf20
        )
def padding(im, patch_size, fill_value=0):
    """Pad the bottom/right of `im` up to the next multiple of patch_size."""
    H, W = im.size(2), im.size(3)
    # (p - d % p) % p is 0 when d already divides evenly.
    pad_h = (patch_size - H % patch_size) % patch_size
    pad_w = (patch_size - W % patch_size) % patch_size
    if pad_h or pad_w:
        return F.pad(im, (0, pad_w, 0, pad_h), value=fill_value)
    return im
def conv3d(in_channels, out_channels, kernel_size, bias, padding):
    """Build a padded 3-D convolution; `bias` toggles the additive term."""
    return nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                     kernel_size=kernel_size, padding=padding, bias=bias)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
    padding):
    """
    Create a list of modules with together constitute a single conv layer with non-linearity
    and optional batchnorm/groupnorm.
    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size(int or tuple): size of the convolving kernel
        order (string): order of things, e.g.
            'cr' -> conv + ReLU
            'gcr' -> groupnorm + conv + ReLU
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
            'bcr' -> batchnorm + conv + ReLU
        num_groups (int): number of groups for the GroupNorm
        padding (int or tuple): add zero-padding added to all three sides of the input
    Return:
        list of tuple (name, module)
    """
    assert 'c' in order, 'Conv layer MUST be present'
    assert order[0
        ] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
    modules = []
    for i, char in enumerate(order):
        if char == 'r':
            modules.append(('ReLU', nn.ReLU(inplace=True)))
        elif char == 'l':
            modules.append(('LeakyReLU', nn.LeakyReLU(inplace=True)))
        elif char == 'e':
            modules.append(('ELU', nn.ELU(inplace=True)))
        elif char == 'c':
            # The conv bias is dropped whenever a norm layer is present.
            bias = not ('g' in order or 'b' in order)
            modules.append(('conv', conv3d(in_channels, out_channels,
                kernel_size, bias, padding=padding)))
        elif char == 'g':
            # A norm before the conv operates on in_channels, after it on
            # out_channels.
            is_before_conv = i < order.index('c')
            if is_before_conv:
                num_channels = in_channels
            else:
                num_channels = out_channels
            # Fewer channels than groups: degrade to a single group (note
            # this mutates num_groups for any later 'g' in the same order).
            if num_channels < num_groups:
                num_groups = 1
            assert num_channels % num_groups == 0, f'Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}'
            modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
                num_channels=num_channels)))
        elif char == 'b':
            is_before_conv = i < order.index('c')
            if is_before_conv:
                modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
            else:
                modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
        else:
            raise ValueError(
                f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
                )
    return modules
class SingleConv(nn.Sequential):
    """A single Conv3d stage with optional norm and activation.

    `order` spells the layer sequence exactly as understood by create_conv:
    'cr' is conv + ReLU, 'gcr' prepends a GroupNorm, 'ce' is conv + ELU.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int or tuple): size of the convolving kernel
        order (string): layer ordering string
        num_groups (int): number of groups for the GroupNorm
        padding (int or tuple): zero-padding for the convolution
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, order=
        'gcr', num_groups=8, padding=1):
        super(SingleConv, self).__init__()
        # create_conv yields (name, module) pairs in execution order.
        for name, module in create_conv(in_channels, out_channels,
                                        kernel_size, order, num_groups,
                                        padding=padding):
            self.add_module(name, module)
class ExtResNetBlockNew(nn.Module):
    """
    Basic UNet block consisting of a SingleConv followed by the residual block.
    The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number
    of output channels is compatible with the residual block that follows.
    This block can be used instead of standard DoubleConv in the Encoder module.
    Motivated by: https://arxiv.org/pdf/1706.00120.pdf
    Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm.

    This variant dispatches forward() to the compiled inductor graph
    (`call`) instead of running the submodules eagerly.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, order=
        'cge', num_groups=8, **kwargs):
        super(ExtResNetBlockNew, self).__init__()
        self.conv1 = SingleConv(in_channels, out_channels, kernel_size=
            kernel_size, order=order, num_groups=num_groups)
        self.conv2 = SingleConv(out_channels, out_channels, kernel_size=
            kernel_size, order=order, num_groups=num_groups)
        # conv3's order drops every non-linearity so the activation happens
        # only after the residual sum.
        n_order = order
        for c in 'rel':
            n_order = n_order.replace(c, '')
        self.conv3 = SingleConv(out_channels, out_channels, kernel_size=
            kernel_size, order=n_order, num_groups=num_groups)
        if 'l' in order:
            self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        elif 'e' in order:
            self.non_linearity = nn.ELU(inplace=True)
        else:
            self.non_linearity = nn.ReLU(inplace=True)

    def forward(self, input_0):
        # Collect the compiled graph's inputs: conv weight plus groupnorm
        # scale/shift for each of the three stages, in the order `call` expects.
        primals_1 = self.conv1.conv.weight
        primals_3 = self.conv1.groupnorm.weight
        primals_4 = self.conv1.groupnorm.bias
        primals_5 = self.conv2.conv.weight
        primals_6 = self.conv2.groupnorm.weight
        primals_7 = self.conv2.groupnorm.bias
        primals_8 = self.conv3.conv.weight
        primals_9 = self.conv3.groupnorm.weight
        primals_10 = self.conv3.groupnorm.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
        # output[0] is the block result; the rest are saved-for-backward.
        return output[0]
|
Curli-quan/fewshot-select
|
ExtResNetBlock
| false
| 17,211
|
[
"Apache-2.0"
] | 7
|
34f8ce5069ed1fbd01c1fa73a3ef264c98dadafe
|
https://github.com/Curli-quan/fewshot-select/tree/34f8ce5069ed1fbd01c1fa73a3ef264c98dadafe
|
HardSwish
|
import torch
import torch.nn as nn
class HardSwish(nn.Module):
    """Hard-swish activation: ``x * relu6(x + 3) / 6``."""

    def __init__(self, inplace=False):
        super(HardSwish, self).__init__()
        self.act = nn.ReLU6(inplace)

    def forward(self, x):
        # relu6(x + 3) / 6 is a gate clamped to [0, 1].
        gate = self.act(x + 3)
        return x * gate / 6


def get_inputs():
    # One random 4-D activation tensor.
    sample = torch.rand([4, 4, 4, 4])
    return [sample]


def get_init_inputs():
    # HardSwish takes no constructor arguments.
    return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    """Elementwise hard-swish: out = x * clamp(x + 3, 0, 6) / 6.

    The division by 6 is folded into a multiply by 1/6.
    """
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 3.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)  # ReLU half of ReLU6
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)  # cap at 6
    tmp7 = tmp0 * tmp6
    tmp8 = 0.16666666666666666  # 1/6
    tmp9 = tmp7 * tmp8
    tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
    """Run the fused hard-swish kernel over one (4, 4, 4, 4) CUDA tensor."""
    arg0_1, = args
    args.clear()  # release the caller's reference
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,
class HardSwishNew(nn.Module):
    """Hard-swish whose forward dispatches to the fused Triton kernel."""

    def __init__(self, inplace=False):
        super(HardSwishNew, self).__init__()
        self.act = nn.ReLU6(inplace)

    def forward(self, input_0):
        result, = call([input_0])
        return result
|
DetectionBLWX/WSDDN.pytorch
|
HardSwish
| false
| 17,212
|
[
"MIT"
] | 7
|
05020d9d0445af90ba0af3f095aa12b18e3da7d2
|
https://github.com/DetectionBLWX/WSDDN.pytorch/tree/05020d9d0445af90ba0af3f095aa12b18e3da7d2
|
ReverseMaskConv
|
import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
def weights_init(init_type='gaussian'):
    """Return an ``nn.Module.apply``-compatible weight initialiser.

    Only modules whose class name starts with 'Conv' or 'Linear' and that
    expose a ``weight`` attribute are touched; their bias (when present)
    is zeroed regardless of ``init_type``.
    """

    def init_fun(m):
        classname = m.__class__.__name__
        is_target = (classname.find('Conv') == 0 or
                     classname.find('Linear') == 0) and hasattr(m, 'weight')
        if not is_target:
            return
        if init_type == 'gaussian':
            nn.init.normal_(m.weight, 0.0, 0.02)
        elif init_type == 'xavier':
            nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
        elif init_type == 'kaiming':
            nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        elif init_type == 'orthogonal':
            nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
        elif init_type == 'default':
            pass  # leave the framework's default initialisation in place
        else:
            assert 0, 'Unsupported initialization: {}'.format(init_type)
        if hasattr(m, 'bias') and m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    return init_fun
class GaussActivation(nn.Module):
    """Asymmetric Gaussian-shaped activation with learnable parameters.

    Below ``mu`` the response is ``a * exp(-sigma1 * (x - mu)^2)``; at or
    above ``mu`` it is ``1 + (a - 1) * exp(-sigma2 * (x - mu)^2)``, so the
    curve equals ``a`` at x == mu and decays towards 0 on the left and 1
    on the right. All four scalars are clamped into fixed ranges on every
    forward call.
    """

    def __init__(self, a, mu, sigma1, sigma2):
        super(GaussActivation, self).__init__()
        self.a = Parameter(torch.tensor(a, dtype=torch.float32))
        self.mu = Parameter(torch.tensor(mu, dtype=torch.float32))
        self.sigma1 = Parameter(torch.tensor(sigma1, dtype=torch.float32))
        self.sigma2 = Parameter(torch.tensor(sigma2, dtype=torch.float32))

    def forward(self, inputFeatures):
        # Re-clamp the learnable parameters into their valid ranges.
        self.a.data = torch.clamp(self.a.data, 1.01, 6.0)
        self.mu.data = torch.clamp(self.mu.data, 0.1, 3.0)
        self.sigma1.data = torch.clamp(self.sigma1.data, 1.0, 2.0)
        self.sigma2.data = torch.clamp(self.sigma2.data, 1.0, 2.0)
        below = inputFeatures < self.mu
        at_or_above = inputFeatures >= self.mu
        diff_sq = (inputFeatures - self.mu) ** 2
        left = self.a * torch.exp(-self.sigma1 * diff_sq)
        # Zero each branch on the wrong side of mu, then combine.
        left.masked_fill_(at_or_above, 0.0)
        right = 1 + (self.a - 1) * torch.exp(-self.sigma2 * diff_sq)
        right.masked_fill_(below, 0.0)
        return left + right
class MaskUpdate(nn.Module):
def __init__(self, alpha):
super(MaskUpdate, self).__init__()
self.updateFunc = nn.ReLU(False)
self.alpha = Parameter(torch.tensor(alpha, dtype=torch.float32))
def forward(self, inputMaskMap):
self.alpha.data = torch.clamp(self.alpha.data, 0.6, 0.8)
return torch.pow(self.updateFunc(inputMaskMap), self.alpha)
class ReverseMaskConv(nn.Module):
    """Strided conv over a mask plus its activation and update maps.

    Returns a pair: the Gaussian-activated mask features and the updated
    mask, both computed from the same convolution output.
    """

    def __init__(self, inputChannels, outputChannels, kernelSize=4, stride=
        2, padding=1, dilation=1, groups=1, convBias=False):
        super(ReverseMaskConv, self).__init__()
        self.reverseMaskConv = nn.Conv2d(inputChannels, outputChannels,
                                         kernelSize, stride, padding,
                                         dilation, groups, bias=convBias)
        # Gaussian weight init by default (zeroes the bias when present).
        self.reverseMaskConv.apply(weights_init())
        self.activationFuncG_A = GaussActivation(1.1, 2.0, 1.0, 1.0)
        self.updateMask = MaskUpdate(0.8)

    def forward(self, inputMasks):
        features = self.reverseMaskConv(inputMasks)
        maskActiv = self.activationFuncG_A(features)
        maskUpdate = self.updateMask(features)
        return maskActiv, maskUpdate


def get_inputs():
    # A random 4-channel mask batch.
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inputChannels': 4, 'outputChannels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Clamp the scalar parameter `a` into [1.01, 6.0]."""
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    tmp0 = tl.load(in_ptr0 + 0)  # single scalar element
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = 1.01
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp4 = 6.0
    tmp5 = triton_helpers.minimum(tmp3, tmp4)
    tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None)


@triton.jit
def triton_poi_fused_clamp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Clamp the scalar parameter `mu` into [0.1, 3.0]."""
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = 0.1
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp4 = 3.0
    tmp5 = triton_helpers.minimum(tmp3, tmp4)
    tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None)


@triton.jit
def triton_poi_fused_clamp_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Clamp a sigma parameter into [1.0, 2.0] (used for sigma1 and sigma2)."""
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = 1.0
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp4 = 2.0
    tmp5 = triton_helpers.minimum(tmp3, tmp4)
    tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None)


@triton.jit
def triton_poi_fused_clamp_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    """Clamp the scalar exponent `alpha` into [0.6, 0.8]."""
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = 0.6
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp4 = 0.8
    tmp5 = triton_helpers.minimum(tmp3, tmp4)
    tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None)


@triton.jit
def triton_poi_fused_add_exp_ge_lt_masked_fill_mul_neg_pow_relu_sub_4(in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1,
    out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
    """Fuse GaussActivation and MaskUpdate over the conv output.

    Inputs: conv features (in_ptr0) and the clamped scalars mu (in_ptr1),
    a (in_ptr2), sigma1 (in_ptr3), sigma2 (in_ptr4), alpha (in_ptr5) — see
    the argument order in `call`. Outputs: the x < mu mask (out_ptr0), the
    x >= mu mask (out_ptr1), the combined Gaussian activation (out_ptr2)
    and relu(x) ** alpha (out_ptr3).
    """
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)  # mu
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp5 = tl.load(in_ptr2 + 0)  # a
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp7 = tl.load(in_ptr3 + 0)  # sigma1
    tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
    tmp19 = tl.load(in_ptr4 + 0)  # sigma2
    tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
    tmp30 = tl.load(in_ptr5 + 0)  # alpha
    tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
    tmp3 = tmp0 < tmp2   # lowerThanMu
    tmp4 = tmp0 >= tmp2  # largerThanMu
    # Left branch: a * exp(-sigma1 * (x - mu)^2), zeroed where x >= mu.
    tmp9 = -tmp8
    tmp10 = tmp0 - tmp2
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 * tmp11
    tmp13 = tl_math.exp(tmp12)
    tmp14 = tmp6 * tmp13
    tmp15 = 0.0
    tmp16 = tl.where(tmp4, tmp15, tmp14)
    # Right branch: 1 + (a - 1) * exp(-sigma2 * (x - mu)^2), zeroed where x < mu.
    tmp17 = 1.0
    tmp18 = tmp6 - tmp17
    tmp21 = -tmp20
    tmp22 = tmp21 * tmp11
    tmp23 = tl_math.exp(tmp22)
    tmp24 = tmp18 * tmp23
    tmp25 = tmp24 + tmp17
    tmp26 = tl.where(tmp3, tmp15, tmp25)
    tmp27 = tmp16 + tmp26  # combined Gaussian activation
    # Mask update: relu(x) ** alpha.
    tmp28 = tl.full([1], 0, tl.int32)
    tmp29 = triton_helpers.maximum(tmp28, tmp0)
    tmp32 = libdevice.pow(tmp29, tmp31)
    tl.store(out_ptr0 + x0, tmp3, xmask)
    tl.store(out_ptr1 + x0, tmp4, xmask)
    tl.store(out_ptr2 + x0, tmp27, xmask)
    tl.store(out_ptr3 + x0, tmp32, xmask)
def call(args):
    """Compiled ReverseMaskConv forward.

    primals_1 is the conv weight, primals_2 the input masks, primals_3..7
    the scalar parameters a, mu, sigma1, sigma2, alpha. Clamps each scalar,
    runs the stride-2 conv, then one fused kernel produces the Gaussian
    activation (buf7) and the updated mask (buf9). torch.ops.aten.set_
    writes the clamped values back into the parameter tensors, replicating
    the eager code's ``self.x.data = torch.clamp(...)`` side effect.
    """
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (), ())
    assert_size_stride(primals_4, (), ())
    assert_size_stride(primals_5, (), ())
    assert_size_stride(primals_6, (), ())
    assert_size_stride(primals_7, (), ())
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Strided conv: (4, 4, 4, 4) -> (4, 4, 2, 2), no bias.
        buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2,
            2), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
        # Clamp each scalar parameter into its valid range.
        buf1 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_0[grid(1)](primals_3, buf1, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((), (), torch.float32)
        triton_poi_fused_clamp_1[grid(1)](primals_4, buf2, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((), (), torch.float32)
        triton_poi_fused_clamp_2[grid(1)](primals_5, buf3, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((), (), torch.float32)
        triton_poi_fused_clamp_2[grid(1)](primals_6, buf4, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
        buf8 = empty_strided_cuda((), (), torch.float32)
        triton_poi_fused_clamp_3[grid(1)](primals_7, buf8, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool)
        buf6 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool)
        buf7 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        buf9 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        # Fused Gauss activation + mask update over the conv output.
        triton_poi_fused_add_exp_ge_lt_masked_fill_mul_neg_pow_relu_sub_4[grid
            (64)](buf0, buf2, buf1, buf3, buf4, buf8, buf5, buf6, buf7,
            buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
        # Persist the clamped values back into the original parameter tensors.
        buf10 = torch.ops.aten.set_.source_Tensor(primals_3, buf1)
        assert_size_stride(buf10, (), ())
        del primals_3
        buf20 = torch.ops.aten.set_.source_Tensor(primals_4, buf2)
        assert_size_stride(buf20, (), ())
        del primals_4
        buf30 = torch.ops.aten.set_.source_Tensor(primals_5, buf3)
        assert_size_stride(buf30, (), ())
        del primals_5
        buf35 = torch.ops.aten.set_.source_Tensor(primals_6, buf4)
        assert_size_stride(buf35, (), ())
        del primals_6
        buf40 = torch.ops.aten.set_.source_Tensor(primals_7, buf8)
        assert_size_stride(buf40, (), ())
        del primals_7
    return (buf7, buf9, primals_1, primals_2, buf0, buf1, buf2, buf3, buf4,
        buf5, buf6, buf8, buf9)
def weights_init(init_type='gaussian'):
    """Build a module initializer for use with ``nn.Module.apply``.

    The returned closure initializes the ``weight`` of modules whose class
    name starts with 'Conv' or 'Linear' according to ``init_type``
    ('gaussian', 'xavier', 'kaiming', 'orthogonal', or 'default' for a
    no-op), and zeroes their ``bias`` when one is present.
    """

    def init_fun(m):
        name = type(m).__name__
        # Only touch Conv*/Linear* modules that actually carry a weight.
        if not (name.startswith(('Conv', 'Linear')) and hasattr(m, 'weight')):
            return
        if init_type == 'gaussian':
            nn.init.normal_(m.weight, 0.0, 0.02)
        elif init_type == 'xavier':
            nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
        elif init_type == 'kaiming':
            nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        elif init_type == 'orthogonal':
            nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
        elif init_type == 'default':
            pass  # leave PyTorch's default initialization in place
        else:
            assert 0, 'Unsupported initialization: {}'.format(init_type)
        if getattr(m, 'bias', None) is not None:
            nn.init.constant_(m.bias, 0.0)
    return init_fun
class GaussActivation(nn.Module):
    """Asymmetric Gaussian activation with learnable shape parameters.

    Left of ``mu`` the response is ``a * exp(-sigma1 * (x - mu)**2)``;
    at and right of ``mu`` it is ``1 + (a - 1) * exp(-sigma2 * (x - mu)**2)``.
    All four parameters are re-clamped into fixed ranges on every forward.
    """

    def __init__(self, a, mu, sigma1, sigma2):
        super(GaussActivation, self).__init__()

        def as_param(value):
            return Parameter(torch.tensor(value, dtype=torch.float32))

        self.a = as_param(a)
        self.mu = as_param(mu)
        self.sigma1 = as_param(sigma1)
        self.sigma2 = as_param(sigma2)

    def forward(self, inputFeatures):
        # Clamp on .data so the clamping itself stays outside autograd.
        self.a.data.clamp_(1.01, 6.0)
        self.mu.data.clamp_(0.1, 3.0)
        self.sigma1.data.clamp_(1.0, 2.0)
        self.sigma2.data.clamp_(1.0, 2.0)
        below = inputFeatures < self.mu
        at_or_above = inputFeatures >= self.mu
        sq_dist = (inputFeatures - self.mu) ** 2
        # Left lobe, zeroed (in place) wherever x >= mu.
        left = self.a * torch.exp(-self.sigma1 * sq_dist)
        left.masked_fill_(at_or_above, 0.0)
        # Right lobe, zeroed (in place) wherever x < mu.
        right = 1 + (self.a - 1) * torch.exp(-self.sigma2 * sq_dist)
        right.masked_fill_(below, 0.0)
        return left + right
class MaskUpdate(nn.Module):
    """Mask-update rule: ``relu(mask) ** alpha`` with learnable alpha.

    ``alpha`` is re-clamped to [0.6, 0.8] on every forward pass.
    """

    def __init__(self, alpha):
        super(MaskUpdate, self).__init__()
        # Non-inplace ReLU leaves the incoming mask tensor untouched.
        self.updateFunc = nn.ReLU(False)
        self.alpha = Parameter(torch.tensor(alpha, dtype=torch.float32))

    def forward(self, inputMaskMap):
        # Keep the exponent in its valid range, outside the autograd graph.
        self.alpha.data.clamp_(0.6, 0.8)
        rectified = self.updateFunc(inputMaskMap)
        return rectified ** self.alpha
class ReverseMaskConvNew(nn.Module):
    """Reverse-mask convolution block with a Triton-compiled forward.

    Applies a strided conv to the (reverse) mask, then the Gaussian
    activation and the mask-update rule inside the fused ``call`` kernel.
    Returns a pair of tensors (presumably activated features and the
    updated mask — see ``call`` for the exact buffers).
    """

    def __init__(self, inputChannels, outputChannels, kernelSize=4, stride=
        2, padding=1, dilation=1, groups=1, convBias=False):
        super(ReverseMaskConvNew, self).__init__()
        self.reverseMaskConv = nn.Conv2d(inputChannels, outputChannels,
            kernelSize, stride, padding, dilation, groups, bias=convBias)
        self.reverseMaskConv.apply(weights_init())
        self.activationFuncG_A = GaussActivation(1.1, 2.0, 1.0, 1.0)
        self.updateMask = MaskUpdate(0.8)

    def forward(self, input_0):
        # Order matters: this is exactly the primals layout the compiled
        # `call` entry point was generated for.
        params = [
            self.reverseMaskConv.weight,
            input_0,
            self.activationFuncG_A.a,
            self.activationFuncG_A.mu,
            self.activationFuncG_A.sigma1,
            self.activationFuncG_A.sigma2,
            self.updateMask.alpha,
        ]
        result = call(params)
        return result[0], result[1]
|
DLwbm123/LBAM_inpainting
|
ReverseMaskConv
| false
| 17,213
|
[
"MIT"
] | 7
|
c809c3cedf09cda7c175e930c7834ac39d8f526f
|
https://github.com/DLwbm123/LBAM_inpainting/tree/c809c3cedf09cda7c175e930c7834ac39d8f526f
|
ReOrgLayer
|
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch._utils
class ReOrgLayer(nn.Module):
    """Space-to-depth reorganisation (YOLOv2-style "reorg").

    Rearranges each ``stride x stride`` spatial block into the channel
    dimension: ``(B, C, H, W) -> (B, C * stride**2, H // stride, W // stride)``.
    Both spatial dimensions must be divisible by ``stride``.
    """

    def __init__(self, stride=2):
        super(ReOrgLayer, self).__init__()
        self.stride = stride

    def forward(self, x):
        assert x.data.dim() == 4
        B, C, H, W = x.data.shape
        hs = self.stride
        ws = self.stride
        assert H % hs == 0, 'The stride ' + str(self.stride
            ) + ' is not a proper divisor of height ' + str(H)
        # Fixed: this message previously said "height" (copy-paste bug).
        assert W % ws == 0, 'The stride ' + str(self.stride
            ) + ' is not a proper divisor of width ' + str(W)
        # Split H and W into (coarse, fine) index pairs, then progressively
        # move the fine (sub-block) indices ahead of the coarse spatial grid
        # and finally fold them into the channel dimension.
        x = x.view(B, C, H // hs, hs, W // ws, ws).transpose(-2, -3
            ).contiguous()
        x = x.view(B, C, H // hs * W // ws, hs, ws)
        x = x.view(B, C, H // hs * W // ws, hs * ws).transpose(-1, -2
            ).contiguous()
        x = x.view(B, C, ws * hs, H // ws, W // ws).transpose(1, 2).contiguous(
            )
        x = x.view(B, C * ws * hs, H // ws, W // ws)
        return x
def get_inputs():
    """Example forward inputs: a single random (4, 4, 4, 4) tensor."""
    sample = torch.rand([4, 4, 4, 4])
    return [sample]
def get_init_inputs():
    """Example constructor arguments: no positional args, no kwargs."""
    init_args, init_kwargs = [], {}
    return [init_args, init_kwargs]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch._utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
    constexpr, XBLOCK: tl.constexpr):
    # Inductor-generated copy kernel for the ReOrg permutation: gathers from
    # the (4, 4, 4, 4) contiguous input and writes a contiguous
    # (4, 4, 4, 2, 2) buffer that the caller reinterprets as (4, 16, 2, 2).
    # NOTE(review): the hard-coded strides (2/4/8/64) and extents below are
    # specific to a stride-2 reorg of a 4x4x4x4 input — confirm against the
    # launching `call`.
    ynumel = 16
    xnumel = 16
    # 2-D launch grid: program_id(1) covers y (16 elems), program_id(0) x.
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    # Decompose the flat x/y indices into sub-block and grid coordinates.
    x2 = xindex % 2
    x3 = xindex // 2
    y0 = yindex % 4
    y1 = yindex // 4
    x5 = xindex
    y4 = yindex
    # Strided gather from the input; the write below is fully contiguous
    # (x5 + 16 * y4 is the flat output offset).
    tmp0 = tl.load(in_ptr0 + (2 * x2 + 4 * (y0 // 2) + 8 * x3 + 64 * y1 +
        y0 % 2), xmask & ymask)
    tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)
def call(args):
    # Inductor-generated entry point for ReOrgLayer: validates the input
    # layout, launches the gather kernel on CUDA device 0, and returns the
    # reorganised tensor as a zero-copy view of the freshly written buffer.
    arg0_1, = args
    args.clear()  # drop the caller's reference so the input can be freed early
    # Guard the exact shape/strides the kernel's index math was compiled for.
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Scratch buffer holding the permuted copy; reinterpreted below as
        # the (4, 16, 2, 2) ReOrg output without another copy.
        buf0 = empty_strided_cuda((4, 4, 4, 2, 2), (64, 16, 4, 2, 1), torch
            .float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(16, 16)](arg0_1, buf0, 16, 16, XBLOCK
            =16, YBLOCK=16, num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 16, 2, 2), (64, 4, 2, 1), 0),
class ReOrgLayerNew(nn.Module):
    """Triton-compiled ReOrg layer.

    Behaves like ``ReOrgLayer`` for the compiled configuration, dispatching
    the forward pass to the Inductor-generated ``call`` entry point.
    """

    def __init__(self, stride=2):
        super(ReOrgLayerNew, self).__init__()
        self.stride = stride

    def forward(self, input_0):
        # `call` expects a mutable list and returns a 1-tuple.
        (result,) = call([input_0])
        return result
|
DatatangAILAB/SuanFaShiXun04
|
ReOrgLayer
| false
| 17,214
|
[
"Apache-2.0"
] | 5
|
f478e40dd84240ac71cbb54e6bacf9ff556fbb3e
|
https://github.com/DatatangAILAB/SuanFaShiXun04/tree/f478e40dd84240ac71cbb54e6bacf9ff556fbb3e
|
Net
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class Net(nn.Module):
    """Four-layer ReLU MLP over a treatment vector ``x`` and covariates ``w``.

    When ``is_discrete_input`` is True, ``x`` is taken to be integer
    category indices and passed through an ``nn.Embedding`` (of size
    ``embedding_dim``, defaulting to ``x_d``) before being concatenated
    with ``w``; otherwise ``x`` flows through unchanged.
    """

    def __init__(self, x_d, w_d, out_d, hidden_d1=256, hidden_d2=512,
        hidden_d3=256, is_discrete_input=False, is_discrete_output=False,
        embedding_dim=None):
        super().__init__()
        self._x_d = x_d
        self._out_d = out_d
        self.is_discrete_input = is_discrete_input
        self.is_discrete_output = is_discrete_output
        if is_discrete_input:
            # Fixed: the second half of this message was a dangling string
            # literal (a no-op statement) that never reached the assert.
            assert x_d is not None, ('Please specify the dimension of the '
                'treatment vector.')
            embedding_dim = x_d if embedding_dim is None else embedding_dim
            self.embed = nn.Embedding(x_d, embedding_dim)
            in_d = int(embedding_dim + w_d)
        else:
            self.embed = nn.Identity()
            in_d = int(x_d + w_d)
        self.fc1 = nn.Linear(in_d, hidden_d1)
        self.fc2 = nn.Linear(hidden_d1, hidden_d2)
        self.fc3 = nn.Linear(hidden_d2, hidden_d3)
        self.fc4 = nn.Linear(hidden_d3, out_d)

    def forward(self, x, w):
        """Embed ``x`` (identity when continuous), concatenate with ``w``
        along dim 1, and run the MLP. Returns the raw (unactivated) output."""
        x = self.embed(x)
        x = torch.cat((x, w), dim=1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        return self.fc4(x)
def get_inputs():
    """Example forward inputs: two random (4, 4) tensors (x and w)."""
    shape = [4, 4]
    return [torch.rand(shape), torch.rand(shape)]
def get_init_inputs():
    """Example constructor arguments: no positional args, x_d=w_d=out_d=4."""
    return [[], dict(x_d=4, w_d=4, out_d=4)]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Fused torch.cat((x, w), dim=1) for two (4, 4) inputs: writes a (4, 8)
    # row-major output where columns 0-3 come from in_ptr0 and columns 4-7
    # from in_ptr1.
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8  # output column
    x1 = xindex // 8  # output row
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3  # column < 4 -> element comes from the first tensor
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    # Second tensor is indexed by (column - 4); masked loads return 0.0 for
    # the branch not taken.
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place fused bias-add + ReLU over a 256-feature activation
    # (4 x 256 = 1024 elements): in_out[i] = max(0, in_out[i] + bias[i % 256]).
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 256  # bias index: one bias value per output feature
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # In-place fused bias-add + ReLU for the 512-feature hidden layer:
    # in_out[i] = max(0, in_out[i] + bias[i % 512]).  xnumel is unused and no
    # bounds mask is applied — presumably the launch (2048 elements, XBLOCK a
    # power of two dividing it) covers the buffer exactly; the all-true mask
    # below is a no-op placeholder.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512  # bias index per output feature
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
    # Inductor-generated forward for Net: cat -> (mm + bias + relu) x 3 ->
    # addmm.  Expects [x, w, fc1.w, fc1.b, fc2.w, fc2.b, fc3.w, fc3.b,
    # fc4.w, fc4.b]; returns the final output followed by intermediates
    # kept alive for the backward pass.
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    # Guard the exact shapes/strides the fused kernels were compiled for.
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (256, 8), (8, 1))
    assert_size_stride(primals_4, (256,), (1,))
    assert_size_stride(primals_5, (512, 256), (256, 1))
    assert_size_stride(primals_6, (512,), (1,))
    assert_size_stride(primals_7, (256, 512), (512, 1))
    assert_size_stride(primals_8, (256,), (1,))
    assert_size_stride(primals_9, (4, 256), (256, 1))
    assert_size_stride(primals_10, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        # buf0 = cat((x, w), dim=1)
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        # buf1 = buf0 @ fc1.weight^T (weight passed as a stride-swapped view)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 256), (1,
            8), 0), out=buf1)
        del primals_3
        buf2 = buf1
        del buf1
        # buf2 = relu(buf2 + fc1.bias), in place
        triton_poi_fused_relu_1[grid(1024)](buf2, primals_4, 1024, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (256, 512), (
            1, 256), 0), out=buf3)
        buf4 = buf3
        del buf3
        # buf4 = relu(buf4 + fc2.bias), in place
        triton_poi_fused_relu_2[grid(2048)](buf4, primals_6, 2048, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_6
        buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (512, 256), (
            1, 512), 0), out=buf5)
        buf6 = buf5
        del buf5
        # buf6 = relu(buf6 + fc3.bias), in place
        triton_poi_fused_relu_1[grid(1024)](buf6, primals_8, 1024, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_8
        buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # buf7 = fc4.bias + buf6 @ fc4.weight^T
        extern_kernels.addmm(primals_10, buf6, reinterpret_tensor(primals_9,
            (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf7)
        del primals_10
    return buf7, buf0, buf2, buf4, buf6, primals_9, primals_7, primals_5
class NetNew(nn.Module):
    """Triton-compiled variant of ``Net``: same parameters and architecture,
    but ``forward`` dispatches to the Inductor-generated ``call``.

    NOTE: the compiled forward path does not invoke ``self.embed`` — the
    graph was traced for the continuous-input configuration.
    """

    def __init__(self, x_d, w_d, out_d, hidden_d1=256, hidden_d2=512,
        hidden_d3=256, is_discrete_input=False, is_discrete_output=False,
        embedding_dim=None):
        super().__init__()
        self._x_d = x_d
        self._out_d = out_d
        self.is_discrete_input = is_discrete_input
        self.is_discrete_output = is_discrete_output
        if is_discrete_input:
            # Fixed: the second half of this message was a dangling string
            # literal (a no-op statement) that never reached the assert.
            assert x_d is not None, ('Please specify the dimension of the '
                'treatment vector.')
            embedding_dim = x_d if embedding_dim is None else embedding_dim
            self.embed = nn.Embedding(x_d, embedding_dim)
            in_d = int(embedding_dim + w_d)
        else:
            self.embed = nn.Identity()
            in_d = int(x_d + w_d)
        self.fc1 = nn.Linear(in_d, hidden_d1)
        self.fc2 = nn.Linear(hidden_d1, hidden_d2)
        self.fc3 = nn.Linear(hidden_d2, hidden_d3)
        self.fc4 = nn.Linear(hidden_d3, out_d)

    def forward(self, input_0, input_1):
        """Run the compiled kernel path; returns only the network output."""
        primals_3 = self.fc1.weight
        primals_4 = self.fc1.bias
        primals_5 = self.fc2.weight
        primals_6 = self.fc2.bias
        primals_7 = self.fc3.weight
        primals_8 = self.fc3.bias
        primals_9 = self.fc4.weight
        primals_10 = self.fc4.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
        return output[0]
|
DataCanvasIO/YLearn
|
Net
| false
| 17,215
|
[
"Apache-2.0"
] | 3
|
d65b5afb83deed154c710de9096317165d95014a
|
https://github.com/DataCanvasIO/YLearn/tree/d65b5afb83deed154c710de9096317165d95014a
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.