Dataset columns (with observed value ranges):

    entry_point                   string, length 1 to 65
    original_triton_python_code   string, length 208 to 619k
    optimised_triton_code         string, length 1.15k to 275k
    repo_name                     string, length 7 to 115
    module_name                   string, length 1 to 65
    synthetic                     bool, 1 class
    uuid                          int64, 0 to 18.5k
    licenses                      list, length 1 to 6
    stars                         int64, 0 to 19.8k
    sha                           string, length 40 to 40
    repo_link                     string, length 72 to 180
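Each record below pairs the original PyTorch source of a module (original_triton_python_code) with the Triton code that TorchInductor generated for it (optimised_triton_code), plus repository metadata. A minimal access sketch follows; the dataset id used here is a placeholder, since this card does not state a published Hub name.

```python
# Hypothetical usage sketch; "example-org/pytorch-triton-pairs" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("example-org/pytorch-triton-pairs", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"], row["licenses"])
print(row["original_triton_python_code"][:200])  # eager PyTorch module source
print(row["optimised_triton_code"][:200])        # Inductor-generated Triton source
```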
NaiveGroupNorm
from torch.nn import Module import torch from torch.nn import Parameter from torch.nn import init import torch.nn.parallel class NaiveGroupNorm(Module): """NaiveGroupNorm implements Group Normalization with the high-level matrix operations in PyTorch. It is a temporary solution to export GN by ONNX before the official GN can be exported by ONNX. The usage of NaiveGroupNorm is exactly the same as the official :class:`torch.nn.GroupNorm`. Args: num_groups (int): number of groups to separate the channels into num_channels (int): number of channels expected in input eps: a value added to the denominator for numerical stability. Default: 1e-5 affine: a boolean value that when set to ``True``, this module has learnable per-channel affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. Shape: - Input: :math:`(N, C, *)` where :math:`C=\\text{num\\_channels}` - Output: :math:`(N, C, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 6, 10, 10) >>> # Separate 6 channels into 3 groups >>> m = NaiveGroupNorm(3, 6) >>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm) >>> m = NaiveGroupNorm(6, 6) >>> # Put all 6 channels into a single group (equivalent with LayerNorm) >>> m = NaiveGroupNorm(1, 6) >>> # Activating the module >>> output = m(input) .. _`Group Normalization`: https://arxiv.org/abs/1803.08494 """ __constants__ = ['num_groups', 'num_channels', 'eps', 'affine', 'weight', 'bias'] def __init__(self, num_groups, num_channels, eps=1e-05, affine=True): super(NaiveGroupNorm, self).__init__() self.num_groups = num_groups self.num_channels = num_channels self.eps = eps self.affine = affine if self.affine: self.weight = Parameter(torch.Tensor(num_channels)) self.bias = Parameter(torch.Tensor(num_channels)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.affine: init.ones_(self.weight) init.zeros_(self.bias) def forward(self, input): N, C, H, W = input.size() assert C % self.num_groups == 0 input = input.reshape(N, self.num_groups, -1) mean = input.mean(dim=-1, keepdim=True) var = (input ** 2).mean(dim=-1, keepdim=True) - mean ** 2 std = torch.sqrt(var + self.eps) input = (input - mean) / std input = input.reshape(N, C, H, W) if self.affine: input = input * self.weight.reshape(1, C, 1, 1 ) + self.bias.reshape(1, C, 1, 1) return input def extra_repr(self): return ('{num_groups}, {num_channels}, eps={eps}, affine={affine}'. format(**self.__dict__)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_groups': 1, 'num_channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch.nn import Parameter from torch.nn import init import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp20 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 64.0 tmp11 = tmp4 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tmp11 * tmp11 tmp14 = tmp12 - tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp0 - tmp11 tmp19 = tmp18 / tmp17 tmp21 = tmp19 * tmp20 tmp23 = tmp21 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp11, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp17, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp23, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0) del buf0 buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_mean_mul_pow_sqrt_sub_0[grid(4)](buf1, buf3, primals_1, primals_2, primals_3, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_3 return buf4, primals_1, buf1, buf3 class NaiveGroupNormNew(Module): """NaiveGroupNorm implements Group Normalization with the high-level matrix operations in PyTorch. It is a temporary solution to export GN by ONNX before the official GN can be exported by ONNX. The usage of NaiveGroupNorm is exactly the same as the official :class:`torch.nn.GroupNorm`. Args: num_groups (int): number of groups to separate the channels into num_channels (int): number of channels expected in input eps: a value added to the denominator for numerical stability. Default: 1e-5 affine: a boolean value that when set to ``True``, this module has learnable per-channel affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. 
Shape: - Input: :math:`(N, C, *)` where :math:`C=\\text{num\\_channels}` - Output: :math:`(N, C, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 6, 10, 10) >>> # Separate 6 channels into 3 groups >>> m = NaiveGroupNorm(3, 6) >>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm) >>> m = NaiveGroupNorm(6, 6) >>> # Put all 6 channels into a single group (equivalent with LayerNorm) >>> m = NaiveGroupNorm(1, 6) >>> # Activating the module >>> output = m(input) .. _`Group Normalization`: https://arxiv.org/abs/1803.08494 """ __constants__ = ['num_groups', 'num_channels', 'eps', 'affine', 'weight', 'bias'] def __init__(self, num_groups, num_channels, eps=1e-05, affine=True): super(NaiveGroupNormNew, self).__init__() self.num_groups = num_groups self.num_channels = num_channels self.eps = eps self.affine = affine if self.affine: self.weight = Parameter(torch.Tensor(num_channels)) self.bias = Parameter(torch.Tensor(num_channels)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.affine: init.ones_(self.weight) init.zeros_(self.bias) def extra_repr(self): return ('{num_groups}, {num_channels}, eps={eps}, affine={affine}'. format(**self.__dict__)) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
hav4ik/AdelaiDet
NaiveGroupNorm
false
3,719
[ "BSD-2-Clause" ]
0
6ed9c1e1a25a3e25dddfa858ce0f219a30593ce2
https://github.com/hav4ik/AdelaiDet/tree/6ed9c1e1a25a3e25dddfa858ce0f219a30593ce2
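As a quick sanity check on this record, NaiveGroupNorm should agree numerically with the official torch.nn.GroupNorm it re-implements (both default to eps=1e-5 and affine weights initialized to ones/zeros). A minimal sketch, assuming the NaiveGroupNorm class from the row above has been pasted into scope:

```python
# Illustrative parity check, not part of the dataset record.
import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)                            # the sample input from get_inputs()
naive = NaiveGroupNorm(num_groups=1, num_channels=4)  # class defined in the row above
ref = nn.GroupNorm(num_groups=1, num_channels=4)
print(torch.allclose(naive(x), ref(x), atol=1e-5))    # expected: True
```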
UpsampleLayer
import torch
import torch.nn as nn


class UpsampleLayer(nn.Module):
    """ """

    def __init__(self, scale_factor, mode='bilinear'):
        """
        :param scale_factor:
        :param mode:
        """
        super().__init__()
        self.scale_factor = scale_factor
        self.mode = mode

    def forward(self, x):
        """
        :param x:
        :return:
        """
        return nn.functional.interpolate(x, scale_factor=self.scale_factor,
            mode=self.mode, align_corners=True)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'scale_factor': 1.0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 3, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tmp11 = x0 tmp12 = tmp11.to(tl.float32) tmp13 = tmp12 * tmp2 tmp14 = triton_helpers.maximum(tmp13, tmp4) tmp15 = tmp14.to(tl.int32) tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask, eviction_policy='evict_last') tmp17 = tmp15 + tmp7 tmp18 = triton_helpers.minimum(tmp17, tmp9) tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tmp19 - tmp16 tmp21 = tmp15.to(tl.float32) tmp22 = tmp14 - tmp21 tmp23 = triton_helpers.maximum(tmp22, tmp4) tmp24 = triton_helpers.minimum(tmp23, tmp2) tmp25 = tmp20 * tmp24 tmp26 = tmp16 + tmp25 tmp27 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask, eviction_policy='evict_last') tmp29 = tmp28 - tmp27 tmp30 = tmp29 * tmp24 tmp31 = tmp27 + tmp30 tmp32 = tmp26 - tmp31 tmp33 = tmp6.to(tl.float32) tmp34 = tmp5 - tmp33 tmp35 = triton_helpers.maximum(tmp34, tmp4) tmp36 = triton_helpers.minimum(tmp35, tmp2) tmp37 = tmp32 * tmp36 tmp38 = tmp31 + tmp37 tl.store(in_out_ptr0 + x4, tmp38, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid (256)](buf1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf1, class UpsampleLayerNew(nn.Module): """ """ def __init__(self, scale_factor, mode='bilinear'): """ :param scale_factor: :param mode: """ super().__init__() self.scale_factor = scale_factor self.mode = mode def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
jianantian/yolo3-pytorch
UpsampleLayer
false
3,720
[ "BSD-3-Clause" ]
0
8966f04c5b514a4f60fcb63b1fc753d0b13ebdcc
https://github.com/jianantian/yolo3-pytorch/tree/8966f04c5b514a4f60fcb63b1fc753d0b13ebdcc
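One detail of this record: with the sample arguments (scale_factor=1.0, mode='bilinear', align_corners=True), interpolation is the identity map, so the fused index/clamp kernel above is exercised on a degenerate case. A small illustrative check (not part of the dataset):

```python
# With scale_factor=1.0 and align_corners=True, bilinear resampling maps each
# output pixel exactly onto the corresponding input pixel.
import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
y = F.interpolate(x, scale_factor=1.0, mode='bilinear', align_corners=True)
print(torch.allclose(x, y))  # expected: True
```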
KLDLoss
from _paritybench_helpers import _mock_config
import torch
from torch import nn


class KLDLoss(nn.Module):

    def __init__(self, opt):
        super().__init__()

    def forward(self, mu, logvar):
        kld_loss = torch.mean(-0.5 * torch.sum(1 + logvar - mu.pow(2) -
            logvar.exp(), dim=1), dim=0)
        return kld_loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'opt': _mock_config()}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_exp_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp18 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp24 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp26 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 - tmp4 tmp6 = tl_math.exp(tmp0) tmp7 = tmp5 - tmp6 tmp9 = tmp8 + tmp1 tmp11 = tmp10 * tmp10 tmp12 = tmp9 - tmp11 tmp13 = tl_math.exp(tmp8) tmp14 = tmp12 - tmp13 tmp15 = tmp7 + tmp14 tmp17 = tmp16 + tmp1 tmp19 = tmp18 * tmp18 tmp20 = tmp17 - tmp19 tmp21 = tl_math.exp(tmp16) tmp22 = tmp20 - tmp21 tmp23 = tmp15 + tmp22 tmp25 = tmp24 + tmp1 tmp27 = tmp26 * tmp26 tmp28 = tmp25 - tmp27 tmp29 = tl_math.exp(tmp24) tmp30 = tmp28 - tmp29 tmp31 = tmp23 + tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr0 + (16 + x0), xmask) tmp6 = tl.load(in_ptr0 + (32 + x0), xmask) tmp9 = tl.load(in_ptr0 + (48 + x0), xmask) tmp1 = -0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_pow_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mean_mul_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 return buf1, class KLDLossNew(nn.Module): def __init__(self, opt): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
DSciLab/VAE-Lab
KLDLoss
false
3,721
[ "MIT" ]
0
ab37cc1399e3ece28ce426d8bd31149b8f492f82
https://github.com/DSciLab/VAE-Lab/tree/ab37cc1399e3ece28ce426d8bd31149b8f492f82
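The expression in KLDLoss is the closed-form KL divergence KL(N(mu, sigma^2) || N(0, 1)) with logvar = log sigma^2, summed over dim=1 and averaged over the batch dimension. An illustrative cross-check against torch.distributions (not part of the dataset):

```python
# Verifies the closed-form expression used by KLDLoss against the generic
# kl_divergence for diagonal Gaussians.
import torch
from torch.distributions import Normal, kl_divergence

mu, logvar = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
kld = torch.mean(-0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1), dim=0)
ref = kl_divergence(Normal(mu, (0.5 * logvar).exp()),
                    Normal(torch.zeros_like(mu), torch.ones_like(mu)))
print(torch.allclose(kld, ref.sum(dim=1).mean(dim=0), atol=1e-5))  # expected: True
```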
MovingAvg
import torch
import torch.nn as nn
import torch.fft


class MovingAvg(nn.Module):
    """Moving average block to highlight the trend of time series."""

    def __init__(self, kernel_size, stride):
        super(MovingAvg, self).__init__()
        self.kernel_size = kernel_size
        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride,
            padding=0)

    def forward(self, x):
        front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1)
        end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1)
        x = torch.cat([front, x, end], dim=1)
        x = self.avg(x.permute(0, 2, 1))
        x = x.permute(0, 2, 1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'kernel_size': 4, 'stride': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.fft assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x0 = xindex % 4 x2 = xindex // 24 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0 + 4 * (-1 + x1) + 16 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 6, tl.int64) tmp14 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 24 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 24 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 24 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 24 * x1), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(96)](arg0_1, buf0, 96, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 1, 3), (12, 1, 48, 4), torch.float32) triton_poi_fused_avg_pool2d_1[grid(48)](buf0, buf1, 48, XBLOCK=64, num_warps=1, num_stages=1) del buf0 return reinterpret_tensor(buf1, (4, 3, 4), (12, 4, 1), 0), class MovingAvgNew(nn.Module): """Moving average block to highlight the trend of time series.""" def __init__(self, kernel_size, stride): super(MovingAvgNew, self).__init__() self.kernel_size = kernel_size self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
jianzhnie/TsFormer
MovingAvg
false
3,722
[ "Apache-2.0" ]
0
47e362f02445ba00d5ab8db206667767e72faca7
https://github.com/jianzhnie/TsFormer/tree/47e362f02445ba00d5ab8db206667767e72faca7
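MovingAvg pads the series with (kernel_size - 1) // 2 repeated edge steps on each side before average pooling, so with kernel_size=4 and stride=1 a length-4 input yields a length-3 output, which matches the (4, 3, 4) tensor returned by the compiled call above. A shape sketch, assuming the MovingAvg class from the row is in scope:

```python
# Illustrative shape check, not part of the dataset record.
import torch

m = MovingAvg(kernel_size=4, stride=1)  # class defined in the row above
x = torch.rand(4, 4, 4)                 # (batch, time, features), as in get_inputs()
print(m(x).shape)                       # expected: torch.Size([4, 3, 4])
```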
MLP
from torch.nn import Module
import torch
from torch.nn import Linear
from torch.nn import Sigmoid
from torch.nn.init import xavier_uniform_


class MLP(Module):
    """
    Defines the NN model - in this case, there are 3 hidden layers, 13 inputs
    (defined by data) in the 1st, 10 inputs in the second, and 8 in the 3rd
    (with 1 output). The first two are activated by a sigmoid function,
    weighted by a xavier initalization scheme.
    """

    def __init__(self, n_inputs):
        super(MLP, self).__init__()
        self.hidden1 = Linear(n_inputs, 10)
        xavier_uniform_(self.hidden1.weight)
        self.act1 = Sigmoid()
        self.hidden2 = Linear(10, 8)
        xavier_uniform_(self.hidden2.weight)
        self.act2 = Sigmoid()
        self.hidden3 = Linear(8, 6)
        xavier_uniform_(self.hidden3.weight)
        self.act3 = Sigmoid()
        self.hidden4 = Linear(6, 1)
        xavier_uniform_(self.hidden4.weight)

    def forward(self, X):
        X = self.hidden1(X)
        X = self.act1(X)
        X = self.hidden2(X)
        X = self.act2(X)
        X = self.hidden3(X)
        X = self.act3(X)
        X = self.hidden4(X)
        return X


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_inputs': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch.nn import Linear from torch.nn import Sigmoid from torch.nn.init import xavier_uniform_ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 6 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (10, 4), (4, 1)) assert_size_stride(primals_2, (10,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (8, 10), (10, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (6, 8), (8, 1)) assert_size_stride(primals_7, (6,), (1,)) assert_size_stride(primals_8, (1, 6), (6, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(640)](buf1, primals_2, 640, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(primals_4, (10, 8), (1, 10), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 8), (128, 32, 8, 1), 0) del buf2 triton_poi_fused_sigmoid_1[grid(512)](buf3, primals_5, 512, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 6), (6, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 6), (1, 8), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 6), (96, 24, 6, 1), 0) del buf4 triton_poi_fused_sigmoid_2[grid(384)](buf5, 
primals_7, 384, XBLOCK= 256, num_warps=4, num_stages=1) del primals_7 buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 6), ( 6, 1), 0), reinterpret_tensor(primals_8, (6, 1), (1, 6), 0), alpha=1, beta=1, out=buf7) del primals_9 return reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, buf5, primals_8, primals_6, primals_4 class MLPNew(Module): """ Defines the NN model - in this case, there are 3 hidden layers, 13 inputs (defined by data) in the 1st, 10 inputs in the second, and 8 in the 3rd (with 1 output). The first two are activated by a sigmoid function, weighted by a xavier initalization scheme. """ def __init__(self, n_inputs): super(MLPNew, self).__init__() self.hidden1 = Linear(n_inputs, 10) xavier_uniform_(self.hidden1.weight) self.act1 = Sigmoid() self.hidden2 = Linear(10, 8) xavier_uniform_(self.hidden2.weight) self.act2 = Sigmoid() self.hidden3 = Linear(8, 6) xavier_uniform_(self.hidden3.weight) self.act3 = Sigmoid() self.hidden4 = Linear(6, 1) xavier_uniform_(self.hidden4.weight) def forward(self, input_0): primals_1 = self.hidden1.weight primals_2 = self.hidden1.bias primals_4 = self.hidden2.weight primals_5 = self.hidden2.bias primals_6 = self.hidden3.weight primals_7 = self.hidden3.bias primals_8 = self.hidden4.weight primals_9 = self.hidden4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
jfmalloy1/UltraMarathon_Prediction
MLP
false
3,724
[ "MIT" ]
0
8eef7bd2860ce255994d32a0150c09b3b655cee7
https://github.com/jfmalloy1/UltraMarathon_Prediction/tree/8eef7bd2860ce255994d32a0150c09b3b655cee7
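In the compiled version of this record, each matmul is dispatched to extern_kernels.mm while the bias add and sigmoid of every hidden layer are fused into a single Triton kernel. In eager PyTorch the fused step for the first layer amounts to the line below (shapes taken from the record; the tensors here are random stand-ins):

```python
# What extern_kernels.mm + triton_poi_fused_sigmoid_0 compute together for hidden1.
import torch

x = torch.rand(64, 4)    # input flattened to (64, 4)
w = torch.rand(10, 4)    # hidden1.weight
b = torch.rand(10)       # hidden1.bias
h = torch.sigmoid(x @ w.t() + b)
print(h.shape)           # torch.Size([64, 10])
```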
CoarseGenerator
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils import spectral_norm as spectral_norm_fn from torch.nn.utils import weight_norm as weight_norm_fn def gen_conv(input_dim, output_dim, kernel_size=3, stride=1, padding=0, rate=1, activation='elu', gated=False): """ Convolutions used in the generator. Args: input_dim (int): number of input channels. output_dim (int): number of output features. kernel_size (int): kernel size of convolutional filters. stride (int): convolutional stride. padding (int): padding for convolution. rate (int): dilation rate of dilated convolution. activation (string): activation on computed features. gated (bool): boolean deciding on making convolutions "gated". Return: (tensor): result from convolution """ if gated: conv2 = Conv2dBlockGated(input_dim, output_dim, kernel_size, stride, conv_padding=padding, dilation=rate, activation=activation) else: conv2 = Conv2dBlock(input_dim, output_dim, kernel_size, stride, conv_padding=padding, dilation=rate, activation=activation) return conv2 class Conv2dBlock(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= 0, conv_padding=0, dilation=1, weight_norm='none', norm='none', activation='relu', pad_type='zero', transpose=False): super(Conv2dBlock, self).__init__() self.use_bias = True if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) elif pad_type == 'none': self.pad = None else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if weight_norm == 'sn': self.weight_norm = spectral_norm_fn elif weight_norm == 'wn': self.weight_norm = weight_norm_fn elif weight_norm == 'none': self.weight_norm = None else: assert 0, 'Unsupported normalization: {}'.format(weight_norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if transpose: self.conv = nn.ConvTranspose2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, output_padding= conv_padding, dilation=dilation, bias=self.use_bias) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, dilation=dilation, bias=self. 
use_bias) if self.weight_norm: self.conv = self.weight_norm(self.conv) def forward(self, x): if self.pad: x = self.conv(self.pad(x)) else: x = self.conv(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class Conv2dBlockGated(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= 0, conv_padding=0, dilation=1, weight_norm='none', norm='none', activation='relu', pad_type='zero', transpose=False): super(Conv2dBlockGated, self).__init__() self.use_bias = True self._output_dim = output_dim if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) elif pad_type == 'none': self.pad = None else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if weight_norm == 'sn': self.weight_norm = spectral_norm_fn elif weight_norm == 'wn': self.weight_norm = weight_norm_fn elif weight_norm == 'none': self.weight_norm = None else: assert 0, 'Unsupported normalization: {}'.format(weight_norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if transpose: self.conv = nn.ConvTranspose2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, output_padding= conv_padding, dilation=dilation, bias=self.use_bias) self.gate = nn.ConvTranspose2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, output_padding= conv_padding, dilation=dilation, bias=self.use_bias) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, dilation=dilation, bias=self. use_bias) self.gate = nn.Conv2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, dilation=dilation, bias=self. 
use_bias) if self.weight_norm: self.conv = self.weight_norm(self.conv) self.gate = self.weight_norm(self.gate) def forward(self, x): if self.pad: feat = self.conv(self.pad(x)) gate = self.gate(self.pad(x)) else: feat = self.conv(x) gate = self.gate(x) if self.norm: feat = self.norm(feat) gate = self.norm(gate) if self.activation is None or self._output_dim == 3: return feat feat = self.activation(feat) gate = torch.sigmoid(gate) return feat * gate class CoarseGenerator(nn.Module): def __init__(self, input_dim, cnum, gated=False, use_cuda=True, device=0): super(CoarseGenerator, self).__init__() self.use_cuda = use_cuda self.device = device self.conv1 = gen_conv(input_dim + 2, cnum, 5, 1, 2, gated=gated) self.conv2_downsample = gen_conv(cnum, cnum * 2, 3, 2, 1, gated=gated) self.conv3 = gen_conv(cnum * 2, cnum * 2, 3, 1, 1, gated=gated) self.conv4_downsample = gen_conv(cnum * 2, cnum * 4, 3, 2, 1, gated =gated) self.conv5 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1, gated=gated) self.conv6 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1, gated=gated) self.conv7_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 2, rate=2, gated=gated) self.conv8_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 4, rate=4, gated=gated) self.conv9_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 8, rate=8, gated=gated) self.conv10_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 16, rate=16, gated=gated) self.conv11 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1, gated=gated) self.conv12 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1, gated=gated) self.conv13 = gen_conv(cnum * 4, cnum * 2, 3, 1, 1, gated=gated) self.conv14 = gen_conv(cnum * 2, cnum * 2, 3, 1, 1, gated=gated) self.conv15 = gen_conv(cnum * 2, cnum, 3, 1, 1, gated=gated) self.conv16 = gen_conv(cnum, cnum // 2, 3, 1, 1, gated=gated) self.conv17 = gen_conv(cnum // 2, input_dim, 3, 1, 1, activation= 'none', gated=gated) def forward(self, x, mask): ones = torch.ones(x.size(0), 1, x.size(2), x.size(3), device=x.device) x = torch.cat([x, ones, mask], dim=1) x = self.conv1(x) x = self.conv2_downsample(x) x = self.conv3(x) x = self.conv4_downsample(x) x = self.conv5(x) x = self.conv6(x) x = self.conv7_atrous(x) x = self.conv8_atrous(x) x = self.conv9_atrous(x) x = self.conv10_atrous(x) x = self.conv11(x) x = self.conv12(x) x = F.interpolate(x, scale_factor=2, mode='nearest') x = self.conv13(x) x = self.conv14(x) x = F.interpolate(x, scale_factor=2, mode='nearest') x = self.conv15(x) x = self.conv16(x) x = self.conv17(x) x_stage1 = torch.clamp(x, -1.0, 1.0) return x_stage1 def get_inputs(): return [torch.rand([4, 1, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'cnum': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.utils import spectral_norm as spectral_norm_fn from torch.nn.utils import weight_norm as weight_norm_fn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 6 x0 = xindex % 16 x2 = xindex // 96 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = 1.0 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp9, tmp10, tmp11) tmp13 = tmp0 >= tmp7 tl.full([1], 6, tl.int64) tmp16 = tl.load(in_ptr1 + (x0 + 16 * (-2 + x1) + 64 * x2), tmp13 & xmask, other=0.0) tmp17 = tl.where(tmp9, tmp12, tmp16) tmp18 = tl.where(tmp4, tmp5, tmp17) tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_elu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_elu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) 
@triton.jit def triton_poi_fused__unsafe_index_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x2 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 1, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tl.where(tmp7, tmp6, tmp5) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_6(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 2, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 2 * tmp4 + 4 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_elu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 2 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_clamp_convolution_ge_le_logical_and_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -1.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 1.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 >= tmp3 tmp8 = tmp2 <= tmp5 tmp9 = tmp7 & tmp8 tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp9, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36) = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 
4, 1)) assert_size_stride(primals_3, (4, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (8,), (1,)) assert_size_stride(primals_7, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_8, (8,), (1,)) assert_size_stride(primals_9, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_10, (16,), (1,)) assert_size_stride(primals_11, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_12, (16,), (1,)) assert_size_stride(primals_13, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_14, (16,), (1,)) assert_size_stride(primals_15, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_16, (16,), (1,)) assert_size_stride(primals_17, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_18, (16,), (1,)) assert_size_stride(primals_19, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_20, (16,), (1,)) assert_size_stride(primals_21, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_22, (16,), (1,)) assert_size_stride(primals_23, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_24, (16,), (1,)) assert_size_stride(primals_25, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_26, (16,), (1,)) assert_size_stride(primals_27, (8, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_28, (8,), (1,)) assert_size_stride(primals_29, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_30, (8,), (1,)) assert_size_stride(primals_31, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_32, (4,), (1,)) assert_size_stride(primals_33, (2, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_34, (2,), (1,)) assert_size_stride(primals_35, (4, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_36, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(384)](primals_1, primals_2, buf0, 384, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_elu_1[grid(256)](buf2, primals_4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf3 = extern_kernels.convolution(buf2, primals_5, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 8, 2, 2), (32, 4, 2, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_elu_2[grid(128)](buf4, primals_6, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 buf5 = extern_kernels.convolution(buf4, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 8, 2, 2), (32, 4, 2, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_elu_2[grid(128)](buf6, primals_8, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 buf7 = extern_kernels.convolution(buf6, primals_9, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 16, 1, 1), (16, 1, 1, 1)) buf8 = buf7 del buf7 triton_poi_fused_convolution_elu_3[grid(64)](buf8, primals_10, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 buf9 = extern_kernels.convolution(buf8, primals_11, 
stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 16, 1, 1), (16, 1, 1, 1)) buf10 = buf9 del buf9 triton_poi_fused_convolution_elu_3[grid(64)](buf10, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf11 = extern_kernels.convolution(buf10, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 16, 1, 1), (16, 1, 1, 1)) buf12 = buf11 del buf11 triton_poi_fused_convolution_elu_3[grid(64)](buf12, primals_14, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf13 = extern_kernels.convolution(buf12, primals_15, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 16, 1, 1), (16, 1, 1, 1)) buf14 = buf13 del buf13 triton_poi_fused_convolution_elu_3[grid(64)](buf14, primals_16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf15 = extern_kernels.convolution(buf14, primals_17, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 16, 1, 1), (16, 1, 1, 1)) buf16 = buf15 del buf15 triton_poi_fused_convolution_elu_3[grid(64)](buf16, primals_18, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_18 buf17 = extern_kernels.convolution(buf16, primals_19, stride=(1, 1), padding=(8, 8), dilation=(8, 8), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 16, 1, 1), (16, 1, 1, 1)) buf18 = buf17 del buf17 triton_poi_fused_convolution_elu_3[grid(64)](buf18, primals_20, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_20 buf19 = extern_kernels.convolution(buf18, primals_21, stride=(1, 1), padding=(16, 16), dilation=(16, 16), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 16, 1, 1), (16, 1, 1, 1)) buf20 = buf19 del buf19 triton_poi_fused_convolution_elu_3[grid(64)](buf20, primals_22, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_22 buf21 = extern_kernels.convolution(buf20, primals_23, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 16, 1, 1), (16, 1, 1, 1)) buf22 = buf21 del buf21 triton_poi_fused_convolution_elu_3[grid(64)](buf22, primals_24, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_24 buf23 = extern_kernels.convolution(buf22, primals_25, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 16, 1, 1), (16, 1, 1, 1)) buf24 = buf23 del buf23 triton_poi_fused_convolution_elu_3[grid(64)](buf24, primals_26, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_26 buf25 = empty_strided_cuda((2,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_4[grid(2)](buf25, 2, XBLOCK=2, num_warps=1, num_stages=1) buf26 = empty_strided_cuda((4, 16, 2, 2), (64, 4, 2, 1), torch.float32) triton_poi_fused__unsafe_index_5[grid(256)](buf25, buf24, buf26, 256, XBLOCK=256, num_warps=4, num_stages=1) buf27 = extern_kernels.convolution(buf26, primals_27, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 8, 2, 2), (32, 4, 2, 1)) buf28 = buf27 del buf27 triton_poi_fused_convolution_elu_2[grid(128)](buf28, primals_28, 128, XBLOCK=128, num_warps=4, num_stages=1) del 
primals_28 buf29 = extern_kernels.convolution(buf28, primals_29, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 8, 2, 2), (32, 4, 2, 1)) buf30 = buf29 del buf29 triton_poi_fused_convolution_elu_2[grid(128)](buf30, primals_30, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_30 buf31 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_6[grid(4)](buf31, 4, XBLOCK=4, num_warps=1, num_stages=1) buf32 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused__unsafe_index_7[grid(512)](buf31, buf30, buf32, 512, XBLOCK=256, num_warps=4, num_stages=1) buf33 = extern_kernels.convolution(buf32, primals_31, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 4, 4, 4), (64, 16, 4, 1)) buf34 = buf33 del buf33 triton_poi_fused_convolution_elu_1[grid(256)](buf34, primals_32, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_32 buf35 = extern_kernels.convolution(buf34, primals_33, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 2, 4, 4), (32, 16, 4, 1)) buf36 = buf35 del buf35 triton_poi_fused_convolution_elu_8[grid(128)](buf36, primals_34, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_34 buf37 = extern_kernels.convolution(buf36, primals_35, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 4, 4, 4), (64, 16, 4, 1)) buf38 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf39 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_clamp_convolution_ge_le_logical_and_9[grid(256)](buf37 , primals_36, buf38, buf39, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf37 del primals_36 return (buf38, primals_3, primals_5, primals_7, primals_9, primals_11, primals_13, primals_15, primals_17, primals_19, primals_21, primals_23, primals_25, primals_27, primals_29, primals_31, primals_33, primals_35, buf0, buf2, buf4, buf6, buf8, buf10, buf12, buf14, buf16, buf18, buf20, buf22, buf24, buf25, buf26, buf28, buf30, buf31, buf32, buf34, buf36, buf39) def gen_conv(input_dim, output_dim, kernel_size=3, stride=1, padding=0, rate=1, activation='elu', gated=False): """ Convolutions used in the generator. Args: input_dim (int): number of input channels. output_dim (int): number of output features. kernel_size (int): kernel size of convolutional filters. stride (int): convolutional stride. padding (int): padding for convolution. rate (int): dilation rate of dilated convolution. activation (string): activation on computed features. gated (bool): boolean deciding on making convolutions "gated". 
Return: (tensor): result from convolution """ if gated: conv2 = Conv2dBlockGated(input_dim, output_dim, kernel_size, stride, conv_padding=padding, dilation=rate, activation=activation) else: conv2 = Conv2dBlock(input_dim, output_dim, kernel_size, stride, conv_padding=padding, dilation=rate, activation=activation) return conv2 class Conv2dBlock(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= 0, conv_padding=0, dilation=1, weight_norm='none', norm='none', activation='relu', pad_type='zero', transpose=False): super(Conv2dBlock, self).__init__() self.use_bias = True if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) elif pad_type == 'none': self.pad = None else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if weight_norm == 'sn': self.weight_norm = spectral_norm_fn elif weight_norm == 'wn': self.weight_norm = weight_norm_fn elif weight_norm == 'none': self.weight_norm = None else: assert 0, 'Unsupported normalization: {}'.format(weight_norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if transpose: self.conv = nn.ConvTranspose2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, output_padding= conv_padding, dilation=dilation, bias=self.use_bias) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, dilation=dilation, bias=self. 
use_bias) if self.weight_norm: self.conv = self.weight_norm(self.conv) def forward(self, x): if self.pad: x = self.conv(self.pad(x)) else: x = self.conv(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class Conv2dBlockGated(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= 0, conv_padding=0, dilation=1, weight_norm='none', norm='none', activation='relu', pad_type='zero', transpose=False): super(Conv2dBlockGated, self).__init__() self.use_bias = True self._output_dim = output_dim if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) elif pad_type == 'none': self.pad = None else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if weight_norm == 'sn': self.weight_norm = spectral_norm_fn elif weight_norm == 'wn': self.weight_norm = weight_norm_fn elif weight_norm == 'none': self.weight_norm = None else: assert 0, 'Unsupported normalization: {}'.format(weight_norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if transpose: self.conv = nn.ConvTranspose2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, output_padding= conv_padding, dilation=dilation, bias=self.use_bias) self.gate = nn.ConvTranspose2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, output_padding= conv_padding, dilation=dilation, bias=self.use_bias) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, dilation=dilation, bias=self. use_bias) self.gate = nn.Conv2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, dilation=dilation, bias=self. 
use_bias) if self.weight_norm: self.conv = self.weight_norm(self.conv) self.gate = self.weight_norm(self.gate) def forward(self, x): if self.pad: feat = self.conv(self.pad(x)) gate = self.gate(self.pad(x)) else: feat = self.conv(x) gate = self.gate(x) if self.norm: feat = self.norm(feat) gate = self.norm(gate) if self.activation is None or self._output_dim == 3: return feat feat = self.activation(feat) gate = torch.sigmoid(gate) return feat * gate class CoarseGeneratorNew(nn.Module): def __init__(self, input_dim, cnum, gated=False, use_cuda=True, device=0): super(CoarseGeneratorNew, self).__init__() self.use_cuda = use_cuda self.device = device self.conv1 = gen_conv(input_dim + 2, cnum, 5, 1, 2, gated=gated) self.conv2_downsample = gen_conv(cnum, cnum * 2, 3, 2, 1, gated=gated) self.conv3 = gen_conv(cnum * 2, cnum * 2, 3, 1, 1, gated=gated) self.conv4_downsample = gen_conv(cnum * 2, cnum * 4, 3, 2, 1, gated =gated) self.conv5 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1, gated=gated) self.conv6 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1, gated=gated) self.conv7_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 2, rate=2, gated=gated) self.conv8_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 4, rate=4, gated=gated) self.conv9_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 8, rate=8, gated=gated) self.conv10_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 16, rate=16, gated=gated) self.conv11 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1, gated=gated) self.conv12 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1, gated=gated) self.conv13 = gen_conv(cnum * 4, cnum * 2, 3, 1, 1, gated=gated) self.conv14 = gen_conv(cnum * 2, cnum * 2, 3, 1, 1, gated=gated) self.conv15 = gen_conv(cnum * 2, cnum, 3, 1, 1, gated=gated) self.conv16 = gen_conv(cnum, cnum // 2, 3, 1, 1, gated=gated) self.conv17 = gen_conv(cnum // 2, input_dim, 3, 1, 1, activation= 'none', gated=gated) def forward(self, input_0, input_1): primals_3 = self.conv1.conv.weight primals_4 = self.conv1.conv.bias primals_5 = self.conv2_downsample.conv.weight primals_6 = self.conv2_downsample.conv.bias primals_7 = self.conv3.conv.weight primals_8 = self.conv3.conv.bias primals_9 = self.conv4_downsample.conv.weight primals_10 = self.conv4_downsample.conv.bias primals_11 = self.conv5.conv.weight primals_12 = self.conv5.conv.bias primals_13 = self.conv6.conv.weight primals_14 = self.conv6.conv.bias primals_15 = self.conv7_atrous.conv.weight primals_16 = self.conv7_atrous.conv.bias primals_17 = self.conv8_atrous.conv.weight primals_18 = self.conv8_atrous.conv.bias primals_19 = self.conv9_atrous.conv.weight primals_20 = self.conv9_atrous.conv.bias primals_21 = self.conv10_atrous.conv.weight primals_22 = self.conv10_atrous.conv.bias primals_23 = self.conv11.conv.weight primals_24 = self.conv11.conv.bias primals_25 = self.conv12.conv.weight primals_26 = self.conv12.conv.bias primals_27 = self.conv13.conv.weight primals_28 = self.conv13.conv.bias primals_29 = self.conv14.conv.weight primals_30 = self.conv14.conv.bias primals_31 = self.conv15.conv.weight primals_32 = self.conv15.conv.bias primals_33 = self.conv16.conv.weight primals_34 = self.conv16.conv.bias primals_35 = self.conv17.conv.weight primals_36 = self.conv17.conv.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, 
primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36]) return output[0]
jacobwjs/generative-inpainting-pytorch
CoarseGenerator
false
3,725
[ "MIT" ]
0
5cd5e818aa7394444b6c21df448d8b395492e4d7
https://github.com/jacobwjs/generative-inpainting-pytorch/tree/5cd5e818aa7394444b6c21df448d8b395492e4d7
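Aside (editor's illustration, not part of the record above): the Conv2dBlockGated class in this record pairs every convolution with a parallel "gate" convolution and multiplies the activated features by a sigmoid of the gate. A minimal sketch of that elementwise gating, with made-up sizes:

import torch
import torch.nn as nn

x = torch.rand(1, 3, 8, 8)
conv = nn.Conv2d(3, 16, 3, padding=1)   # feature branch
gate = nn.Conv2d(3, 16, 3, padding=1)   # gate branch, same output shape as the features
out = torch.relu(conv(x)) * torch.sigmoid(gate(x))  # feat * sigmoid(gate), as in the record's forward
print(out.shape)  # torch.Size([1, 16, 8, 8])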
SeasonalLayerNorm
import torch
import torch.nn as nn
import torch.fft


class SeasonalLayerNorm(nn.Module):
    """Special designed layernorm for the seasonal part."""

    def __init__(self, channels):
        super(SeasonalLayerNorm, self).__init__()
        self.layernorm = nn.LayerNorm(channels)

    def forward(self, x):
        x_hat = self.layernorm(x)
        bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1)
        return x_hat - bias


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.fft assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_repeat_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf1 del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_repeat_sub_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 return buf3, primals_3 class SeasonalLayerNormNew(nn.Module): """Special designed layernorm for the seasonal part.""" def __init__(self, channels): super(SeasonalLayerNormNew, self).__init__() self.layernorm = nn.LayerNorm(channels) def forward(self, input_0): primals_1 = self.layernorm.weight primals_2 = self.layernorm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
jianzhnie/TsFormer
SeasonalLayerNorm
false
3,726
[ "Apache-2.0" ]
0
47e362f02445ba00d5ab8db206667767e72faca7
https://github.com/jianzhnie/TsFormer/tree/47e362f02445ba00d5ab8db206667767e72faca7
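Aside (editor's sketch, not part of the record above): after the LayerNorm, SeasonalLayerNorm subtracts the per-position mean over the sequence dimension (dim=1), so each channel of the output has zero mean along that axis. A quick check with the (4, 4, 4) shape from get_inputs():

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4)
x_hat = nn.LayerNorm(4)(x)
out = x_hat - x_hat.mean(dim=1, keepdim=True)  # equivalent to the unsqueeze(1).repeat(...) in the record
print(out.mean(dim=1).abs().max() < 1e-6)      # tensor(True): zero mean over dim=1, up to rounding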
TokenEmbedding
import torch
import torch.nn as nn
import torch.fft


class TokenEmbedding(nn.Module):

    def __init__(self, c_in, d_model):
        super(TokenEmbedding, self).__init__()
        padding = 1 if torch.__version__ >= '1.5.0' else 2
        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
            kernel_size=3, padding=padding, padding_mode='circular', bias=False)
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_in',
                    nonlinearity='leaky_relu')

    def forward(self, x):
        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'c_in': 4, 'd_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.fft assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 24 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y0 = yindex % 6 x2 = xindex y1 = yindex // 6 tmp0 = y0 tmp1 = tl.full([1, 1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]) tmp4 = tl.full([1, 1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK]) tmp8 = tmp7 >= tmp4 tmp9 = tmp7 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tmp10 & tmp6 tmp12 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp11 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = float('nan') tmp14 = tl.where(tmp10, tmp12, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp6, tmp14, tmp15) tmp17 = tmp3 >= tmp4 tmp18 = tmp3 < tmp1 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp2 tmp21 = tl.load(in_ptr0 + (-20 + x2 + 4 * y0 + 16 * y1), tmp20 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp22 = tl.where(tmp19, tmp21, tmp13) tmp23 = tl.where(tmp5, tmp16, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp2, tmp23, tmp24) tmp26 = tmp0 < tmp4 tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]) tmp28 = tmp27 >= tmp4 tmp29 = tmp27 < tmp1 tmp30 = tmp28 & tmp29 tmp31 = tmp30 & tmp26 tmp32 = tl.load(in_ptr0 + (12 + x2 + 4 * y0 + 16 * y1), tmp31 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp33 = tl.where(tmp30, tmp32, tmp13) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp26, tmp33, tmp34) tmp36 = tmp0 >= tmp4 tmp37 = tmp0 < tmp1 tmp38 = tmp36 & tmp37 tmp39 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp40 = tl.where(tmp38, tmp39, tmp13) tmp41 = tl.where(tmp26, tmp35, tmp40) tmp42 = tl.where(tmp2, tmp25, tmp41) tl.store(out_ptr0 + (y0 + 6 * x2 + 24 * y1), tmp42, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_0[grid(24, 4)](primals_1, buf1, 24, 4, XBLOCK =4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2, buf1 class TokenEmbeddingNew(nn.Module): def __init__(self, c_in, d_model): super(TokenEmbeddingNew, self).__init__() padding = 1 if torch.__version__ >= '1.5.0' else 2 self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model, kernel_size=3, padding=padding, 
padding_mode='circular', bias=False ) for m in self.modules(): if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu') def forward(self, input_0): primals_2 = self.tokenConv.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
jianzhnie/TsFormer
TokenEmbedding
false
3,727
[ "Apache-2.0" ]
0
47e362f02445ba00d5ab8db206667767e72faca7
https://github.com/jianzhnie/TsFormer/tree/47e362f02445ba00d5ab8db206667767e72faca7
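Aside (editor's illustration, not code from the record above): the Conv1d in TokenEmbedding uses padding_mode='circular', i.e. the sequence is wrapped around its ends rather than zero-padded, which is what the copy kernel in the optimised code reproduces. Circular padding on its own:

import torch
import torch.nn.functional as F

x = torch.arange(5.0).view(1, 1, 5)       # one batch, one channel, length 5
print(F.pad(x, (1, 1), mode='circular'))  # tensor([[[4., 0., 1., 2., 3., 4., 0.]]])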
InjectNoise
import torch
from torch import nn
import torch.utils.data


class InjectNoise(nn.Module):

    def __init__(self, channels):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1, channels, 1, 1))

    def forward(self, x):
        noise = torch.randn((x.shape[0], 1, x.shape[2], x.shape[3]), device=x.device)
        return x + self.weight * noise


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4}]
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], device=device( type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf2, buf1 class InjectNoiseNew(nn.Module): def __init__(self, channels): super().__init__() self.weight = nn.Parameter(torch.zeros(1, channels, 1, 1)) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
jiazhi412/Machine-Learning-Collection
InjectNoise
false
3,728
[ "MIT" ]
0
1c30faf1e27a79eeca966c017e956df8f7f6ef17
https://github.com/jiazhi412/Machine-Learning-Collection/tree/1c30faf1e27a79eeca966c017e956df8f7f6ef17
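Aside (editor's sketch, not part of the record above): InjectNoise draws one noise map per sample, shaped (N, 1, H, W), and scales it with a learned (1, C, 1, 1) weight; broadcasting expands both to the full activation, so every channel shares the same noise field with a per-channel strength. A shape-level check with the record's sizes:

import torch

x = torch.rand(4, 4, 4, 4)
weight = torch.zeros(1, 4, 1, 1)                            # per-channel strength, initialised to zero
noise = torch.randn(x.shape[0], 1, x.shape[2], x.shape[3])  # one noise map per sample
print((x + weight * noise).shape)                           # torch.Size([4, 4, 4, 4])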
WSConv2d
import torch
from torch import nn
import torch.utils.data


class WSConv2d(nn.Module):
    """
    Weight scaled Conv2d (Equalized Learning Rate)
    Note that input is multiplied rather than changing weights
    this will have the same result.

    Inspired and looked at:
    https://github.com/nvnbny/progressive_growing_of_gans/blob/master/modelUtils.py
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
        padding=1, gain=2):
        super(WSConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding)
        self.scale = (gain / (in_channels * kernel_size ** 2)) ** 0.5
        self.bias = self.conv.bias
        self.conv.bias = None
        nn.init.normal_(self.conv.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x):
        return self.conv(x * self.scale) + self.bias.view(1, self.bias.shape[0], 1, 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.23570226039551584 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_add_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class WSConv2dNew(nn.Module): """ Weight scaled Conv2d (Equalized Learning Rate) Note that input is multiplied rather than changing weights this will have the same result. Inspired and looked at: https://github.com/nvnbny/progressive_growing_of_gans/blob/master/modelUtils.py """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, gain=2): super(WSConv2dNew, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding) self.scale = (gain / (in_channels * kernel_size ** 2)) ** 0.5 self.bias = self.conv.bias self.conv.bias = None nn.init.normal_(self.conv.weight) nn.init.zeros_(self.bias) def forward(self, input_0): primals_3 = self.bias primals_2 = self.conv.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
jiazhi412/Machine-Learning-Collection
WSConv2d
false
3,729
[ "MIT" ]
0
1c30faf1e27a79eeca966c017e956df8f7f6ef17
https://github.com/jiazhi412/Machine-Learning-Collection/tree/1c30faf1e27a79eeca966c017e956df8f7f6ef17
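Aside (editor's sketch, not part of the record above): the WSConv2d docstring claims that multiplying the input by the equalized-learning-rate scale gives the same result as scaling the weights. Since convolution is linear in both input and weights (the bias is added separately in the record), this is easy to check numerically; the sizes below are arbitrary:

import torch
import torch.nn.functional as F

x, w = torch.randn(1, 4, 8, 8), torch.randn(4, 4, 3, 3)
scale = (2 / (4 * 3 ** 2)) ** 0.5         # gain=2, in_channels=4, kernel_size=3, as in the record
a = F.conv2d(x * scale, w, padding=1)     # scale the input (what WSConv2d does)
b = F.conv2d(x, w * scale, padding=1)     # scale the weights instead
print(torch.allclose(a, b, atol=1e-5))    # True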
MLP
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter


def gelu(x):
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
        torch.pow(x, 3))))


class Conv1D(nn.Module):

    def __init__(self, nf, nx):
        super(Conv1D, self).__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = Parameter(w)
        self.bias = Parameter(torch.zeros(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(*size_out)
        return x


class MLP(nn.Module):

    def __init__(self, n_state, config):
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)
        self.c_proj = Conv1D(nx, n_state)
        self.act = gelu

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return h2


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_state': 4, 'config': _mock_config(n_embd=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_3, alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), primals_5, alpha=1, beta=1, out=buf2) del primals_4 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf0, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor( primals_1, (4, 64), (1, 4), 0) def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class Conv1D(nn.Module): def __init__(self, nf, nx): super(Conv1D, self).__init__() self.nf = nf w = torch.empty(nx, nf) nn.init.normal_(w, std=0.02) self.weight = Parameter(w) self.bias = Parameter(torch.zeros(nf)) def forward(self, x): size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(*size_out) return x class MLPNew(nn.Module): def __init__(self, n_state, config): super(MLPNew, self).__init__() nx = config.n_embd self.c_fc = Conv1D(n_state, nx) self.c_proj = Conv1D(nx, n_state) self.act = gelu def forward(self, input_0): primals_3 = self.c_fc.weight primals_2 = self.c_fc.bias primals_5 = self.c_proj.weight primals_4 = self.c_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
CaptainJa/demo-torch-gpt2
MLP
false
3,730
[ "MIT" ]
0
83d6074e8b321101e08c0aa5749c8eb988a5faa8
https://github.com/CaptainJa/demo-torch-gpt2/tree/83d6074e8b321101e08c0aa5749c8eb988a5faa8
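Aside (editor's sketch, not part of the record above): the gelu in the MLP record is the tanh approximation of GELU, which is exactly what the fused Triton kernel reproduces term by term. On a recent PyTorch (1.12+, an assumption about the environment) the same approximation is available as F.gelu(..., approximate='tanh'), giving a quick cross-check:

import math
import torch
import torch.nn.functional as F

def gelu(x):
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))

x = torch.randn(16)
print(torch.allclose(gelu(x), F.gelu(x, approximate='tanh'), atol=1e-6))  # True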
CNN
import torch
import torch.nn as nn
import torch.fft


class CNN(nn.Module):
    """Convolutional Neural Networks."""

    def __init__(self, input_size, hidden_dim, output_size):
        super(CNN, self).__init__()
        self.Conv1 = nn.Conv1d(in_channels=input_size, out_channels=hidden_dim,
            kernel_size=3, padding=1, padding_mode='circular', bias=False)
        self.flatten = nn.Flatten()
        self.relu = nn.ReLU()
        self.fc1 = nn.Linear(hidden_dim * output_size, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_size)

    def forward(self, x):
        x = x.permute(0, 2, 1)
        out = self.Conv1(x).transpose(1, 2)
        out = self.relu(out)
        out = self.flatten(out)
        out = self.fc1(out)
        out = self.fc2(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_dim': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.fft assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 24 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y0 = yindex % 6 x2 = xindex y1 = yindex // 6 tmp0 = y0 tmp1 = tl.full([1, 1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]) tmp4 = tl.full([1, 1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK]) tmp8 = tmp7 >= tmp4 tmp9 = tmp7 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tmp10 & tmp6 tmp12 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp11 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = float('nan') tmp14 = tl.where(tmp10, tmp12, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp6, tmp14, tmp15) tmp17 = tmp3 >= tmp4 tmp18 = tmp3 < tmp1 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp2 tmp21 = tl.load(in_ptr0 + (-20 + x2 + 4 * y0 + 16 * y1), tmp20 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp22 = tl.where(tmp19, tmp21, tmp13) tmp23 = tl.where(tmp5, tmp16, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp2, tmp23, tmp24) tmp26 = tmp0 < tmp4 tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]) tmp28 = tmp27 >= tmp4 tmp29 = tmp27 < tmp1 tmp30 = tmp28 & tmp29 tmp31 = tmp30 & tmp26 tmp32 = tl.load(in_ptr0 + (12 + x2 + 4 * y0 + 16 * y1), tmp31 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp33 = tl.where(tmp30, tmp32, tmp13) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp26, tmp33, tmp34) tmp36 = tmp0 >= tmp4 tmp37 = tmp0 < tmp1 tmp38 = tmp36 & tmp37 tmp39 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp40 = tl.where(tmp38, tmp39, tmp13) tmp41 = tl.where(tmp26, tmp35, tmp40) tmp42 = tl.where(tmp2, tmp25, tmp41) tl.store(out_ptr0 + (y0 + 6 * x2 + 24 * y1), tmp42, xmask & ymask) @triton.jit def triton_poi_fused_clone_relu_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, 
xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (4, 16), (16, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_0[grid(24, 4)](primals_1, buf1, 24, 4, XBLOCK =4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_relu_1[grid(16, 4)](buf2, buf3, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf3, (4, 16), ( 16, 1), 0), reinterpret_tensor(primals_3, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf4) del primals_4 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, buf4, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_6 buf6 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(64)](buf2, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 return buf5, primals_2, buf1, reinterpret_tensor(buf3, (4, 16), (16, 1), 0 ), buf4, primals_5, primals_3, buf6 class CNNNew(nn.Module): """Convolutional Neural Networks.""" def __init__(self, input_size, hidden_dim, output_size): super(CNNNew, self).__init__() self.Conv1 = nn.Conv1d(in_channels=input_size, out_channels= hidden_dim, kernel_size=3, padding=1, padding_mode='circular', bias=False) self.flatten = nn.Flatten() self.relu = nn.ReLU() self.fc1 = nn.Linear(hidden_dim * output_size, hidden_dim) self.fc2 = nn.Linear(hidden_dim, output_size) def forward(self, input_0): primals_2 = self.Conv1.weight primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
jianzhnie/TsFormer
CNN
false
3,731
[ "Apache-2.0" ]
0
47e362f02445ba00d5ab8db206667767e72faca7
https://github.com/jianzhnie/TsFormer/tree/47e362f02445ba00d5ab8db206667767e72faca7
SeriesDecomp
import torch
import torch.nn as nn
import torch.fft


class MovingAvg(nn.Module):
    """Moving average block to highlight the trend of time series."""

    def __init__(self, kernel_size, stride):
        super(MovingAvg, self).__init__()
        self.kernel_size = kernel_size
        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0)

    def forward(self, x):
        front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1)
        end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1)
        x = torch.cat([front, x, end], dim=1)
        x = self.avg(x.permute(0, 2, 1))
        x = x.permute(0, 2, 1)
        return x


class SeriesDecomp(nn.Module):
    """Series decomposition block."""

    def __init__(self, kernel_size):
        super(SeriesDecomp, self).__init__()
        self.moving_avg = MovingAvg(kernel_size, stride=1)

    def forward(self, x):
        moving_mean = self.moving_avg(x)
        res = x - moving_mean
        return res, moving_mean


def get_inputs():
    return [torch.rand([4, 2, 4])]


def get_init_inputs():
    return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.fft assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 8 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 3, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0 + 4 * (-1 + x1) + 8 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 4, tl.int64) tmp14 = tl.load(in_ptr0 + (4 + x0 + 8 * x2), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 2, 4), (8, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) triton_poi_fused_avg_pool2d_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_sub_2[grid(32)](arg0_1, buf1, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) del arg0_1 return buf2, reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0) class MovingAvg(nn.Module): """Moving average block to highlight the trend of time series.""" def __init__(self, kernel_size, stride): super(MovingAvg, self).__init__() self.kernel_size = kernel_size self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0) def forward(self, x): front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1) end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1) x = torch.cat([front, x, end], dim=1) x = self.avg(x.permute(0, 2, 1)) x = x.permute(0, 2, 1) return x class SeriesDecompNew(nn.Module): """Series decomposition block.""" def __init__(self, kernel_size): super(SeriesDecompNew, self).__init__() self.moving_avg = MovingAvg(kernel_size, stride=1) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0], output[1]
jianzhnie/TsFormer
SeriesDecomp
false
3,732
[ "Apache-2.0" ]
0
47e362f02445ba00d5ab8db206667767e72faca7
https://github.com/jianzhnie/TsFormer/tree/47e362f02445ba00d5ab8db206667767e72faca7
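Aside (editor's sketch, not part of the record above): SeriesDecomp estimates the trend with an edge-replicated moving average and returns (residual, trend), which by construction sum back to the input. The same steps written out directly, with the kernel_size=4 and (4, 2, 4) input from get_init_inputs()/get_inputs():

import torch
import torch.nn as nn

kernel_size, x = 4, torch.rand(4, 2, 4)
pad = (kernel_size - 1) // 2
padded = torch.cat([x[:, 0:1, :].repeat(1, pad, 1), x, x[:, -1:, :].repeat(1, pad, 1)], dim=1)
trend = nn.AvgPool1d(kernel_size, stride=1)(padded.permute(0, 2, 1)).permute(0, 2, 1)
print(trend.shape, torch.allclose((x - trend) + trend, x))  # torch.Size([4, 1, 4]) True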
WSLinear
import torch
from torch import nn
import torch.utils.data


class WSLinear(nn.Module):

    def __init__(self, in_features, out_features, gain=2):
        super(WSLinear, self).__init__()
        self.linear = nn.Linear(in_features, out_features)
        self.scale = (gain / in_features) ** 0.5
        self.bias = self.linear.bias
        self.linear.bias = None
        nn.init.normal_(self.linear.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x):
        return self.linear(x * self.scale) + self.bias


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.7071067811865476 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_add_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class WSLinearNew(nn.Module): def __init__(self, in_features, out_features, gain=2): super(WSLinearNew, self).__init__() self.linear = nn.Linear(in_features, out_features) self.scale = (gain / in_features) ** 0.5 self.bias = self.linear.bias self.linear.bias = None nn.init.normal_(self.linear.weight) nn.init.zeros_(self.bias) def forward(self, input_0): primals_3 = self.bias primals_2 = self.linear.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
jiazhi412/Machine-Learning-Collection
WSLinear
false
3,733
[ "MIT" ]
0
1c30faf1e27a79eeca966c017e956df8f7f6ef17
https://github.com/jiazhi412/Machine-Learning-Collection/tree/1c30faf1e27a79eeca966c017e956df8f7f6ef17
LatentLoss
import torch
from torch import Tensor
import torch.nn as nn


class LatentLoss(nn.Module):

    def forward(self, mu: 'Tensor', logvar: 'Tensor') ->Tensor:
        loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 - tmp4 tmp6 = tl_math.exp(tmp0) tmp7 = tmp5 - tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = -0.5 tmp12 = tmp10 * tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_exp_mul_pow_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class LatentLossNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
jinyeom/vae
LatentLoss
false
3,734
[ "MIT" ]
0
861cb2edd5cebc9f56c2677d7b79f5ab0a05f874
https://github.com/jinyeom/vae/tree/861cb2edd5cebc9f56c2677d7b79f5ab0a05f874
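Aside (editor's sketch, not part of the record above): LatentLoss is the closed-form KL divergence KL(N(mu, sigma^2) || N(0, 1)) summed over all elements, the usual VAE latent loss, which is exactly the sum reduction the fused Triton kernel performs. A cross-check against torch.distributions:

import torch
from torch.distributions import Normal, kl_divergence

mu, logvar = torch.randn(4, 8), torch.randn(4, 8)
loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
kl = kl_divergence(Normal(mu, (0.5 * logvar).exp()),
                   Normal(torch.zeros_like(mu), torch.ones_like(mu))).sum()
print(torch.allclose(loss, kl, atol=1e-5))  # True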
DotProductSimilarity
import math
import torch
import torch.nn as nn


class SimilarityFunction(nn.Module):
    """
    A ``SimilarityFunction`` takes a pair of tensors with the same shape, and computes a similarity
    function on the vectors in the last dimension.  For example, the tensors might both have shape
    `(batch_size, sentence_length, embedding_dim)`, and we will compute some function of the two
    vectors of length `embedding_dim` for each position `(batch_size, sentence_length)`, returning a
    tensor of shape `(batch_size, sentence_length)`.

    The similarity function could be as simple as a dot product, or it could be a more complex,
    parameterized function.
    """
    default_implementation = 'dot_product'

    def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor') ->torch.Tensor:
        """
        Takes two tensors of the same shape, such as ``(batch_size, length_1, length_2,
        embedding_dim)``.  Computes a (possibly parameterized) similarity on the final dimension
        and returns a tensor with one less dimension, such as ``(batch_size, length_1, length_2)``.
        """
        raise NotImplementedError


class DotProductSimilarity(SimilarityFunction):
    """
    This similarity function simply computes the dot product between each pair of vectors, with an
    optional scaling to reduce the variance of the output elements.

    Parameters
    ----------
    scale_output : ``bool``, optional
        If ``True``, we will scale the output by ``math.sqrt(tensor.size(-1))``, to reduce the
        variance in the result.
    """

    def __init__(self, scale_output: 'bool'=False) ->None:
        super(DotProductSimilarity, self).__init__()
        self._scale_output = scale_output

    def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor') ->torch.Tensor:
        result = (tensor_1 * tensor_2).sum(dim=-1)
        if self._scale_output:
            result *= math.sqrt(tensor_1.size(-1))
        return result


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x0, tmp14, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class SimilarityFunction(nn.Module): """ A ``SimilarityFunction`` takes a pair of tensors with the same shape, and computes a similarity function on the vectors in the last dimension. For example, the tensors might both have shape `(batch_size, sentence_length, embedding_dim)`, and we will compute some function of the two vectors of length `embedding_dim` for each position `(batch_size, sentence_length)`, returning a tensor of shape `(batch_size, sentence_length)`. The similarity function could be as simple as a dot product, or it could be a more complex, parameterized function. """ default_implementation = 'dot_product' def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor' ) ->torch.Tensor: """ Takes two tensors of the same shape, such as ``(batch_size, length_1, length_2, embedding_dim)``. Computes a (possibly parameterized) similarity on the final dimension and returns a tensor with one less dimension, such as ``(batch_size, length_1, length_2)``. """ raise NotImplementedError class DotProductSimilarityNew(SimilarityFunction): """ This similarity function simply computes the dot product between each pair of vectors, with an optional scaling to reduce the variance of the output elements. Parameters ---------- scale_output : ``bool``, optional If ``True``, we will scale the output by ``math.sqrt(tensor.size(-1))``, to reduce the variance in the result. """ def __init__(self, scale_output: 'bool'=False) ->None: super(DotProductSimilarityNew, self).__init__() self._scale_output = scale_output def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
immrz/qagnn
DotProductSimilarity
false
3,735
[ "MIT" ]
0
0e695c6fcbefcf25da25c056c0bea1940b3e0f2b
https://github.com/immrz/qagnn/tree/0e695c6fcbefcf25da25c056c0bea1940b3e0f2b
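Aside (editor's sketch, not part of the record above): the forward pass in DotProductSimilarity is a contraction over the last dimension, which the fused Triton kernel unrolls for dimension size 4. An equivalent einsum spelling makes the contraction explicit, using the (4, 4, 4, 4) shapes from get_inputs():

import torch

a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
print(torch.allclose((a * b).sum(dim=-1), torch.einsum('...d,...d->...', a, b), atol=1e-6))  # True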
MultiHeadAttention
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class ScaledDotProductAttention(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)

    def forward(self, q, k, v, mask=None):
        attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
        if mask is not None:
            attn = attn.masked_fill(mask == 0, -1000000000.0)
        attn = self.dropout(F.softmax(attn, dim=-1))
        output = torch.matmul(attn, v)
        return output, attn


class MultiHeadAttention(nn.Module):
    """ Multi-Head Attention module """

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
        self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)

    def init_context(self, context):
        self.context = context

    def forward(self, q, k, v, mask=None):
        q = q.transpose(0, 1)
        k = k.transpose(0, 1)
        v = v.transpose(0, 1)
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
        residual = q
        q = self.layer_norm(q)
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
        q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
        if mask is not None:
            mask = mask.unsqueeze(1)
        output, attn = self.attention(q, k, v, mask=mask)
        output = output.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
        output = self.dropout(self.fc(output))
        output += residual
        return output, attn


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'n_head': 4, 'd_model': 4, 'd_k': 4, 'd_v': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1), tmp8, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_div_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (16, 4), (4, 1)) assert_size_stride(primals_9, (4, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (1, 4, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (1, 4, 16), torch.float32) 
get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_1, buf0, buf1, primals_4, primals_5, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf1 del primals_4 del primals_5 buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(64)](primals_2, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf5) del primals_7 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(64)](primals_3, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf7 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), out=buf7) del primals_8 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_div_3[grid(256)](buf3, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_clone_4[grid(64, 4)](buf5, buf9, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_5[grid(256)](buf10, buf11, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf12 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf10 triton_poi_fused__softmax_6[grid(256)](buf11, buf12, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused_clone_7[grid(256)](buf7, buf13, 256, XBLOCK=256, num_warps=4, num_stages=1) buf14 = reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0) del buf7 extern_kernels.bmm(reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), out=buf14 ) buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_7[grid(256)](buf14, buf15, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf14 buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf15, (16, 16), (16, 1), 0), reinterpret_tensor(primals_9, (16, 4), (1, 16), 0), out=buf16) buf17 = reinterpret_tensor(buf16, (4, 4, 4), (16, 4, 1), 0) del buf16 triton_poi_fused_add_8[grid(64)](buf17, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf17, buf12, primals_1, reinterpret_tensor(buf2, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf4, (16, 4), (4, 1), 0), reinterpret_tensor( buf6, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 16 ), (16, 1), 0), primals_9, reinterpret_tensor(buf13, (16, 4, 4), ( 16, 1, 4), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0), primals_6 class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def 
__init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -1000000000.0) attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class MultiHeadAttentionNew(nn.Module): """ Multi-Head Attention module """ def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False) self.fc = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) def init_context(self, context): self.context = context def forward(self, input_0, input_1, input_2): primals_6 = self.w_qs.weight primals_7 = self.w_ks.weight primals_8 = self.w_vs.weight primals_9 = self.fc.weight primals_4 = self.layer_norm.weight primals_5 = self.layer_norm.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
jiahuanluo/Global-Encoding
MultiHeadAttention
false
3,736
[ "MIT" ]
0
2adb01def9525588b3a75e6f2a5181a3a11464ed
https://github.com/jiahuanluo/Global-Encoding/tree/2adb01def9525588b3a75e6f2a5181a3a11464ed
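A minimal eager-mode sketch of the scaled dot-product attention that the fused kernels in the MultiHeadAttention entry above implement, softmax(q @ k^T / temperature) @ v; the (4, 4, 4) shapes follow the inputs asserted in call(), and d_k = 4 is an assumption used only to pick the temperature.

import torch
import torch.nn.functional as F

# Eager reference for the attention computed by the compiled call() above.
q = torch.rand(4, 4, 4)            # (batch, len_q, d_k)
k = torch.rand(4, 4, 4)            # (batch, len_k, d_k)
v = torch.rand(4, 4, 4)            # (batch, len_k, d_v)
temperature = 4 ** 0.5             # d_k ** 0.5, as in ScaledDotProductAttention

attn = torch.matmul(q / temperature, k.transpose(-2, -1))   # (batch, len_q, len_k)
attn = F.softmax(attn, dim=-1)
out = torch.matmul(attn, v)                                  # (batch, len_q, d_v)
print(out.shape, attn.shape)       # torch.Size([4, 4, 4]) torch.Size([4, 4, 4])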
NN
import torch from torch import nn import torch.nn.functional as F import torch.utils.data class NN(nn.Module): def __init__(self, input_size, num_classes): super(NN, self).__init__() self.fc1 = nn.Linear(input_size, 50) self.fc2 = nn.Linear(50, num_classes) def forward(self, x): """ x here is the mnist images and we run it through fc1, fc2 that we created above. we also add a ReLU activation function in between and for that (since it has no parameters) I recommend using nn.functional (F) """ x = F.relu(self.fc1(x)) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (50, 4), (4, 1)) assert_size_stride(primals_2, (50,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 50), (50, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf1, primals_2, buf3, 3200, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 50), (50, 1), 0), reinterpret_tensor(primals_4, (50, 4), (1, 50), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 50), (50, 1), 0), primals_4, buf3 class NNNew(nn.Module): def __init__(self, input_size, num_classes): super(NNNew, self).__init__() self.fc1 = nn.Linear(input_size, 50) self.fc2 = nn.Linear(50, num_classes) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jiazhi412/Machine-Learning-Collection
NN
false
3,737
[ "MIT" ]
0
1c30faf1e27a79eeca966c017e956df8f7f6ef17
https://github.com/jiazhi412/Machine-Learning-Collection/tree/1c30faf1e27a79eeca966c017e956df8f7f6ef17
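Every entry ships get_inputs() and get_init_inputs() helpers; a hedged sketch of how they are presumably meant to drive the NN module above (the dataset itself shows no harness, so the wiring here is an assumption).

import torch
from torch import nn
import torch.nn.functional as F

# Re-declared copy of the NN module from the entry above, exercised the way the
# get_init_inputs()/get_inputs() helpers suggest: constructor (args, kwargs),
# then one example forward input.
class NN(nn.Module):
    def __init__(self, input_size, num_classes):
        super().__init__()
        self.fc1 = nn.Linear(input_size, 50)
        self.fc2 = nn.Linear(50, num_classes)

    def forward(self, x):
        return self.fc2(F.relu(self.fc1(x)))

init_args, init_kwargs = [], {'input_size': 4, 'num_classes': 4}   # get_init_inputs()
model = NN(*init_args, **init_kwargs)
out = model(torch.rand(4, 4, 4, 4))                                 # get_inputs()[0]
print(out.shape)   # torch.Size([4, 4, 4, 4]); nn.Linear acts on the last dim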
DummyLayer
import torch import torch.nn as nn class DummyLayer(nn.Module): def __init__(self): super().__init__() self.dummy = nn.Parameter(torch.ones(1, dtype=torch.float)) def forward(self, x): return x + self.dummy - self.dummy def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tmp3 - tmp2 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_sub_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class DummyLayerNew(nn.Module): def __init__(self): super().__init__() self.dummy = nn.Parameter(torch.ones(1, dtype=torch.float)) def forward(self, input_0): primals_1 = self.dummy primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
jishnujayakumar/specter
DummyLayer
false
3,738
[ "Apache-2.0" ]
0
40e3b5e538004b00b0955f17dd3d71fb1f96b922
https://github.com/jishnujayakumar/specter/tree/40e3b5e538004b00b0955f17dd3d71fb1f96b922
MatrixAttention
import math import torch import torch.nn as nn class SimilarityFunction(nn.Module): """ A ``SimilarityFunction`` takes a pair of tensors with the same shape, and computes a similarity function on the vectors in the last dimension. For example, the tensors might both have shape `(batch_size, sentence_length, embedding_dim)`, and we will compute some function of the two vectors of length `embedding_dim` for each position `(batch_size, sentence_length)`, returning a tensor of shape `(batch_size, sentence_length)`. The similarity function could be as simple as a dot product, or it could be a more complex, parameterized function. """ default_implementation = 'dot_product' def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor' ) ->torch.Tensor: """ Takes two tensors of the same shape, such as ``(batch_size, length_1, length_2, embedding_dim)``. Computes a (possibly parameterized) similarity on the final dimension and returns a tensor with one less dimension, such as ``(batch_size, length_1, length_2)``. """ raise NotImplementedError class DotProductSimilarity(SimilarityFunction): """ This similarity function simply computes the dot product between each pair of vectors, with an optional scaling to reduce the variance of the output elements. Parameters ---------- scale_output : ``bool``, optional If ``True``, we will scale the output by ``math.sqrt(tensor.size(-1))``, to reduce the variance in the result. """ def __init__(self, scale_output: 'bool'=False) ->None: super(DotProductSimilarity, self).__init__() self._scale_output = scale_output def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor' ) ->torch.Tensor: result = (tensor_1 * tensor_2).sum(dim=-1) if self._scale_output: result *= math.sqrt(tensor_1.size(-1)) return result class MatrixAttention(nn.Module): def __init__(self, similarity_function: 'SimilarityFunction'=None) ->None: super().__init__() self._similarity_function = (similarity_function or DotProductSimilarity()) def forward(self, matrix_1: 'torch.Tensor', matrix_2: 'torch.Tensor' ) ->torch.Tensor: tiled_matrix_1 = matrix_1.unsqueeze(2).expand(matrix_1.size()[0], matrix_1.size()[1], matrix_2.size()[1], matrix_1.size()[2]) tiled_matrix_2 = matrix_2.unsqueeze(1).expand(matrix_2.size()[0], matrix_1.size()[1], matrix_2.size()[1], matrix_2.size()[2]) return self._similarity_function(tiled_matrix_1, tiled_matrix_2) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class SimilarityFunction(nn.Module): """ A ``SimilarityFunction`` takes a pair of tensors with the same shape, and computes a similarity function on the vectors in the last dimension. For example, the tensors might both have shape `(batch_size, sentence_length, embedding_dim)`, and we will compute some function of the two vectors of length `embedding_dim` for each position `(batch_size, sentence_length)`, returning a tensor of shape `(batch_size, sentence_length)`. The similarity function could be as simple as a dot product, or it could be a more complex, parameterized function. """ default_implementation = 'dot_product' def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor' ) ->torch.Tensor: """ Takes two tensors of the same shape, such as ``(batch_size, length_1, length_2, embedding_dim)``. Computes a (possibly parameterized) similarity on the final dimension and returns a tensor with one less dimension, such as ``(batch_size, length_1, length_2)``. """ raise NotImplementedError class DotProductSimilarity(SimilarityFunction): """ This similarity function simply computes the dot product between each pair of vectors, with an optional scaling to reduce the variance of the output elements. Parameters ---------- scale_output : ``bool``, optional If ``True``, we will scale the output by ``math.sqrt(tensor.size(-1))``, to reduce the variance in the result. 
""" def __init__(self, scale_output: 'bool'=False) ->None: super(DotProductSimilarity, self).__init__() self._scale_output = scale_output def forward(self, tensor_1: 'torch.Tensor', tensor_2: 'torch.Tensor' ) ->torch.Tensor: result = (tensor_1 * tensor_2).sum(dim=-1) if self._scale_output: result *= math.sqrt(tensor_1.size(-1)) return result class MatrixAttentionNew(nn.Module): def __init__(self, similarity_function: 'SimilarityFunction'=None) ->None: super().__init__() self._similarity_function = (similarity_function or DotProductSimilarity()) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
immrz/qagnn
MatrixAttention
false
3,739
[ "MIT" ]
0
0e695c6fcbefcf25da25c056c0bea1940b3e0f2b
https://github.com/immrz/qagnn/tree/0e695c6fcbefcf25da25c056c0bea1940b3e0f2b
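The MatrixAttention entry above tiles both matrices and applies DotProductSimilarity; a short sketch, assuming the default unscaled similarity, showing that this is equivalent to a plain batched matrix product, which is also what the fused mul/sum kernel computes.

import torch

m1 = torch.rand(4, 4, 4)   # (batch, len_1, dim), matching get_inputs()
m2 = torch.rand(4, 4, 4)   # (batch, len_2, dim)

# What MatrixAttention.forward does: tile, multiply, sum over the embedding dim.
tiled_1 = m1.unsqueeze(2).expand(4, 4, 4, 4)
tiled_2 = m2.unsqueeze(1).expand(4, 4, 4, 4)
attn_tiled = (tiled_1 * tiled_2).sum(dim=-1)       # (batch, len_1, len_2)

# Equivalent batched matmul.
attn_bmm = torch.bmm(m1, m2.transpose(1, 2))
print(torch.allclose(attn_tiled, attn_bmm))        # True up to float rounding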
BinaryLoss
import torch import torch.nn as nn import torch.nn.functional as F class BinaryLoss(nn.Module): """ Computes contrastive loss[1, 2] twice, one time for the distance between query and positive example, and another for the distance between query and negative example. Both use l2-distance. [1] http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf, equation 4 [2] https://gist.github.com/harveyslash/725fcc68df112980328951b3426c0e0b#file-contrastive-loss-py """ def __init__(self, margin=1.0): """ Args: margin: margin (float, optional): Default: `1.0`. """ super(BinaryLoss, self).__init__() self.margin = margin def forward(self, query, positive, negative): distance_positive = F.pairwise_distance(query, positive) distance_negative = F.pairwise_distance(query, negative) return torch.pow(distance_positive, 2) + torch.pow(torch.clamp(self .margin - distance_negative, min=0.0), 2) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_clamp_norm_pow_rsub_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp35 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp40 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = tmp24 * tmp24 tmp27 = tmp0 - tmp26 tmp28 = tmp27 + tmp3 tmp29 = tmp28 * tmp28 tmp31 = tmp6 - tmp30 tmp32 = tmp31 + tmp3 tmp33 = tmp32 * tmp32 tmp34 = tmp29 + tmp33 tmp36 = tmp12 - tmp35 tmp37 = tmp36 + tmp3 tmp38 = tmp37 * tmp37 tmp39 = tmp34 + tmp38 tmp41 = tmp18 - tmp40 tmp42 = tmp41 + tmp3 tmp43 = tmp42 * tmp42 tmp44 = tmp39 + tmp43 tmp45 = libdevice.sqrt(tmp44) tmp46 = 1.0 tmp47 = tmp46 - tmp45 tmp48 = 0.0 tmp49 = triton_helpers.maximum(tmp47, tmp48) tmp50 = tmp49 * tmp49 tmp51 = tmp25 + tmp50 tl.store(out_ptr0 + x0, tmp51, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_clamp_norm_pow_rsub_sub_0[grid(64)](arg1_1, arg0_1, arg2_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf0, class BinaryLossNew(nn.Module): """ Computes contrastive loss[1, 2] twice, one time for the distance between query and positive example, and another for the distance between query and negative example. Both use l2-distance. 
[1] http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf, equation 4 [2] https://gist.github.com/harveyslash/725fcc68df112980328951b3426c0e0b#file-contrastive-loss-py """ def __init__(self, margin=1.0): """ Args: margin: margin (float, optional): Default: `1.0`. """ super(BinaryLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
jishnujayakumar/specter
BinaryLoss
false
3,740
[ "Apache-2.0" ]
0
40e3b5e538004b00b0955f17dd3d71fb1f96b922
https://github.com/jishnujayakumar/specter/tree/40e3b5e538004b00b0955f17dd3d71fb1f96b922
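The docstring above describes the loss as two contrastive terms; a worked eager-mode version of that formula, d_pos**2 + clamp(margin - d_neg, 0)**2, with shapes taken from get_inputs().

import torch
import torch.nn.functional as F

query, positive, negative = (torch.rand(4, 4, 4, 4) for _ in range(3))
margin = 1.0

d_pos = F.pairwise_distance(query, positive)   # l2 distance over the last dim
d_neg = F.pairwise_distance(query, negative)

# Pull the positive pair together, push the negative pair past the margin.
loss = d_pos.pow(2) + torch.clamp(margin - d_neg, min=0.0).pow(2)
print(loss.shape)   # torch.Size([4, 4, 4]): one value per remaining position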
QNetwork
import torch import torch.nn.functional as F import torch.nn as nn class QNetwork(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed """ super(QNetwork, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(in_features=state_size, out_features=128) self.fc2 = nn.Linear(in_features=128, out_features=64) self.fc3 = nn.Linear(in_features=64, out_features=32) self.fc4 = nn.Linear(in_features=32, out_features=action_size) def forward(self, state): """Build a network that maps state -> action values.""" x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) x = F.relu(self.fc3(x)) x = self.fc4(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 128), (128, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (32, 64), (64, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (4, 32), (32, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf9, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 64), (1, 128), 0), out=buf2) buf3 = 
reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) triton_poi_fused_relu_threshold_backward_1[grid(4096)](buf3, primals_5, buf8, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 32), (1, 64), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf4 buf7 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(2048)](buf5, primals_7, buf7, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 32), (32, 1), 0), reinterpret_tensor(primals_8, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf6) del primals_9 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor( buf5, (64, 32), (32, 1), 0 ), primals_8, buf7, primals_6, buf8, primals_4, buf9 class QNetworkNew(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed """ super(QNetworkNew, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(in_features=state_size, out_features=128) self.fc2 = nn.Linear(in_features=128, out_features=64) self.fc3 = nn.Linear(in_features=64, out_features=32) self.fc4 = nn.Linear(in_features=32, out_features=action_size) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
jibin-liu/deep-reinforcement-learning
QNetwork
false
3,741
[ "MIT" ]
0
2a91a66a931e891d08cd1af95da973a522381b52
https://github.com/jibin-liu/deep-reinforcement-learning/tree/2a91a66a931e891d08cd1af95da973a522381b52
AdaIN
import torch from torch import nn import torch.utils.data class WSLinear(nn.Module): def __init__(self, in_features, out_features, gain=2): super(WSLinear, self).__init__() self.linear = nn.Linear(in_features, out_features) self.scale = (gain / in_features) ** 0.5 self.bias = self.linear.bias self.linear.bias = None nn.init.normal_(self.linear.weight) nn.init.zeros_(self.bias) def forward(self, x): return self.linear(x * self.scale) + self.bias class AdaIN(nn.Module): def __init__(self, channels, w_dim): super().__init__() self.instance_norm = nn.InstanceNorm2d(channels) self.style_scale = WSLinear(w_dim, channels) self.style_bias = WSLinear(w_dim, channels) def forward(self, x, w): x = self.instance_norm(x) style_scale = self.style_scale(w).unsqueeze(2).unsqueeze(3) style_bias = self.style_bias(w).unsqueeze(2).unsqueeze(3) return style_scale * x + style_bias def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'w_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.7071067811865476 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex // 256 x4 = xindex % 16 x0 = xindex % 4 x5 = xindex % 256 x2 = xindex // 16 % 16 x6 = xindex tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), None, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x5, None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + (x4 + 16 * x3), None, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 - tmp4 tmp7 = tmp5 * tmp6 tmp8 = tmp2 * tmp7 tmp11 = tmp9 + tmp10 tmp12 = tmp8 + tmp11 tl.store(out_ptr0 + x6, tmp12, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf3 
= reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf1 get_raw_stream(0) triton_per_fused__native_batch_norm_legit_0[grid(16)](buf3, primals_1, buf0, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_1[grid(256)](primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf5) del primals_3 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf6) del primals_5 buf7 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_2[grid(4096)](buf5, primals_4, primals_1, buf0, buf3, buf6, primals_6, buf7, 4096, XBLOCK=128, num_warps= 4, num_stages=1) del buf5 del buf6 del primals_4 del primals_6 return buf7, primals_1, buf0, buf3, reinterpret_tensor(buf4, (64, 4), ( 4, 1), 0) class WSLinear(nn.Module): def __init__(self, in_features, out_features, gain=2): super(WSLinear, self).__init__() self.linear = nn.Linear(in_features, out_features) self.scale = (gain / in_features) ** 0.5 self.bias = self.linear.bias self.linear.bias = None nn.init.normal_(self.linear.weight) nn.init.zeros_(self.bias) def forward(self, x): return self.linear(x * self.scale) + self.bias class AdaINNew(nn.Module): def __init__(self, channels, w_dim): super().__init__() self.instance_norm = nn.InstanceNorm2d(channels) self.style_scale = WSLinear(w_dim, channels) self.style_bias = WSLinear(w_dim, channels) def forward(self, input_0, input_1): primals_4 = self.style_scale.bias primals_3 = self.style_scale.linear.weight primals_6 = self.style_bias.bias primals_5 = self.style_bias.linear.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
jiazhi412/Machine-Learning-Collection
AdaIN
false
3,742
[ "MIT" ]
0
1c30faf1e27a79eeca966c017e956df8f7f6ef17
https://github.com/jiazhi412/Machine-Learning-Collection/tree/1c30faf1e27a79eeca966c017e956df8f7f6ef17
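A plain-PyTorch sketch of the composition the AdaIN entry above fuses: style_scale(w) * InstanceNorm(x) + style_bias(w), broadcast over the spatial dims. For clarity this uses a 2-D style vector and ordinary nn.Linear layers instead of the weight-scaled WSLinear; both substitutions are assumptions, not the entry's exact test setup (its get_inputs() feeds a 4-D w).

import torch
from torch import nn

channels, w_dim = 4, 4
x = torch.rand(4, channels, 4, 4)    # content features
w = torch.rand(4, w_dim)             # style vector (simplified to 2-D here)

instance_norm = nn.InstanceNorm2d(channels)
style_scale = nn.Linear(w_dim, channels)
style_bias = nn.Linear(w_dim, channels)

scale = style_scale(w).unsqueeze(-1).unsqueeze(-1)   # (N, C, 1, 1)
bias = style_bias(w).unsqueeze(-1).unsqueeze(-1)
out = scale * instance_norm(x) + bias
print(out.shape)   # torch.Size([4, 4, 4, 4])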
MatrixVectorScaledDotProductAttention
import torch import numpy as np import torch.nn as nn class MatrixVectorScaledDotProductAttention(nn.Module): def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=-1) def forward(self, q, k, v, mask=None): """ q: tensor of shape (*, m, d_k) k: tensor of shape (*, l, d_k) v: tensor of shape (*, l, d_v) mask: None or tensor of shape (*, m, l) or (*, 1, l) returns: tensor of shape (*, m, d_v), tensor of shape(*, m, l) """ attn = torch.matmul(q, k.transpose(-2, -1)) attn = attn / self.temperature if mask is not None: attn = attn.masked_fill(mask, -np.inf) attn = self.softmax(attn) attn = self.dropout(attn) output = torch.matmul(attn, v) return output, attn def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'temperature': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3 ) del arg2_1 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2 class MatrixVectorScaledDotProductAttentionNew(nn.Module): def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) 
self.softmax = nn.Softmax(dim=-1) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
immrz/qagnn
MatrixVectorScaledDotProductAttention
false
3,743
[ "MIT" ]
0
0e695c6fcbefcf25da25c056c0bea1940b3e0f2b
https://github.com/immrz/qagnn/tree/0e695c6fcbefcf25da25c056c0bea1940b3e0f2b
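The forward() docstring above spells out the shapes; a minimal eager-mode sketch of the same masked, scaled attention, using an all-False mask purely to show the broadcastable (*, 1, l) shape.

import torch

q, k, v = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
temperature = 4.0
mask = torch.zeros(4, 4, 1, 4, dtype=torch.bool)   # (*, 1, l); nothing masked here

attn = torch.matmul(q, k.transpose(-2, -1)) / temperature
attn = attn.masked_fill(mask, float('-inf'))
attn = attn.softmax(dim=-1)
out = torch.matmul(attn, v)
print(out.shape, attn.shape)   # (*, m, d_v) and (*, m, l): both torch.Size([4, 4, 4, 4])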
Qnet
import random import torch import torch.nn as nn import torch.nn.functional as F class Qnet(nn.Module): def __init__(self): super(Qnet, self).__init__() self.fc1 = nn.Linear(4, 128) self.fc2 = nn.Linear(128, 128) self.fc3 = nn.Linear(128, 2) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def sample_action(self, obs, epsilon): out = self.forward(obs) coin = random.random() if coin < epsilon: return random.randint(0, 1) else: return out.argmax().item() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import random import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (2, 128), (128, 1)) assert_size_stride(primals_7, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf6, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf3, primals_5, buf5, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), reinterpret_tensor(buf3, (64, 128), (128, 1), 0 ), primals_6, buf5, primals_4, buf6 class QnetNew(nn.Module): def __init__(self): super(QnetNew, self).__init__() self.fc1 = nn.Linear(4, 128) self.fc2 = nn.Linear(128, 128) self.fc3 = nn.Linear(128, 2) def sample_action(self, obs, epsilon): out = self.forward(obs) coin = random.random() if coin < epsilon: return random.randint(0, 1) else: return out.argmax().item() def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = 
self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
jinPrelude/minimalRL
Qnet
false
3,744
[ "MIT" ]
0
4eba82feac15bb29f4ad715c6c8fd7b11426b840
https://github.com/jinPrelude/minimalRL/tree/4eba82feac15bb29f4ad715c6c8fd7b11426b840
ChannelMaxPool
import torch import torch.nn as nn import torch.nn.functional as F class ChannelMaxPool(nn.MaxPool1d): def forward(self, input): n, c, w, h = input.size() input = input.view(n, c, w * h).permute(0, 2, 1) pooled = F.max_pool1d(input, self.kernel_size, self.stride, self. padding, self.dilation, self.ceil_mode, self.return_indices) return pooled.permute(0, 2, 1).view(n, 1, w, h) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32 ) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0), class ChannelMaxPoolNew(nn.MaxPool1d): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
joeization/CycleGAN
ChannelMaxPool
false
3,745
[ "MIT" ]
0
9635c8e3a7b1634b2e2eb5b5299f03a4e0786868
https://github.com/joeization/CycleGAN/tree/9635c8e3a7b1634b2e2eb5b5299f03a4e0786868
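ChannelMaxPool above reshapes (n, c, w, h) into (n, w*h, c) so that max_pool1d pools across channels; a short check, assuming kernel_size equals the channel count as in get_init_inputs(), that this matches a direct max over dim=1.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)                        # (n, c, w, h) from get_inputs()
n, c, w, h = x.shape

flat = x.view(n, c, w * h).permute(0, 2, 1)       # (n, w*h, c)
pooled = F.max_pool1d(flat, kernel_size=c)        # max over the channel axis
pooled = pooled.permute(0, 2, 1).view(n, 1, w, h)

print(torch.equal(pooled, x.amax(dim=1, keepdim=True)))   # True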
Policy
import torch import torch.nn as nn import torch.nn.functional as F from torch.distributions import Categorical class Policy(nn.Module): def __init__(self, s_size=4, h_size=16, a_size=2): super(Policy, self).__init__() self.fc1 = nn.Linear(s_size, h_size) self.fc2 = nn.Linear(h_size, a_size) def forward(self, x): x = F.relu(self.fc1(x)) x = self.fc2(x) return F.softmax(x, dim=1) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0) probs = self.forward(state).cpu() m = Categorical(probs) action = m.sample() return action.item(), m.log_prob(action) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.distributions import Categorical assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 16), (16, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) get_raw_stream(0) 
triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1, primals_2, buf5, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 2), (1, 16), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) triton_poi_fused__softmax_1[grid(128)](buf2, buf3, 128, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(128)](buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 16), (16, 1), 0 ), buf4, primals_4, buf5 class PolicyNew(nn.Module): def __init__(self, s_size=4, h_size=16, a_size=2): super(PolicyNew, self).__init__() self.fc1 = nn.Linear(s_size, h_size) self.fc2 = nn.Linear(h_size, a_size) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0) probs = self.forward(state).cpu() m = Categorical(probs) action = m.sample() return action.item(), m.log_prob(action) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jiruifu-jerry0219/DRLND_Jerry
Policy
false
3,746
[ "MIT" ]
0
6a342f99119d466f8ae96202452b034f1a2e70e1
https://github.com/jiruifu-jerry0219/DRLND_Jerry/tree/6a342f99119d466f8ae96202452b034f1a2e70e1
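A hedged sketch of the sampling path in Policy.act() above (forward pass, softmax probabilities, Categorical sample); the layer sizes mirror the defaults s_size=4, h_size=16, a_size=2, and the standalone layers here stand in for the module's fc1/fc2.

import numpy as np
import torch
import torch.nn.functional as F
from torch.distributions import Categorical

fc1 = torch.nn.Linear(4, 16)
fc2 = torch.nn.Linear(16, 2)

state = np.random.rand(4).astype(np.float32)       # environment observation
x = torch.from_numpy(state).float().unsqueeze(0)   # (1, s_size)
probs = F.softmax(fc2(F.relu(fc1(x))), dim=1)      # (1, a_size)
m = Categorical(probs)
action = m.sample()
print(action.item(), m.log_prob(action))           # sampled action and its log-prob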
SelfAttention
import torch from torch import nn import torch.utils.data class SelfAttention(nn.Module): def __init__(self, embed_size, heads): super(SelfAttention, self).__init__() self.embed_size = embed_size self.heads = heads self.head_dim = embed_size // heads assert self.head_dim * heads == embed_size, 'Embedding size needs to be divisible by heads' self.values = nn.Linear(self.head_dim, self.head_dim, bias=False) self.keys = nn.Linear(self.head_dim, self.head_dim, bias=False) self.queries = nn.Linear(self.head_dim, self.head_dim, bias=False) self.fc_out = nn.Linear(heads * self.head_dim, embed_size) def forward(self, values, keys, query, mask): N = query.shape[0] value_len, key_len, query_len = values.shape[1], keys.shape[1 ], query.shape[1] values = values.reshape(N, value_len, self.heads, self.head_dim) keys = keys.reshape(N, key_len, self.heads, self.head_dim) query = query.reshape(N, query_len, self.heads, self.head_dim) values = self.values(values) keys = self.keys(keys) queries = self.queries(query) energy = torch.einsum('nqhd,nkhd->nhqk', [queries, keys]) if mask is not None: energy = energy.masked_fill(mask == 0, float('-1e20')) attention = torch.softmax(energy / self.embed_size ** (1 / 2), dim=3) out = torch.einsum('nhql,nlhd->nqhd', [attention, values]).reshape(N, query_len, self.heads * self.head_dim) out = self.fc_out(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 1]), torch.rand([4, 4, 4, 1]), torch.rand( [4, 4, 4, 1]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'embed_size': 4, 'heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (y0 + 16 * y1), ymask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp9 = tl.load(in_ptr2 + (4 + y0 + 16 * y1), ymask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (2 + 4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp15 = tl.load(in_ptr2 + (8 + y0 + 16 * y1), ymask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr2 + (12 + y0 + 16 * y1), ymask, eviction_policy= 'evict_last') tmp3 = tmp1 * tmp2 tmp4 = -1.0000000200408773e+20 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp10 = tmp1 * tmp9 tmp11 = tl.where(tmp8, tmp4, tmp10) tmp12 = tmp11 * tmp6 tmp13 = triton_helpers.maximum(tmp7, tmp12) tmp16 = tmp1 * tmp15 tmp17 = tl.where(tmp14, tmp4, tmp16) tmp18 = tmp17 * tmp6 tmp19 = triton_helpers.maximum(tmp13, tmp18) tmp22 = tmp1 * tmp21 tmp23 = tl.where(tmp20, tmp4, tmp22) tmp24 = tmp23 * tmp6 tmp25 = triton_helpers.maximum(tmp19, tmp24) tmp26 = tmp7 - tmp25 tmp27 = 0.5 tmp28 = tmp26 * tmp27 tmp29 = tl_math.exp(tmp28) tmp30 = tmp12 - tmp25 tmp31 = tmp30 * tmp27 tmp32 = tl_math.exp(tmp31) tmp33 = tmp29 + tmp32 tmp34 = tmp18 - tmp25 tmp35 = tmp34 * tmp27 tmp36 = tl_math.exp(tmp35) tmp37 = tmp33 + tmp36 tmp38 = tmp24 - tmp25 tmp39 = tmp38 * tmp27 tmp40 = tl_math.exp(tmp39) tmp41 = tmp37 + tmp40 tl.store(out_ptr0 + (x2 + 4 * y3), tmp25, xmask & ymask) tl.store(out_ptr1 + (x2 + 4 * y3), tmp41, xmask & ymask) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x0 = xindex % 4 x5 = xindex // 4 tmp0 
= tl.load(in_ptr0 + x4, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + (x2 + 4 * x1 + 16 * x3), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x2 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = -1.0000000200408773e+20 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp9 = tmp7 - tmp8 tmp10 = 0.5 tmp11 = tmp9 * tmp10 tmp12 = tl_math.exp(tmp11) tmp14 = tmp12 / tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_4, (1, 1), (1, 1)) assert_size_stride(primals_5, (1, 1), (1, 1)) assert_size_stride(primals_6, (1, 1), (1, 1)) assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 1), (1, 1), 0), primals_4, out=buf0) del primals_4 buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 1), (1, 1), 0), primals_5, out=buf1) del primals_5 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 1), (1, 1), 0), primals_6, out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(256)](primals_7, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_1[grid(16, 4)](buf3, buf2, buf1, buf4, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_masked_fill_2[grid(256)](buf3, buf2, buf1, buf4, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0) del buf5 triton_poi_fused_clone_3[grid(16, 4)](buf0, buf7, 16, 4, XBLOCK=4, YBLOCK=16, 
num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0), 0), out=buf8) buf9 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0) del buf4 triton_poi_fused_clone_3[grid(16, 4)](buf8, buf9, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0) del buf8 extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf10) buf11 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0) del buf10 triton_poi_fused_add_4[grid(64)](buf11, primals_9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 return buf11, reinterpret_tensor(primals_2, (64, 1), (1, 1), 0 ), reinterpret_tensor(primals_3, (64, 1), (1, 1), 0 ), buf1, reinterpret_tensor(primals_1, (64, 1), (1, 1), 0 ), buf2, buf3, buf6, reinterpret_tensor(buf9, (16, 4), (4, 1), 0 ), primals_8, reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1), 0) class SelfAttentionNew(nn.Module): def __init__(self, embed_size, heads): super(SelfAttentionNew, self).__init__() self.embed_size = embed_size self.heads = heads self.head_dim = embed_size // heads assert self.head_dim * heads == embed_size, 'Embedding size needs to be divisible by heads' self.values = nn.Linear(self.head_dim, self.head_dim, bias=False) self.keys = nn.Linear(self.head_dim, self.head_dim, bias=False) self.queries = nn.Linear(self.head_dim, self.head_dim, bias=False) self.fc_out = nn.Linear(heads * self.head_dim, embed_size) def forward(self, input_0, input_1, input_2, input_3): primals_4 = self.values.weight primals_5 = self.keys.weight primals_6 = self.queries.weight primals_8 = self.fc_out.weight primals_9 = self.fc_out.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 primals_7 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
jiazhi412/Machine-Learning-Collection
SelfAttention
false
3,747
[ "MIT" ]
0
1c30faf1e27a79eeca966c017e956df8f7f6ef17
https://github.com/jiazhi412/Machine-Learning-Collection/tree/1c30faf1e27a79eeca966c017e956df8f7f6ef17
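Note on the SelfAttention entry above: the pair of triton_poi_fused__softmax_masked_fill_* kernels implements a numerically stable masked softmax (fill masked positions with a large negative value, subtract the row max, exponentiate, normalize). The sketch below is not from the repo; it is a minimal plain-PyTorch restatement of what those kernels compute, where the hard-coded 0.5 factor presumably corresponds to 1 / sqrt(embed_size) for embed_size = 4, and the names scores, mask, scale are illustrative.

import torch

def masked_scaled_softmax(scores, mask, scale=0.5):
    # Positions where mask == 0 are filled with a large negative number
    # (the kernels use -1.0000000200408773e+20) so they vanish after exp().
    filled = scores.masked_fill(mask == 0, -1e20)
    # Subtract the per-row max before exp() for numerical stability, as the
    # fused kernels do, then apply the scaling and normalize.
    shifted = (filled - filled.max(dim=-1, keepdim=True).values) * scale
    weights = torch.exp(shifted)
    return weights / weights.sum(dim=-1, keepdim=True)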
GELU
import torch
import numpy as np
import torch.nn as nn


class GELU(nn.Module):
    """Gaussian Error Linear Unit.

    Dan Hendrycks∗, Kevin Gimpel
    GAUSSIAN ERROR LINEAR UNITS (GELUS), 2016

    Args:
        x: float Tensor to perform activation.

    Returns:
        `x` with the GELU activation applied.
    """

    def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(np.sqrt(2 / np.pi) * (x +
            0.044715 * torch.pow(x, 3))))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = tmp0 * tmp0
    tmp4 = tmp3 * tmp0
    tmp5 = 0.044715
    tmp6 = tmp4 * tmp5
    tmp7 = tmp0 + tmp6
    tmp8 = 0.7978845608028654
    tmp9 = tmp8 * tmp7
    tmp10 = libdevice.tanh(tmp9)
    tmp11 = 1.0
    tmp12 = tmp10 + tmp11
    tmp13 = tmp2 * tmp12
    tl.store(out_ptr0 + x0, tmp13, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_pow_sqrt_tanh_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class GELUNew(nn.Module):
    """Gaussian Error Linear Unit.

    Dan Hendrycks∗, Kevin Gimpel
    GAUSSIAN ERROR LINEAR UNITS (GELUS), 2016

    Args:
        x: float Tensor to perform activation.

    Returns:
        `x` with the GELU activation applied.
    """

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
joeization/CycleGAN
GELU
false
3,748
[ "MIT" ]
0
9635c8e3a7b1634b2e2eb5b5299f03a4e0786868
https://github.com/joeization/CycleGAN/tree/9635c8e3a7b1634b2e2eb5b5299f03a4e0786868
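Note on the GELU entry above: the constant 0.7978845608028654 hard-coded in triton_poi_fused_add_mul_pow_sqrt_tanh_0 is np.sqrt(2 / np.pi) folded at compile time, so the kernel evaluates the same tanh approximation as the eager module. A quick self-contained check (not part of the dataset row):

import numpy as np
import torch

x = torch.randn(4, 4, 4, 4)
eager = 0.5 * x * (1 + torch.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * torch.pow(x, 3))))
folded = 0.5 * x * (1 + torch.tanh(0.7978845608028654 * (x + 0.044715 * x ** 3)))
assert torch.allclose(eager, folded)
print(np.sqrt(2 / np.pi))  # 0.7978845608028654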
BertSelfOutput
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.utils.checkpoint class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_2, primals_4, 256, 
XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3, primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_6 return buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1 class BertSelfOutputNew(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_0, input_1): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_5 = self.LayerNorm.weight primals_6 = self.LayerNorm.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Hzfinfdu/Black-Box-Tuning
BertSelfOutput
false
3,749
[ "MIT" ]
0
64eb5505875dc1b242c6f0a2a2f07e4000c24cb4
https://github.com/Hzfinfdu/Black-Box-Tuning/tree/64eb5505875dc1b242c6f0a2a2f07e4000c24cb4
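Note on the BertSelfOutput entry above: the two native_layer_norm kernels split LayerNorm into a per-row (mean, rsqrt(var + eps)) pass followed by an affine pass, with eps = 1.0 because the mock config sets layer_norm_eps=1. A minimal equivalent in plain PyTorch (illustrative sketch, not from the repo):

import torch

def layer_norm_rows(x, weight, bias, eps=1.0):
    # Biased variance over the last dimension, matching the kernel's
    # sum-of-squared-deviations divided by the row length.
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    return (x - mean) * torch.rsqrt(var + eps) * weight + bias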
NPRNNCell
import torch from torch import nn class NPRNNCell(nn.Module): def __init__(self, input_size, hidden_size, output_size, clip=2.0): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.clip = clip self.fc_in = nn.Linear(input_size, hidden_size) self.weight = nn.Parameter(torch.Tensor(hidden_size, hidden_size)) self.alpha = nn.Parameter(torch.Tensor(hidden_size, hidden_size)) self.norm = nn.LayerNorm(hidden_size) self.modulator = nn.Linear(hidden_size, 1) self.modfanout = nn.Linear(1, hidden_size) self.fc_out = nn.Linear(hidden_size, output_size) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.weight, std=0.001) nn.init.normal_(self.alpha, std=0.001) def forward(self, x, h_pre, hebb): weight = self.weight + self.alpha * hebb h_post = self.fc_in(x) + (h_pre.unsqueeze(1) @ weight).squeeze(1) h_post = torch.tanh(self.norm(h_post)) out = self.fc_out(h_post) m = torch.tanh(self.modulator(h_post)) eta = self.modfanout(m.unsqueeze(2)) delta = eta * (h_pre.unsqueeze(2) @ h_post.unsqueeze(1)) hebb = torch.clamp(hebb + delta, min=-self.clip, max=self.clip) return out, h_post, m, hebb def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex // 256 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x3 = xindex % 256 x4 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_tanh_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 256 x4 = xindex x5 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x4, xmask) tmp3 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = libdevice.tanh(tmp13) tl.store(out_ptr0 + x4, tmp14, xmask) @triton.jit def triton_poi_fused_tanh_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = libdevice.tanh(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x2 = xindex // 64 % 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), None, eviction_policy='evict_last' ) tl.store(out_ptr0 + x4, tmp0, None) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x2 = xindex // 1024 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x2), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, None) @triton.jit def triton_poi_fused_add_clamp_mul_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex % 256 x0 = xindex % 64 x2 = xindex // 256 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 64 * x2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr2 + x4, None) tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = -2.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = 2.0 tmp8 = triton_helpers.minimum(tmp6, tmp7) tl.store(out_ptr0 + x4, tmp8, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (1, 4), (4, 1)) assert_size_stride(primals_13, (1,), (1,)) assert_size_stride(primals_14, (4, 1), (1, 1)) assert_size_stride(primals_15, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_4 del primals_5 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(1024)](primals_7, buf1, 1024, XBLOCK= 256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 
torch.float32) triton_poi_fused_clone_1[grid(1024)](primals_1, primals_2, primals_3, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32) triton_poi_fused_add_native_layer_norm_2[grid(256)](buf0, buf3, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = buf2 del buf2 triton_poi_fused_add_native_layer_norm_tanh_3[grid(1024)](buf0, buf3, buf4, buf5, primals_8, primals_9, buf6, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del primals_9 buf7 = empty_strided_cuda((256, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf6, (256, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_11 buf8 = reinterpret_tensor(buf5, (256, 1), (1, 1), 0) del buf5 extern_kernels.mm(reinterpret_tensor(buf6, (256, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 1), (1, 4), 0), out=buf8) buf9 = reinterpret_tensor(buf8, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0) del buf8 triton_poi_fused_tanh_4[grid(256)](buf9, primals_13, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_13 buf10 = empty_strided_cuda((256, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_15, reinterpret_tensor(buf9, (256, 1), (1, 1), 0), reinterpret_tensor(primals_14, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf10) del primals_15 buf11 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(4096)](primals_7, buf11, 4096, XBLOCK =256, num_warps=4, num_stages=1) del primals_7 buf12 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4, 1), torch.float32) triton_poi_fused_clone_6[grid(4096)](buf6, buf12, 4096, XBLOCK=256, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((256, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf11, (256, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf12, (256, 4, 4), (16, 4, 1), 0), out=buf13) buf14 = buf12 del buf12 triton_poi_fused_add_clamp_mul_7[grid(4096)](primals_3, buf10, buf13, buf14, 4096, XBLOCK=128, num_warps=4, num_stages=1) return reinterpret_tensor(buf7, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0 ), buf6, buf9, buf14, primals_3, primals_8, reinterpret_tensor( primals_6, (64, 4), (4, 1), 0 ), buf0, buf3, buf6, buf9, buf10, buf13, reinterpret_tensor(buf11, (256, 4, 4), (16, 1, 4), 0 ), primals_14, primals_12, primals_10, reinterpret_tensor(buf1, (64, 4, 4), (16, 1, 4), 0) class NPRNNCellNew(nn.Module): def __init__(self, input_size, hidden_size, output_size, clip=2.0): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.clip = clip self.fc_in = nn.Linear(input_size, hidden_size) self.weight = nn.Parameter(torch.Tensor(hidden_size, hidden_size)) self.alpha = nn.Parameter(torch.Tensor(hidden_size, hidden_size)) self.norm = nn.LayerNorm(hidden_size) self.modulator = nn.Linear(hidden_size, 1) self.modfanout = nn.Linear(1, hidden_size) self.fc_out = nn.Linear(hidden_size, output_size) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self.weight, std=0.001) nn.init.normal_(self.alpha, std=0.001) def forward(self, input_0, input_1, input_2): primals_1 = self.weight primals_2 = self.alpha 
primals_4 = self.fc_in.weight primals_5 = self.fc_in.bias primals_8 = self.norm.weight primals_9 = self.norm.bias primals_12 = self.modulator.weight primals_13 = self.modulator.bias primals_14 = self.modfanout.weight primals_11 = self.modfanout.bias primals_10 = self.fc_out.weight primals_15 = self.fc_out.bias primals_3 = input_0 primals_6 = input_1 primals_7 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0], output[1], output[2], output[3]
jinyeom/ga-plastic-models
NPRNNCell
false
3,750
[ "MIT" ]
0
e38b245ae51c35a5f32679cc9f215463a3d58f1a
https://github.com/jinyeom/ga-plastic-models/tree/e38b245ae51c35a5f32679cc9f215463a3d58f1a
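Note on the NPRNNCell entry above: the plasticity update at the end of forward() is an outer product of pre- and post-synaptic activity, gated by the learned modulator output eta and clamped to [-clip, clip]. Restated compactly (a sketch following the original module, not extra functionality; eta is taken as given):

import torch

def hebbian_update(hebb, h_pre, h_post, eta, clip=2.0):
    # (B, H, 1) @ (B, 1, H) -> batched outer product of shape (B, H, H)
    delta = eta * (h_pre.unsqueeze(2) @ h_post.unsqueeze(1))
    return torch.clamp(hebb + delta, min=-clip, max=clip)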
BlurPool2d
import torch import torch.nn as nn class BlurPool2d(nn.Sequential): """Blur Pooling Layer (MaxPool2d replacement) See: https://richzhang.github.io/antialiased-cnns/ Paper: https://arxiv.org/abs/1904.11486 """ __constants__ = ['in_features'] _blur_kernel = torch.tensor([[1 / 16, 2 / 16, 1 / 16], [2 / 16, 4 / 16, 2 / 16], [1 / 16, 2 / 16, 1 / 16]]) def __init__(self, in_features): """ Args: in_features (int): The number of channels in the input """ super().__init__() self.in_features = in_features self.add_module('maxpool', nn.MaxPool2d(2, stride=1)) blurpool = nn.Conv2d(in_features, in_features, kernel_size=3, padding=1, stride=2, bias=False, groups=in_features) blurpool.weight = torch.nn.Parameter(self._blur_kernel.repeat( in_features, 1, 1, 1), requires_grad=False) self.add_module('blurpool', blurpool) def forward(self, x): return super(BlurPool2d, self).forward(x) def extra_repr(self): return 'in_features={}'.format(self.in_features) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex % 3 x3 = xindex // 3 y4 = yindex x5 = xindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 4 * x3 + 16 * y4), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + x2 + 4 * x3 + 16 * y4), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x2 + 4 * x3 + 16 * y4), xmask & ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (5 + x2 + 4 * x3 + 16 * y4), xmask & ymask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (y0 + 4 * x5 + 36 * y1), tmp6, xmask & ymask) @triton.jit def triton_poi_fused_convolution_max_pool2d_with_indices_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 1, 12, 4), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16, 9)](arg0_1, buf0, 16, 9, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 1, 8, 4)) del arg1_1 del buf0 buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_convolution_max_pool2d_with_indices_1[grid(16, 4)]( buf1, buf2, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf1 return buf2, class BlurPool2dNew(nn.Sequential): """Blur Pooling Layer (MaxPool2d replacement) See: https://richzhang.github.io/antialiased-cnns/ Paper: https://arxiv.org/abs/1904.11486 """ __constants__ = ['in_features'] _blur_kernel = torch.tensor([[1 / 16, 2 / 16, 1 / 16], [2 / 16, 4 / 16, 2 / 16], [1 / 16, 2 / 16, 1 / 16]]) def __init__(self, in_features): """ Args: in_features (int): The number of channels in the input """ super().__init__() self.in_features = in_features self.add_module('maxpool', nn.MaxPool2d(2, stride=1)) blurpool = 
nn.Conv2d(in_features, in_features, kernel_size=3, padding=1, stride=2, bias=False, groups=in_features) blurpool.weight = torch.nn.Parameter(self._blur_kernel.repeat( in_features, 1, 1, 1), requires_grad=False) self.add_module('blurpool', blurpool) def extra_repr(self): return 'in_features={}'.format(self.in_features) def forward(self, input_0): arg1_1 = self.blurpool.weight arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
johanofverstedt/comir
BlurPool2d
false
3,751
[ "MIT" ]
0
fced349ebe3a7bf07ac59e25f02ca4780796b041
https://github.com/johanofverstedt/comir/tree/fced349ebe3a7bf07ac59e25f02ca4780796b041
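Note on the BlurPool2d entry above: the fixed _blur_kernel is a normalized 3x3 binomial filter (its weights sum to 1), applied as a stride-2 depthwise convolution after the stride-1 max pool. A small check (not part of the dataset row):

import torch

blur = torch.tensor([[1.0, 2.0, 1.0], [2.0, 4.0, 2.0], [1.0, 2.0, 1.0]]) / 16
assert torch.isclose(blur.sum(), torch.tensor(1.0))
# Repeating it per channel gives the depthwise weight of shape (C, 1, 3, 3),
# as BlurPool2d.__init__ does for groups=in_features with C = 4 here.
weight = blur.repeat(4, 1, 1, 1)
assert weight.shape == (4, 1, 3, 3)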
ChannelAvgPool
import torch
import torch.nn as nn
import torch.nn.functional as F


class ChannelAvgPool(nn.AvgPool1d):

    def forward(self, input):
        n, c, w, h = input.size()
        input = input.view(n, c, w * h).permute(0, 2, 1)
        pooled = F.avg_pool1d(input, self.kernel_size, self.stride,
            self.padding, self.ceil_mode, self.count_include_pad)
        return pooled.permute(0, 2, 1).view(n, 1, w, h)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'kernel_size': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_avg_pool2d_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0),


class ChannelAvgPoolNew(nn.AvgPool1d):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
joeization/CycleGAN
ChannelAvgPool
false
3,752
[ "MIT" ]
0
9635c8e3a7b1634b2e2eb5b5299f03a4e0786868
https://github.com/joeization/CycleGAN/tree/9635c8e3a7b1634b2e2eb5b5299f03a4e0786868
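Note on the ChannelAvgPool entry above: with kernel_size equal to the number of channels (4 here), the view/permute/avg_pool1d round trip collapses to a per-pixel mean over the channel dimension, which is what the single fused kernel computes (four loads summed and scaled by 0.25). A minimal check (not from the repo):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
n, c, h, w = x.shape
pooled = F.avg_pool1d(x.view(n, c, h * w).permute(0, 2, 1), kernel_size=c)
pooled = pooled.permute(0, 2, 1).view(n, 1, h, w)
assert torch.allclose(pooled, x.mean(dim=1, keepdim=True))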
IndependentNACLayer
import collections import scipy import torch import numpy as np import torch.utils.data import scipy.stats import scipy.optimize def sparsity_error(W): W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W))) return torch.max(W_error) def nac_w_variance(r): """Calculates the variance of W. Asumming \\hat{w} and \\hat{m} are sampled from a uniform distribution with range [-r, r], this is the variance of w = tanh(\\hat{w})*sigmoid(\\hat{m}). """ if r == 0: return 0 else: return (1 - np.tanh(r) / r) * (r - np.tanh(r / 2)) * (1 / (2 * r)) def nac_w_optimal_r(fan_in, fan_out): """Computes the optimal Uniform[-r, r] given the fan This uses numerical optimization. TODO: consider if there is an algebraic solution. """ fan = max(fan_in + fan_out, 5) r = scipy.optimize.bisect(lambda r: fan * nac_w_variance(r) - 2, 0, 10) return r def nac_weight(w_hat, m_hat, mode='normal'): if mode == 'normal': return NACWeight.apply(w_hat, m_hat) elif mode == 'sign': return NACWeightSign.apply(w_hat, m_hat) elif mode == 'independent': return NACWeightIndependent.apply(w_hat, m_hat) class SummaryWriterNamespaceNoLoggingScope: def __init__(self, writer): self._writer = writer def __enter__(self): self._writer._logging_enabled = False def __exit__(self, type, value, traceback): self._writer._logging_enabled = True return False class DummySummaryWriter: def __init__(self, **kwargs): self._logging_enabled = False pass def add_scalar(self, name, value, verbose_only=True): pass def add_summary(self, name, tensor, verbose_only=True): pass def add_histogram(self, name, tensor, verbose_only=True): pass def add_tensor(self, name, tensor, verbose_only=True): pass def print(self, name, tensor, verbose_only=True): pass def namespace(self, name): return self def every(self, epoch_interval): return self def verbose(self, verbose): return self def no_logging(self): return SummaryWriterNamespaceNoLoggingScope(self) class NoRandomScope: def __init__(self, module): self._module = module def __enter__(self): self._module._disable_random() def __exit__(self, type, value, traceback): self._module._enable_random() return False class ExtendedTorchModule(torch.nn.Module): def __init__(self, default_name, *args, writer=None, name=None, **kwargs): super().__init__() if writer is None: writer = DummySummaryWriter() self.writer = writer.namespace(default_name if name is None else name) self.allow_random = True def set_parameter(self, name, value): parameter = getattr(self, name, None) if isinstance(parameter, torch.nn.Parameter): parameter.fill_(value) for module in self.children(): if isinstance(module, ExtendedTorchModule): module.set_parameter(name, value) def regualizer(self, merge_in=None): regualizers = collections.defaultdict(int) if merge_in is not None: for key, value in merge_in.items(): self.writer.add_scalar(f'regualizer/{key}', value) regualizers[key] += value for module in self.children(): if isinstance(module, ExtendedTorchModule): for key, value in module.regualizer().items(): regualizers[key] += value return regualizers def optimize(self, loss): for module in self.children(): if isinstance(module, ExtendedTorchModule): module.optimize(loss) def log_gradients(self): for name, parameter in self.named_parameters(recurse=False): if parameter.requires_grad: gradient, *_ = parameter.grad.data self.writer.add_summary(f'{name}/grad', gradient) self.writer.add_histogram(f'{name}/grad', gradient) for module in self.children(): if isinstance(module, ExtendedTorchModule): module.log_gradients() def no_internal_logging(self): return 
self.writer.no_logging() def _disable_random(self): self.allow_random = False for module in self.children(): if isinstance(module, ExtendedTorchModule): module._disable_random() def _enable_random(self): self.allow_random = True for module in self.children(): if isinstance(module, ExtendedTorchModule): module._enable_random() def no_random(self): return NoRandomScope(self) class RegualizerNAUZ: def __init__(self, zero=False): self.zero = zero self.stored_inputs = [] def __call__(self, W): if self.zero: return 0 x_mean = torch.mean(torch.cat(self.stored_inputs, dim=0), dim=0, keepdim=True) return torch.mean((1 - torch.abs(W)) * (0 - x_mean) ** 2) def append_input(self, x): if self.zero: return self.stored_inputs.append(x) def reset(self): if self.zero: return self.stored_inputs = [] class NACWeight(torch.autograd.Function): """Implements the NAC weight operator w = tanh(\\hat{w}) * sigmoid(\\hat{m}) """ @staticmethod def forward(ctx, w_hat, m_hat): tanh_w_hat = torch.tanh(w_hat) sigmoid_m_hat = torch.sigmoid(m_hat) ctx.save_for_backward(tanh_w_hat, sigmoid_m_hat) return tanh_w_hat * sigmoid_m_hat @staticmethod def backward(ctx, grad_output): tanh_w_hat, sigmoid_m_hat = ctx.saved_tensors return grad_output * (1 - tanh_w_hat * tanh_w_hat ) * sigmoid_m_hat, grad_output * tanh_w_hat * sigmoid_m_hat * ( 1 - sigmoid_m_hat) class NACWeightIndependent(torch.autograd.Function): """Implements the NAC weight operator but with independent optimization. The optimiation of \\hat{w} is independent of \\hat{m} and vice versa. w = tanh(\\hat{w}) * sigmoid(\\hat{m}) dL/d\\hat{w} = (dL/dw) (dw/d\\hat{w}) = (dL/dw) (1 - tanh(\\hat{w})^2) dL/d\\hat{m} = (dL/dw) (dw/d\\hat{m}) = (dL/dw) sigmoid(\\hat{m}) * (1 - sigmoid(\\hat{m})) """ @staticmethod def forward(ctx, w_hat, m_hat): tanh_w_hat = torch.tanh(w_hat) sigmoid_m_hat = torch.sigmoid(m_hat) ctx.save_for_backward(tanh_w_hat, sigmoid_m_hat) return tanh_w_hat * sigmoid_m_hat @staticmethod def backward(ctx, grad_output): tanh_w_hat, sigmoid_m_hat = ctx.saved_tensors return grad_output * (1 - tanh_w_hat * tanh_w_hat ), grad_output * sigmoid_m_hat * (1 - sigmoid_m_hat) class NACWeightSign(torch.autograd.Function): """Implements the NAC weight operator but with a hard gradient for \\hat{m} w = tanh(\\hat{w}) * sigmoid(\\hat{m}) dL/d\\hat{m} = (dL/dw) (dw/d\\hat{m}) = (dL/dw) * 0.1 * sign(\\hat{w}) * sigmoid(\\hat{m}) * (1 - sigmoid(\\hat{m})) """ @staticmethod def forward(ctx, w_hat, m_hat): tanh_w_hat = torch.tanh(w_hat) sigmoid_m_hat = torch.sigmoid(m_hat) ctx.save_for_backward(w_hat, tanh_w_hat, sigmoid_m_hat) return tanh_w_hat * sigmoid_m_hat @staticmethod def backward(ctx, grad_output): w_hat, tanh_w_hat, sigmoid_m_hat = ctx.saved_tensors return grad_output * (1 - tanh_w_hat * tanh_w_hat ) * sigmoid_m_hat, grad_output * 0.1 * torch.sign(w_hat ) * sigmoid_m_hat * (1 - sigmoid_m_hat) class NACLayer(ExtendedTorchModule): """Implements the NAC (Neural Accumulator) Arguments: in_features: number of ingoing features out_features: number of outgoing features """ def __init__(self, in_features, out_features, regualizer_z=0, **kwargs): super().__init__('nac', **kwargs) self.in_features = in_features self.out_features = out_features self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features) ) self.M_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features) ) self.register_parameter('bias', None) self._regualizer_nau_z = RegualizerNAUZ(zero=regualizer_z == 0) def reset_parameters(self): r = nac_w_optimal_r(self.in_features, self.out_features) 
torch.nn.init.uniform_(self.W_hat, a=-r, b=r) torch.nn.init.uniform_(self.M_hat, a=-r, b=r) def optimize(self, loss): self._regualizer_nau_z.reset() def regualizer(self): W = nac_weight(self.W_hat, self.M_hat, mode='normal') return super().regualizer({'z': self._regualizer_nau_z(W)}) def forward(self, x, reuse=False): if self.allow_random: self._regualizer_nau_z.append_input(x) W = nac_weight(self.W_hat, self.M_hat, mode='normal') self.writer.add_histogram('W', W) self.writer.add_tensor('W', W) self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False) return torch.nn.functional.linear(x, W, self.bias) def extra_repr(self): return 'in_features={}, out_features={}'.format(self.in_features, self.out_features) class IndependentNACLayer(NACLayer): def forward(self, input, reuse=False): W = nac_weight(self.W_hat, self.M_hat, mode='independent') self.writer.add_histogram('W', W) self.writer.add_tensor('W', W) self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False) return torch.nn.functional.linear(input, W, self.bias) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import collections import scipy import numpy as np import torch.utils.data import scipy.stats import scipy.optimize assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_tanh_0[grid(16)](primals_1, primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1) del buf0 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) def sparsity_error(W): W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W))) return torch.max(W_error) def nac_w_variance(r): """Calculates the variance of W. Asumming \\hat{w} and \\hat{m} are sampled from a uniform distribution with range [-r, r], this is the variance of w = tanh(\\hat{w})*sigmoid(\\hat{m}). """ if r == 0: return 0 else: return (1 - np.tanh(r) / r) * (r - np.tanh(r / 2)) * (1 / (2 * r)) def nac_w_optimal_r(fan_in, fan_out): """Computes the optimal Uniform[-r, r] given the fan This uses numerical optimization. TODO: consider if there is an algebraic solution. 
""" fan = max(fan_in + fan_out, 5) r = scipy.optimize.bisect(lambda r: fan * nac_w_variance(r) - 2, 0, 10) return r def nac_weight(w_hat, m_hat, mode='normal'): if mode == 'normal': return NACWeight.apply(w_hat, m_hat) elif mode == 'sign': return NACWeightSign.apply(w_hat, m_hat) elif mode == 'independent': return NACWeightIndependent.apply(w_hat, m_hat) class SummaryWriterNamespaceNoLoggingScope: def __init__(self, writer): self._writer = writer def __enter__(self): self._writer._logging_enabled = False def __exit__(self, type, value, traceback): self._writer._logging_enabled = True return False class DummySummaryWriter: def __init__(self, **kwargs): self._logging_enabled = False pass def add_scalar(self, name, value, verbose_only=True): pass def add_summary(self, name, tensor, verbose_only=True): pass def add_histogram(self, name, tensor, verbose_only=True): pass def add_tensor(self, name, tensor, verbose_only=True): pass def print(self, name, tensor, verbose_only=True): pass def namespace(self, name): return self def every(self, epoch_interval): return self def verbose(self, verbose): return self def no_logging(self): return SummaryWriterNamespaceNoLoggingScope(self) class NoRandomScope: def __init__(self, module): self._module = module def __enter__(self): self._module._disable_random() def __exit__(self, type, value, traceback): self._module._enable_random() return False class ExtendedTorchModule(torch.nn.Module): def __init__(self, default_name, *args, writer=None, name=None, **kwargs): super().__init__() if writer is None: writer = DummySummaryWriter() self.writer = writer.namespace(default_name if name is None else name) self.allow_random = True def set_parameter(self, name, value): parameter = getattr(self, name, None) if isinstance(parameter, torch.nn.Parameter): parameter.fill_(value) for module in self.children(): if isinstance(module, ExtendedTorchModule): module.set_parameter(name, value) def regualizer(self, merge_in=None): regualizers = collections.defaultdict(int) if merge_in is not None: for key, value in merge_in.items(): self.writer.add_scalar(f'regualizer/{key}', value) regualizers[key] += value for module in self.children(): if isinstance(module, ExtendedTorchModule): for key, value in module.regualizer().items(): regualizers[key] += value return regualizers def optimize(self, loss): for module in self.children(): if isinstance(module, ExtendedTorchModule): module.optimize(loss) def log_gradients(self): for name, parameter in self.named_parameters(recurse=False): if parameter.requires_grad: gradient, *_ = parameter.grad.data self.writer.add_summary(f'{name}/grad', gradient) self.writer.add_histogram(f'{name}/grad', gradient) for module in self.children(): if isinstance(module, ExtendedTorchModule): module.log_gradients() def no_internal_logging(self): return self.writer.no_logging() def _disable_random(self): self.allow_random = False for module in self.children(): if isinstance(module, ExtendedTorchModule): module._disable_random() def _enable_random(self): self.allow_random = True for module in self.children(): if isinstance(module, ExtendedTorchModule): module._enable_random() def no_random(self): return NoRandomScope(self) class RegualizerNAUZ: def __init__(self, zero=False): self.zero = zero self.stored_inputs = [] def __call__(self, W): if self.zero: return 0 x_mean = torch.mean(torch.cat(self.stored_inputs, dim=0), dim=0, keepdim=True) return torch.mean((1 - torch.abs(W)) * (0 - x_mean) ** 2) def append_input(self, x): if self.zero: return 
self.stored_inputs.append(x) def reset(self): if self.zero: return self.stored_inputs = [] class NACWeight(torch.autograd.Function): """Implements the NAC weight operator w = tanh(\\hat{w}) * sigmoid(\\hat{m}) """ @staticmethod def forward(ctx, w_hat, m_hat): tanh_w_hat = torch.tanh(w_hat) sigmoid_m_hat = torch.sigmoid(m_hat) ctx.save_for_backward(tanh_w_hat, sigmoid_m_hat) return tanh_w_hat * sigmoid_m_hat @staticmethod def backward(ctx, grad_output): tanh_w_hat, sigmoid_m_hat = ctx.saved_tensors return grad_output * (1 - tanh_w_hat * tanh_w_hat ) * sigmoid_m_hat, grad_output * tanh_w_hat * sigmoid_m_hat * ( 1 - sigmoid_m_hat) class NACWeightIndependent(torch.autograd.Function): """Implements the NAC weight operator but with independent optimization. The optimiation of \\hat{w} is independent of \\hat{m} and vice versa. w = tanh(\\hat{w}) * sigmoid(\\hat{m}) dL/d\\hat{w} = (dL/dw) (dw/d\\hat{w}) = (dL/dw) (1 - tanh(\\hat{w})^2) dL/d\\hat{m} = (dL/dw) (dw/d\\hat{m}) = (dL/dw) sigmoid(\\hat{m}) * (1 - sigmoid(\\hat{m})) """ @staticmethod def forward(ctx, w_hat, m_hat): tanh_w_hat = torch.tanh(w_hat) sigmoid_m_hat = torch.sigmoid(m_hat) ctx.save_for_backward(tanh_w_hat, sigmoid_m_hat) return tanh_w_hat * sigmoid_m_hat @staticmethod def backward(ctx, grad_output): tanh_w_hat, sigmoid_m_hat = ctx.saved_tensors return grad_output * (1 - tanh_w_hat * tanh_w_hat ), grad_output * sigmoid_m_hat * (1 - sigmoid_m_hat) class NACWeightSign(torch.autograd.Function): """Implements the NAC weight operator but with a hard gradient for \\hat{m} w = tanh(\\hat{w}) * sigmoid(\\hat{m}) dL/d\\hat{m} = (dL/dw) (dw/d\\hat{m}) = (dL/dw) * 0.1 * sign(\\hat{w}) * sigmoid(\\hat{m}) * (1 - sigmoid(\\hat{m})) """ @staticmethod def forward(ctx, w_hat, m_hat): tanh_w_hat = torch.tanh(w_hat) sigmoid_m_hat = torch.sigmoid(m_hat) ctx.save_for_backward(w_hat, tanh_w_hat, sigmoid_m_hat) return tanh_w_hat * sigmoid_m_hat @staticmethod def backward(ctx, grad_output): w_hat, tanh_w_hat, sigmoid_m_hat = ctx.saved_tensors return grad_output * (1 - tanh_w_hat * tanh_w_hat ) * sigmoid_m_hat, grad_output * 0.1 * torch.sign(w_hat ) * sigmoid_m_hat * (1 - sigmoid_m_hat) class NACLayer(ExtendedTorchModule): """Implements the NAC (Neural Accumulator) Arguments: in_features: number of ingoing features out_features: number of outgoing features """ def __init__(self, in_features, out_features, regualizer_z=0, **kwargs): super().__init__('nac', **kwargs) self.in_features = in_features self.out_features = out_features self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features) ) self.M_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features) ) self.register_parameter('bias', None) self._regualizer_nau_z = RegualizerNAUZ(zero=regualizer_z == 0) def reset_parameters(self): r = nac_w_optimal_r(self.in_features, self.out_features) torch.nn.init.uniform_(self.W_hat, a=-r, b=r) torch.nn.init.uniform_(self.M_hat, a=-r, b=r) def optimize(self, loss): self._regualizer_nau_z.reset() def regualizer(self): W = nac_weight(self.W_hat, self.M_hat, mode='normal') return super().regualizer({'z': self._regualizer_nau_z(W)}) def forward(self, x, reuse=False): if self.allow_random: self._regualizer_nau_z.append_input(x) W = nac_weight(self.W_hat, self.M_hat, mode='normal') self.writer.add_histogram('W', W) self.writer.add_tensor('W', W) self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False) return torch.nn.functional.linear(x, W, self.bias) def extra_repr(self): return 'in_features={}, 
out_features={}'.format(self.in_features, self.out_features) class IndependentNACLayerNew(NACLayer): def forward(self, input_0): primals_1 = self.W_hat primals_2 = self.M_hat primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
hoedt/stable-nalu
IndependentNACLayer
false
3,753
[ "MIT" ]
0
64b3d240db8bff4da857d955f213ef3c7e38e035
https://github.com/hoedt/stable-nalu/tree/64b3d240db8bff4da857d955f213ef3c7e38e035
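Note on the IndependentNACLayer entry above: the fused kernel only covers the forward pass W = tanh(W_hat) * sigmoid(M_hat); the "independent" behaviour lives entirely in NACWeightIndependent.backward, where each gradient drops the factor contributed by the other parameter. The two gradient definitions written out side by side (a sketch following the docstrings above, not extra repo functionality):

import torch

w_hat = torch.randn(3, 3)
m_hat = torch.randn(3, 3)
g = torch.ones(3, 3)  # upstream gradient dL/dW

t, s = torch.tanh(w_hat), torch.sigmoid(m_hat)
# NACWeight: full product rule, each grad keeps the other factor.
normal_grads = (g * (1 - t * t) * s, g * t * s * (1 - s))
# NACWeightIndependent: the cross factors are dropped on purpose.
independent_grads = (g * (1 - t * t), g * s * (1 - s))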
Encoder
import torch from torch import nn from torch.nn import functional as F class Encoder(nn.Module): def __init__(self, latent_size): super().__init__() self.latent_size = latent_size self.conv1 = nn.Conv2d(3, 32, 4, stride=2) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 128, 4, stride=2) self.conv4 = nn.Conv2d(128, 256, 4, stride=2) self.flatten = nn.Flatten() self.fc_mu = nn.Linear(2 * 2 * 256, latent_size) self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = F.relu(self.conv4(x)) x = self.flatten(x) mu = self.fc_mu(x) logsigma = self.fc_logsigma(x) return mu, logsigma def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {'latent_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 96 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1024 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 1024 * y1), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (32, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (4, 1024), (1024, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 1024), (1024, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 3, 4, 4), (48, 1, 12, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(96, 16)](primals_1, buf0, 96, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch. 
float32) triton_poi_fused_2[grid(2048, 16)](primals_4, buf2, 2048, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32) triton_poi_fused_4[grid(32768, 16)](primals_8, buf4, 32768, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 32, 31, 31), (30752, 1, 992, 32)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_5[grid(123008)](buf6, primals_2, 123008, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 64, 14, 14), (12544, 1, 896, 64)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_6[grid(50176)](buf8, primals_5, 50176, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 128, 6, 6), (4608, 1, 768, 128)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_7[grid(18432)](buf10, primals_7, 18432, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf11 = extern_kernels.convolution(buf10, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 256, 2, 2), (1024, 1, 512, 256)) buf12 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch. 
float32) buf15 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_8[grid(1024, 4)]( buf11, primals_9, buf12, buf15, 1024, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del buf11 del primals_9 buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf13) del primals_11 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf12, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf14) del primals_13 return (buf13, buf14, buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10, reinterpret_tensor(buf12, (4, 1024), (1024, 1), 0), primals_12, primals_10, buf15) class EncoderNew(nn.Module): def __init__(self, latent_size): super().__init__() self.latent_size = latent_size self.conv1 = nn.Conv2d(3, 32, 4, stride=2) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 128, 4, stride=2) self.conv4 = nn.Conv2d(128, 256, 4, stride=2) self.flatten = nn.Flatten() self.fc_mu = nn.Linear(2 * 2 * 256, latent_size) self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.fc_mu.weight primals_11 = self.fc_mu.bias primals_12 = self.fc_logsigma.weight primals_13 = self.fc_logsigma.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0], output[1]
jinyeom/ga-plastic-models
Encoder
false
3,754
[ "MIT" ]
0
e38b245ae51c35a5f32679cc9f215463a3d58f1a
https://github.com/jinyeom/ga-plastic-models/tree/e38b245ae51c35a5f32679cc9f215463a3d58f1a
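A minimal usage sketch for the Encoder record above (hypothetical, assuming the original Encoder class from this record is in scope). The input shape follows primals_3 in the compiled call(), and latent_size=4 follows the (4, 1024) fc weight shapes; the module returns the mu and logsigma heads.
import torch

enc = Encoder(latent_size=4)     # assumed: the original module defined in this record
x = torch.rand(4, 3, 64, 64)     # same shape as primals_3 in the compiled call()
mu, logsigma = enc(x)
print(mu.shape, logsigma.shape)  # expected: torch.Size([4, 4]) for both heads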
Decoder
import torch from torch import nn from torch.nn import functional as F class Decoder(nn.Module): def __init__(self, latent_size): super().__init__() self.latent_size = latent_size self.fc1 = nn.Linear(latent_size, 1024) self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2) self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2) self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2) self.deconv4 = nn.ConvTranspose2d(32, 3, 6, stride=2) def forward(self, x): x = F.relu(self.fc1(x)) x = x.view(x.size(0), x.size(1), 1, 1) x = F.relu(self.deconv1(x)) x = F.relu(self.deconv2(x)) x = F.relu(self.deconv3(x)) x = torch.sigmoid(self.deconv4(x)) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'latent_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 96 xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 108 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 43264 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_8(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 3 y1 = yindex // 3 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 12288 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (1024, 4), (4, 1)) assert_size_stride(primals_2, (1024,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) 
assert_size_stride(primals_4, (1024, 128, 5, 5), (3200, 25, 5, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 32, 6, 6), (1152, 36, 6, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (32, 3, 6, 6), (108, 36, 6, 1)) assert_size_stride(primals_11, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(131072, 25)](primals_4, buf0, 131072, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_4 buf1 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_1[grid(8192, 25)](primals_6, buf1, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_6 buf2 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32), torch .float32) triton_poi_fused_2[grid(2048, 36)](primals_8, buf2, 2048, 36, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_8 buf3 = empty_strided_cuda((32, 3, 6, 6), (108, 1, 18, 3), torch.float32 ) triton_poi_fused_3[grid(96, 36)](primals_10, buf3, 96, 36, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_10 buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 1024 ), (1, 4), 0), out=buf4) del primals_1 buf5 = buf4 del buf4 buf14 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool) triton_poi_fused_relu_threshold_backward_4[grid(4096)](buf5, primals_2, buf14, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf6 = extern_kernels.convolution(reinterpret_tensor(buf5, (4, 1024, 1, 1), (1024, 1, 0, 0), 0), buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups =1, bias=None) assert_size_stride(buf6, (4, 128, 5, 5), (3200, 1, 640, 128)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_5[grid(12800)](buf7, primals_5, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf8 = extern_kernels.convolution(buf7, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 13, 13), (10816, 1, 832, 64)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_6[grid(43264)](buf9, primals_7, 43264, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf10 = extern_kernels.convolution(buf9, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 32, 30, 30), (28800, 1, 960, 32)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_7[grid(115200)](buf11, primals_9, 115200, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf12 = extern_kernels.convolution(buf11, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 3, 64, 64), (12288, 1, 192, 3)) buf13 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_sigmoid_8[grid(12, 4096)](buf12, primals_11, buf13, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del buf12 del primals_11 return buf13, primals_3, buf0, buf1, buf2, buf3, reinterpret_tensor(buf5, (4, 1024, 1, 1), (1024, 1, 1, 1), 0), buf7, buf9, buf11, buf13, buf14 class DecoderNew(nn.Module): def __init__(self, latent_size): 
super().__init__() self.latent_size = latent_size self.fc1 = nn.Linear(latent_size, 1024) self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2) self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2) self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2) self.deconv4 = nn.ConvTranspose2d(32, 3, 6, stride=2) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.deconv1.weight primals_5 = self.deconv1.bias primals_6 = self.deconv2.weight primals_7 = self.deconv2.bias primals_8 = self.deconv3.weight primals_9 = self.deconv3.bias primals_10 = self.deconv4.weight primals_11 = self.deconv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
jinyeom/ga-plastic-models
Decoder
false
3,755
[ "MIT" ]
0
e38b245ae51c35a5f32679cc9f215463a3d58f1a
https://github.com/jinyeom/ga-plastic-models/tree/e38b245ae51c35a5f32679cc9f215463a3d58f1a
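A minimal usage sketch for the Decoder above, assuming the original Decoder class from this record is in scope; the arguments mirror get_init_inputs() and get_inputs(). The four stride-2 transposed convolutions upsample the 1x1 latent map back to a 64x64 image.
import torch

dec = Decoder(latent_size=4)   # get_init_inputs() above
z = torch.rand(4, 4)           # get_inputs() above
img = dec(z)
print(img.shape)               # expected: torch.Size([4, 3, 64, 64]), matching buf13 in call()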
CoralLayer
import torch import torch.nn class CoralLayer(torch.nn.Module): """ Implements CORAL layer described in Cao, Mirjalili, and Raschka (2020) *Rank Consistent Ordinal Regression for Neural Networks with Application to Age Estimation* Pattern Recognition Letters, https://doi.org/10.1016/j.patrec.2020.11.008 Parameters ----------- size_in : int Number of input features for the inputs to the forward method, which are expected to have shape=(num_examples, num_features). num_classes : int Number of classes in the dataset. preinit_bias : bool (default=True) If true, it will pre-initialize the biases to descending values in [0, 1] range instead of initializing it to all zeros. This pre- initialization scheme results in faster learning and better generalization performance in practice. """ def __init__(self, size_in, num_classes, preinit_bias=True): super().__init__() self.size_in, self.size_out = size_in, 1 self.coral_weights = torch.nn.Linear(self.size_in, 1, bias=False) if preinit_bias: self.coral_bias = torch.nn.Parameter(torch.arange(num_classes - 1, 0, -1).float() / (num_classes - 1)) else: self.coral_bias = torch.nn.Parameter(torch.zeros(num_classes - 1).float()) def forward(self, x): """ Computes forward pass. Parameters ----------- x : torch.tensor, shape=(num_examples, num_features) Input features. Returns ----------- logits : torch.tensor, shape=(num_examples, num_classes-1) """ return self.coral_weights(x) + self.coral_bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'size_in': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 x0 = xindex % 3 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(192)](buf0, primals_3, buf1, 192, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_3 return buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0) class CoralLayerNew(torch.nn.Module): """ Implements CORAL layer described in Cao, Mirjalili, and Raschka (2020) *Rank Consistent Ordinal Regression for Neural Networks with Application to Age Estimation* Pattern Recognition Letters, https://doi.org/10.1016/j.patrec.2020.11.008 Parameters ----------- size_in : int Number of input features for the inputs to the forward method, which are expected to have shape=(num_examples, num_features). num_classes : int Number of classes in the dataset. preinit_bias : bool (default=True) If true, it will pre-initialize the biases to descending values in [0, 1] range instead of initializing it to all zeros. This pre- initialization scheme results in faster learning and better generalization performance in practice. """ def __init__(self, size_in, num_classes, preinit_bias=True): super().__init__() self.size_in, self.size_out = size_in, 1 self.coral_weights = torch.nn.Linear(self.size_in, 1, bias=False) if preinit_bias: self.coral_bias = torch.nn.Parameter(torch.arange(num_classes - 1, 0, -1).float() / (num_classes - 1)) else: self.coral_bias = torch.nn.Parameter(torch.zeros(num_classes - 1).float()) def forward(self, input_0): primals_3 = self.coral_bias primals_1 = self.coral_weights.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
johann-petrak/farm-tools
CoralLayer
false
3,756
[ "Apache-2.0" ]
0
7d379bbc5b9b079eedd4a11d7bdb1636c0ad834c
https://github.com/johann-petrak/farm-tools/tree/7d379bbc5b9b079eedd4a11d7bdb1636c0ad834c
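A minimal usage sketch for the CoralLayer above, assuming the class from this record is in scope. CORAL emits num_classes - 1 ordinal logits per example; applying a sigmoid turns them into cumulative rank probabilities.
import torch

layer = CoralLayer(size_in=4, num_classes=4)  # get_init_inputs() above
x = torch.rand(4, 4, 4, 4)                    # get_inputs() above
logits = layer(x)
print(logits.shape)                           # expected: torch.Size([4, 4, 4, 3]) -- num_classes - 1 thresholds
probas = torch.sigmoid(logits)                # cumulative P(rank > k) per threshold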
SelfAttention
import torch from torch.nn import functional as F from torch import nn class SelfAttention(nn.Module): def __init__(self, k, heads=8): super().__init__() self.k, self.heads = k, heads self.toKeys = nn.Linear(k, k * heads, bias=False) self.toQueries = nn.Linear(k, k * heads, bias=False) self.toValues = nn.Linear(k, k * heads, bias=False) self.unifyHeads = nn.Linear(heads * k, k) def forward(self, x): b, t, k = x.size() h = self.heads queries = self.toQueries(x).view(b, t, h, k) keys = self.toKeys(x).view(b, t, h, k) values = self.toValues(x).view(b, t, h, k) keys = keys.transpose(1, 2).contiguous().view(b * h, t, k) queries = queries.transpose(1, 2).contiguous().view(b * h, t, k) values = values.transpose(1, 2).contiguous().view(b * h, t, k) queries = queries / k ** (1 / 4) keys = keys / k ** (1 / 4) dot = torch.bmm(queries, keys.transpose(1, 2)) dot = F.softmax(dot, dim=2) out = torch.bmm(dot, values).view(b, h, t, k) out = out.transpose(1, 2).contiguous().view(b, t, h * k) return self.unifyHeads(out) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'k': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 32 x2 = xindex // 128 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 8) + 32 * x2 + 128 * (x1 // 8) ), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x2 % 8) + 32 * x1 + 128 * (x2 // 8) ), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 8 x3 = xindex // 128 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 32 * x1 + 128 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 
4 % 8 x2 = xindex // 32 % 4 x3 = xindex // 128 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 128 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_transpose_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 128 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (32, 4), (4, 1)) assert_size_stride(primals_3, (32, 4), (4, 1)) assert_size_stride(primals_4, (32, 4), (4, 1)) assert_size_stride(primals_5, (4, 32), (32, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 32), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 32), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((32, 4, 4), (4, 128, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(512)](buf0, buf3, 512, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf0, (32, 4, 4), (16, 4, 1), 0) del buf0 triton_poi_fused_div_1[grid(512)](buf1, buf4, 512, XBLOCK=256, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf1, (32, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (32, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = empty_strided_cuda((32, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(512)](buf5, buf6, 512, XBLOCK=128, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_3[grid(512)](buf6, buf7, 512, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 8, 4, 4), (128, 16, 4, 1), 0) del buf6 triton_poi_fused_clone_4[grid(512)](buf2, buf8, 512, XBLOCK=256, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf2, (32, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (32, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32 ) triton_poi_fused_clone_5[grid(512)](buf9, buf10, 512, XBLOCK=256, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (16, 32), (32, 1), 0), reinterpret_tensor(primals_5, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf11) del primals_6 buf12 = reinterpret_tensor(buf9, (32, 4, 4), (16, 1, 4), 0) del buf9 triton_poi_fused_transpose_6[grid(512)](buf3, buf12, 512, XBLOCK= 128, num_warps=4, num_stages=1) del buf3 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 32), (32, 1), 0 ), primals_5, reinterpret_tensor(buf8, (32, 4, 4), (16, 1, 4), 0 ), buf12, buf4 class SelfAttentionNew(nn.Module): def 
__init__(self, k, heads=8): super().__init__() self.k, self.heads = k, heads self.toKeys = nn.Linear(k, k * heads, bias=False) self.toQueries = nn.Linear(k, k * heads, bias=False) self.toValues = nn.Linear(k, k * heads, bias=False) self.unifyHeads = nn.Linear(heads * k, k) def forward(self, input_0): primals_2 = self.toKeys.weight primals_3 = self.toQueries.weight primals_4 = self.toValues.weight primals_5 = self.unifyHeads.weight primals_6 = self.unifyHeads.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
jiyfeng/transformer-text-tasks
SelfAttention
false
3,757
[ "MIT" ]
0
b06349ca759bc8084a5880a425153dfdd7b91a98
https://github.com/jiyfeng/transformer-text-tasks/tree/b06349ca759bc8084a5880a425153dfdd7b91a98
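A minimal usage sketch for the SelfAttention above, assuming the class from this record is in scope; the module preserves the (batch, tokens, k) shape and uses heads=8 by default.
import torch

attn = SelfAttention(k=4)   # heads defaults to 8, as in get_init_inputs() above
x = torch.rand(4, 4, 4)     # (batch, tokens, k), from get_inputs() above
y = attn(x)
print(y.shape)              # expected: torch.Size([4, 4, 4])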
Model
import torch import torch.nn as nn import torch.nn.functional as f class Model(nn.Module): def __init__(self): super(Model, self).__init__() self.conv = nn.Conv2d(1, 16, 5) self.pool = nn.MaxPool2d(2, 2) self.fc = nn.Linear(2304, 10) def forward(self, x): x = self.pool(f.relu(self.conv(x))) x = x.view(-1, 2304) logit_output = self.fc(x) return logit_output def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 16 x0 = xindex % 3600 x4 = xindex // 3600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 57600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = xindex // 30 % 30 x4 = xindex // 900 x3 = xindex // 14400 x5 = xindex % 14400 x6 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x4), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x4), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x4), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x5 + 14464 * x3), tmp15, xmask) tl.store(out_ptr1 + x6, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (16, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (10, 2304), (2304, 1)) assert_size_stride(primals_5, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 60, 60), (57600, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 16, 60, 60), (57856, 3616, 60, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(230400)](buf0, primals_2, buf1, 230400, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 16, 30, 30), (14464, 900, 30, 1), torch.int8) buf3 = empty_strided_cuda((4, 16, 30, 30), (14400, 900, 30, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_1[grid(57600)](buf1, buf2, buf3, 57600, XBLOCK=512, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((25, 10), (10, 1), torch.float32) 
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (25, 2304), (2304, 1), 0), reinterpret_tensor(primals_4, (2304, 10), (1, 2304), 0), alpha=1, beta=1, out=buf4) del primals_5 return buf4, primals_1, primals_3, buf1, buf2, reinterpret_tensor(buf3, (25, 2304), (2304, 1), 0), primals_4 class ModelNew(nn.Module): def __init__(self): super(ModelNew, self).__init__() self.conv = nn.Conv2d(1, 16, 5) self.pool = nn.MaxPool2d(2, 2) self.fc = nn.Linear(2304, 10) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.fc.weight primals_5 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jizongFox/adversarial-robustness-toolbox
Model
false
3,758
[ "MIT" ]
0
0649fe44d42bc7ba39a4b1a2ff95a31320fd1ae5
https://github.com/jizongFox/adversarial-robustness-toolbox/tree/0649fe44d42bc7ba39a4b1a2ff95a31320fd1ae5
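A minimal usage sketch for the Model above, assuming the class from this record is in scope. Note that x.view(-1, 2304) is hard-coded while the pooled feature map holds 16x30x30 = 14400 values per sample, so the flatten regroups elements across the batch and the logits have 25 rows instead of 4 (the same (25, 10) shape appears as buf4 in the compiled call()).
import torch

net = Model()
x = torch.rand(4, 1, 64, 64)  # get_inputs() above
logits = net(x)
print(logits.shape)           # expected: torch.Size([25, 10]) because of the hard-coded view(-1, 2304)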
DataEmbedding_wo_pos
import math import torch import torch.nn as nn import torch.fft class PositionalEmbedding(nn.Module): def __init__(self, d_model, max_len=5000): super(PositionalEmbedding, self).__init__() pe = torch.zeros(max_len, d_model).float() pe.require_grad = False position = torch.arange(0, max_len).float().unsqueeze(1) div_term = (torch.arange(0, d_model, 2).float() * -(math.log( 10000.0) / d_model)).exp() pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): return self.pe[:, :x.size(1)] class FixedEmbedding(nn.Module): def __init__(self, c_in, d_model): super(FixedEmbedding, self).__init__() w = torch.zeros(c_in, d_model).float() w.require_grad = False position = torch.arange(0, c_in).float().unsqueeze(1) div_term = (torch.arange(0, d_model, 2).float() * -(math.log( 10000.0) / d_model)).exp() w[:, 0::2] = torch.sin(position * div_term) w[:, 1::2] = torch.cos(position * div_term) self.emb = nn.Embedding(c_in, d_model) self.emb.weight = nn.Parameter(w, requires_grad=False) def forward(self, x): return self.emb(x).detach() class TemporalEmbedding(nn.Module): def __init__(self, d_model, embed_type='fixed', freq='h'): super(TemporalEmbedding, self).__init__() minute_size = 4 hour_size = 24 weekday_size = 7 day_size = 32 month_size = 13 Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding if freq == 't': self.minute_embed = Embed(minute_size, d_model) self.hour_embed = Embed(hour_size, d_model) self.weekday_embed = Embed(weekday_size, d_model) self.day_embed = Embed(day_size, d_model) self.month_embed = Embed(month_size, d_model) def forward(self, x): x = x.long() minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self, 'minute_embed') else 0.0 hour_x = self.hour_embed(x[:, :, 3]) weekday_x = self.weekday_embed(x[:, :, 2]) day_x = self.day_embed(x[:, :, 1]) month_x = self.month_embed(x[:, :, 0]) return hour_x + weekday_x + day_x + month_x + minute_x class TimeFeatureEmbedding(nn.Module): def __init__(self, d_model, embed_type='timeF', freq='h'): super(TimeFeatureEmbedding, self).__init__() freq_map = {'h': 4, 't': 5, 's': 6, 'm': 1, 'a': 1, 'w': 2, 'd': 3, 'b': 3} d_inp = freq_map[freq] self.embed = nn.Linear(d_inp, d_model, bias=False) def forward(self, x): return self.embed(x) class TokenEmbedding(nn.Module): def __init__(self, c_in, d_model): super(TokenEmbedding, self).__init__() padding = 1 if torch.__version__ >= '1.5.0' else 2 self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model, kernel_size=3, padding=padding, padding_mode='circular', bias=False ) for m in self.modules(): if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu') def forward(self, x): x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2) return x class DataEmbedding_wo_pos(nn.Module): def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1 ): super(DataEmbedding_wo_pos, self).__init__() self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model) self.position_embedding = PositionalEmbedding(d_model=d_model) self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type, freq=freq ) if embed_type != 'timeF' else TimeFeatureEmbedding(d_model= d_model, embed_type=embed_type, freq=freq) self.dropout = nn.Dropout(p=dropout) def forward(self, x, x_mark): x = self.value_embedding(x) + self.temporal_embedding(x_mark) return self.dropout(x) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 
4])] def get_init_inputs(): return [[], {'c_in': 4, 'd_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn import torch.fft assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 24 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y0 = yindex % 6 x2 = xindex y1 = yindex // 6 tmp0 = y0 tmp1 = tl.full([1, 1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]) tmp4 = tl.full([1, 1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK]) tmp8 = tmp7 >= tmp4 tmp9 = tmp7 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tmp10 & tmp6 tmp12 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp11 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = float('nan') tmp14 = tl.where(tmp10, tmp12, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp6, tmp14, tmp15) tmp17 = tmp3 >= tmp4 tmp18 = tmp3 < tmp1 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp2 tmp21 = tl.load(in_ptr0 + (-20 + x2 + 4 * y0 + 16 * y1), tmp20 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp22 = tl.where(tmp19, tmp21, tmp13) tmp23 = tl.where(tmp5, tmp16, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp2, tmp23, tmp24) tmp26 = tmp0 < tmp4 tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]) tmp28 = tmp27 >= tmp4 tmp29 = tmp27 < tmp1 tmp30 = tmp28 & tmp29 tmp31 = tmp30 & tmp26 tmp32 = tl.load(in_ptr0 + (12 + x2 + 4 * y0 + 16 * y1), tmp31 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp33 = tl.where(tmp30, tmp32, tmp13) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp26, tmp33, tmp34) tmp36 = tmp0 >= tmp4 tmp37 = tmp0 < tmp1 tmp38 = tmp36 & tmp37 tmp39 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp40 = tl.where(tmp38, tmp39, tmp13) tmp41 = tl.where(tmp26, tmp35, tmp40) tmp42 = tl.where(tmp2, tmp25, tmp41) tl.store(out_ptr0 + (y0 + 6 * x2 + 24 * y1), tmp42, xmask & ymask) @triton.jit def triton_poi_fused_add_embedding_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp18 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1.to(tl.int64) tmp3 = tl.full([XBLOCK], 24, tl.int32) tmp4 = tmp2 + tmp3 tmp5 = tmp2 < 0 tmp6 = tl.where(tmp5, tmp4, tmp2) tl.device_assert((0 <= tmp6) & (tmp6 < 24) | ~xmask, 'index out of bounds: 0 <= tmp6 < 24') tmp8 = tl.load(in_ptr1 + 
(x1 + 4 * tmp6), xmask, eviction_policy= 'evict_last') tmp10 = tmp9.to(tl.int64) tmp11 = tl.full([XBLOCK], 7, tl.int32) tmp12 = tmp10 + tmp11 tmp13 = tmp10 < 0 tmp14 = tl.where(tmp13, tmp12, tmp10) tl.device_assert((0 <= tmp14) & (tmp14 < 7) | ~xmask, 'index out of bounds: 0 <= tmp14 < 7') tmp16 = tl.load(in_ptr2 + (x1 + 4 * tmp14), xmask, eviction_policy= 'evict_last') tmp17 = tmp8 + tmp16 tmp19 = tmp18.to(tl.int64) tmp20 = tl.full([XBLOCK], 32, tl.int32) tmp21 = tmp19 + tmp20 tmp22 = tmp19 < 0 tmp23 = tl.where(tmp22, tmp21, tmp19) tl.device_assert((0 <= tmp23) & (tmp23 < 32) | ~xmask, 'index out of bounds: 0 <= tmp23 < 32') tmp25 = tl.load(in_ptr3 + (x1 + 4 * tmp23), xmask, eviction_policy= 'evict_last') tmp26 = tmp17 + tmp25 tmp28 = tmp27.to(tl.int64) tmp29 = tl.full([XBLOCK], 13, tl.int32) tmp30 = tmp28 + tmp29 tmp31 = tmp28 < 0 tmp32 = tl.where(tmp31, tmp30, tmp28) tl.device_assert((0 <= tmp32) & (tmp32 < 13) | ~xmask, 'index out of bounds: 0 <= tmp32 < 13') tmp34 = tl.load(in_ptr4 + (x1 + 4 * tmp32), xmask, eviction_policy= 'evict_last') tmp35 = tmp26 + tmp34 tmp36 = 0.0 tmp37 = tmp35 + tmp36 tmp38 = tmp0 + tmp37 tl.store(in_out_ptr0 + x3, tmp38, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (24, 4), (4, 1)) assert_size_stride(primals_5, (7, 4), (4, 1)) assert_size_stride(primals_6, (32, 4), (4, 1)) assert_size_stride(primals_7, (13, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_0[grid(24, 4)](primals_1, buf1, 24, 4, XBLOCK =4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0) del buf2 triton_poi_fused_add_embedding_1[grid(64)](buf3, primals_3, primals_4, primals_5, primals_6, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 del primals_4 del primals_5 del primals_6 del primals_7 return buf3, primals_2, buf1 class PositionalEmbedding(nn.Module): def __init__(self, d_model, max_len=5000): super(PositionalEmbedding, self).__init__() pe = torch.zeros(max_len, d_model).float() pe.require_grad = False position = torch.arange(0, max_len).float().unsqueeze(1) div_term = (torch.arange(0, d_model, 2).float() * -(math.log( 10000.0) / d_model)).exp() pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): return self.pe[:, :x.size(1)] class FixedEmbedding(nn.Module): def __init__(self, c_in, d_model): super(FixedEmbedding, self).__init__() w = torch.zeros(c_in, d_model).float() w.require_grad = False position = torch.arange(0, c_in).float().unsqueeze(1) div_term = (torch.arange(0, d_model, 2).float() * -(math.log( 10000.0) / d_model)).exp() w[:, 0::2] = torch.sin(position * div_term) w[:, 1::2] = torch.cos(position * div_term) self.emb = nn.Embedding(c_in, d_model) self.emb.weight = nn.Parameter(w, requires_grad=False) def forward(self, x): return self.emb(x).detach() class TemporalEmbedding(nn.Module): def __init__(self, 
d_model, embed_type='fixed', freq='h'): super(TemporalEmbedding, self).__init__() minute_size = 4 hour_size = 24 weekday_size = 7 day_size = 32 month_size = 13 Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding if freq == 't': self.minute_embed = Embed(minute_size, d_model) self.hour_embed = Embed(hour_size, d_model) self.weekday_embed = Embed(weekday_size, d_model) self.day_embed = Embed(day_size, d_model) self.month_embed = Embed(month_size, d_model) def forward(self, x): x = x.long() minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self, 'minute_embed') else 0.0 hour_x = self.hour_embed(x[:, :, 3]) weekday_x = self.weekday_embed(x[:, :, 2]) day_x = self.day_embed(x[:, :, 1]) month_x = self.month_embed(x[:, :, 0]) return hour_x + weekday_x + day_x + month_x + minute_x class TimeFeatureEmbedding(nn.Module): def __init__(self, d_model, embed_type='timeF', freq='h'): super(TimeFeatureEmbedding, self).__init__() freq_map = {'h': 4, 't': 5, 's': 6, 'm': 1, 'a': 1, 'w': 2, 'd': 3, 'b': 3} d_inp = freq_map[freq] self.embed = nn.Linear(d_inp, d_model, bias=False) def forward(self, x): return self.embed(x) class TokenEmbedding(nn.Module): def __init__(self, c_in, d_model): super(TokenEmbedding, self).__init__() padding = 1 if torch.__version__ >= '1.5.0' else 2 self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model, kernel_size=3, padding=padding, padding_mode='circular', bias=False ) for m in self.modules(): if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu') def forward(self, x): x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2) return x class DataEmbedding_wo_posNew(nn.Module): def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1 ): super(DataEmbedding_wo_posNew, self).__init__() self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model) self.position_embedding = PositionalEmbedding(d_model=d_model) self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type, freq=freq ) if embed_type != 'timeF' else TimeFeatureEmbedding(d_model= d_model, embed_type=embed_type, freq=freq) self.dropout = nn.Dropout(p=dropout) def forward(self, input_0, input_1): primals_2 = self.value_embedding.tokenConv.weight primals_4 = self.temporal_embedding.hour_embed.emb.weight primals_5 = self.temporal_embedding.weekday_embed.emb.weight primals_6 = self.temporal_embedding.day_embed.emb.weight primals_7 = self.temporal_embedding.month_embed.emb.weight primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
jianzhnie/TsFormer
DataEmbedding_wo_pos
false
3,759
[ "Apache-2.0" ]
0
47e362f02445ba00d5ab8db206667767e72faca7
https://github.com/jianzhnie/TsFormer/tree/47e362f02445ba00d5ab8db206667767e72faca7
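A minimal usage sketch for DataEmbedding_wo_pos above, assuming the class from this record is in scope. x_mark is cast to long() inside TemporalEmbedding, so the torch.rand marks all index row 0 of the fixed embedding tables; the last dimension of x_mark is read as (month, day, weekday, hour).
import torch

emb = DataEmbedding_wo_pos(c_in=4, d_model=4)  # get_init_inputs() above
x = torch.rand(4, 4, 4)                        # (batch, seq_len, c_in)
x_mark = torch.rand(4, 4, 4)                   # (batch, seq_len, 4 time features)
out = emb(x, x_mark)
print(out.shape)                               # expected: torch.Size([4, 4, 4])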
LossLoglikelihoodNb
import torch class LossLoglikelihoodNb(torch.nn.Module): def __init__(self, average=True): super(LossLoglikelihoodNb, self).__init__() self.average = average def forward(self, preds, target): """Implements the negative log likelihood loss as VAE reconstruction loss""" x = target loc, scale = torch.chunk(preds, chunks=2, dim=1) eta_loc = torch.log(loc) eta_scale = torch.log(scale) log_r_plus_mu = torch.log(scale + loc) ll = torch.lgamma(scale + x) ll = ll - torch.lgamma(x + torch.ones_like(x)) ll = ll - torch.lgamma(scale) ll = ll + torch.multiply(x, eta_loc - log_r_plus_mu) + torch.multiply( scale, eta_scale - log_r_plus_mu) ll = torch.clamp(ll, min=-300, max=300) neg_ll = -ll if self.average: neg_ll = torch.mean(neg_ll) else: neg_ll = neg_ll.sum(dim=1).sum(dim=1) return neg_ll def get_inputs(): return [torch.rand([4, 2, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_lgamma_log_mean_mul_neg_ones_like_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 16 r2 = rindex // 64 r3 = rindex tmp0 = tl.load(in_ptr0 + (16 + r0 + 32 * r2), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + r3, None) tmp10 = tl.load(in_ptr0 + (r0 + 32 * r2), None, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.lgamma(tmp2) tmp4 = 1.0 tmp5 = tmp1 + tmp4 tmp6 = libdevice.lgamma(tmp5) tmp7 = tmp3 - tmp6 tmp8 = libdevice.lgamma(tmp0) tmp9 = tmp7 - tmp8 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 + tmp10 tmp13 = tl_math.log(tmp12) tmp14 = tmp11 - tmp13 tmp15 = tmp1 * tmp14 tmp16 = tmp9 + tmp15 tmp17 = tl_math.log(tmp0) tmp18 = tmp17 - tmp13 tmp19 = tmp0 * tmp18 tmp20 = tmp16 + tmp19 tmp21 = -300.0 tmp22 = triton_helpers.maximum(tmp20, tmp21) tmp23 = 300.0 tmp24 = triton_helpers.minimum(tmp22, tmp23) tmp25 = -tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tmp29 = 256.0 tmp30 = tmp28 / tmp29 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp30, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 2, 4, 4), (32, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_lgamma_log_mean_mul_neg_ones_like_sub_0[grid (1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class LossLoglikelihoodNbNew(torch.nn.Module): def __init__(self, average=True): super(LossLoglikelihoodNbNew, self).__init__() self.average = average def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
johnmous/sfaira
LossLoglikelihoodNb
false
3,760
[ "BSD-3-Clause" ]
0
c50240a74530e614ab7681bf9c63b04cb815b361
https://github.com/johnmous/sfaira/tree/c50240a74530e614ab7681bf9c63b04cb815b361
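A minimal usage sketch for LossLoglikelihoodNb above, assuming the class from this record is in scope. preds packs the negative-binomial mean (loc) and dispersion (scale) along dim=1, which torch.chunk splits before broadcasting against the target.
import torch

loss_fn = LossLoglikelihoodNb(average=True)
preds = torch.rand(4, 2, 4, 4)   # get_inputs() above: loc and scale stacked on dim 1
target = torch.rand(4, 4, 4, 4)
loss = loss_fn(preds, target)
print(loss.shape)                # expected: torch.Size([]) -- a scalar mean when average=True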
BCEAfterSigmoidLoss
import torch from torch import nn from torch.nn import functional import torch.autograd class Loss(nn.Module): """A loss function.""" class PointwiseLoss(Loss): """Pointwise loss functions compute an independent loss term for each triple-label pair.""" class BCEAfterSigmoidLoss(PointwiseLoss): """A loss function which uses the numerically unstable version of explicit Sigmoid + BCE.""" def __init__(self, reduction: 'str'='mean'): super().__init__() self.reduction = reduction def forward(self, logits: 'torch.FloatTensor', labels: 'torch.FloatTensor', **kwargs) ->torch.FloatTensor: post_sigmoid = torch.sigmoid(logits) return functional.binary_cross_entropy(post_sigmoid, labels, **kwargs) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 256.0 tmp18 = tmp16 / tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_sigmoid_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class Loss(nn.Module): """A loss function.""" class PointwiseLoss(Loss): """Pointwise loss functions compute an independent loss term for each triple-label pair.""" class BCEAfterSigmoidLossNew(PointwiseLoss): """A loss function which uses the numerically unstable version of explicit Sigmoid + BCE.""" def __init__(self, reduction: 'str'='mean'): super().__init__() self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
johnbachman/pykeen
BCEAfterSigmoidLoss
false
3,761
[ "MIT" ]
0
6595f6cefc462b6d1e057446e6c3ed66d36a078b
https://github.com/johnbachman/pykeen/tree/6595f6cefc462b6d1e057446e6c3ed66d36a078b
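A minimal usage sketch for BCEAfterSigmoidLoss above, assuming the class from this record is in scope; it is the explicit sigmoid-then-BCE composition, so the result matches functional.binary_cross_entropy(torch.sigmoid(logits), labels).
import torch

loss_fn = BCEAfterSigmoidLoss()
logits = torch.rand(4, 4, 4, 4)  # get_inputs() above
labels = torch.rand(4, 4, 4, 4)  # soft labels in [0, 1]
loss = loss_fn(logits, labels)
print(loss.item())               # scalar mean over all elements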
unet_bottleneck
import torch import torch.nn as nn class unet_bottleneck(nn.Module): def __init__(self, in_ch, out_ch, ker=3): super(unet_bottleneck, self).__init__() self.relu = nn.ReLU(inplace=True) self.conv1 = nn.Conv2d(in_ch, out_ch, 1) self.bn1 = nn.GroupNorm(out_ch // 4, out_ch) self.conv2 = nn.Conv2d(out_ch, out_ch, 3) self.bn2 = nn.GroupNorm(out_ch // 4, out_ch) self.conv3 = nn.Conv2d(out_ch, out_ch, 1) self.bn3 = nn.GroupNorm(out_ch // 4, out_ch) self.s_conv = nn.Conv2d(in_ch, out_ch, 3) self.s_bn = nn.GroupNorm(out_ch // 4, out_ch) def forward(self, x): xp = self.conv1(x) xp = self.bn1(xp) xp = self.relu(xp) xp = self.conv2(xp) xp = self.bn2(xp) xp = self.relu(xp) xp = self.conv3(xp) xp = self.bn3(xp) x = self.s_conv(x) x = self.s_bn(x) return self.relu(xp + x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'out_ch': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_convolution_native_group_norm_relu_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex r2 = rindex // 16 tmp0 = tl.load(in_out_ptr0 + (r3 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tmp2 - tmp12 tmp20 = 64.0 tmp21 = tmp18 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp19 * tmp24 tmp27 = tmp25 * tmp26 tmp29 = tmp27 + tmp28 tmp30 = tl.full([1, 1], 0, tl.int32) tmp31 = triton_helpers.maximum(tmp30, tmp29) tl.store(in_out_ptr0 + (r3 + 64 * x0), tmp2, xmask) tl.store(out_ptr2 + (r3 + 64 * x0), tmp31, xmask) tl.store(out_ptr3 + x0, tmp24, xmask) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_per_fused_convolution_native_group_norm_relu_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex r2 = rindex // 4 tmp0 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tmp2 - tmp12 tmp20 = 16.0 tmp21 = tmp18 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp19 * tmp24 tmp27 = tmp25 * tmp26 tmp29 = tmp27 + tmp28 tmp30 = tl.full([1, 1], 
0, tl.int32) tmp31 = triton_helpers.maximum(tmp30, tmp29) tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp2, xmask) tl.store(out_ptr2 + (r3 + 16 * x0), tmp31, xmask) tl.store(out_ptr3 + x0, tmp24, xmask) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_per_fused_add_convolution_native_group_norm_relu_threshold_backward_2( in_out_ptr0, in_out_ptr1, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr2, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex r2 = rindex // 4 tmp0 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (r3 + 16 * x0), xmask, other=0.0) tmp4 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last') tmp43 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last') tmp45 = tl.load(in_ptr3 + r2, None, eviction_policy='evict_last') tmp52 = tl.load(in_ptr4 + r2, None, eviction_policy='evict_last') tmp54 = tl.load(in_ptr5 + r2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tl.where(xmask, tmp6, 0) tmp9 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tl.full([XBLOCK, 1], 16, tl.int32) tmp14 = tmp13.to(tl.float32) tmp15 = tmp12 / tmp14 tmp16 = tmp6 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp22, 0) tmp25 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp27 = tl.where(xmask, tmp25, 0) tmp28 = tl.sum(tmp27, 1)[:, None] tmp29 = tmp28 / tmp14 tmp30 = tmp22 - tmp29 tmp31 = tmp30 * tmp30 tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK]) tmp34 = tl.where(xmask, tmp32, 0) tmp35 = tl.sum(tmp34, 1)[:, None] tmp36 = tmp5 - tmp15 tmp37 = 16.0 tmp38 = tmp21 / tmp37 tmp39 = 1e-05 tmp40 = tmp38 + tmp39 tmp41 = libdevice.rsqrt(tmp40) tmp42 = tmp36 * tmp41 tmp44 = tmp42 * tmp43 tmp46 = tmp44 + tmp45 tmp47 = tmp2 - tmp29 tmp48 = tmp35 / tmp37 tmp49 = tmp48 + tmp39 tmp50 = libdevice.rsqrt(tmp49) tmp51 = tmp47 * tmp50 tmp53 = tmp51 * tmp52 tmp55 = tmp53 + tmp54 tmp56 = tmp46 + tmp55 tmp57 = tl.full([1, 1], 0, tl.int32) tmp58 = triton_helpers.maximum(tmp57, tmp56) tmp59 = 0.0 tmp60 = tmp58 <= tmp59 tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp2, xmask) tl.store(in_out_ptr1 + (r3 + 16 * x0), tmp5, xmask) tl.store(in_out_ptr2 + (r3 + 16 * x0), tmp58, xmask) tl.store(out_ptr4 + (r3 + 16 * x0), tmp60, xmask) tl.store(out_ptr5 + x0, tmp41, xmask) tl.store(out_ptr6 + x0, tmp50, xmask) tl.store(out_ptr0 + x0, tmp15, xmask) tl.store(out_ptr2 + x0, tmp29, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) 
assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) get_raw_stream(0) triton_per_fused_convolution_native_group_norm_relu_0[grid(4)](buf1, primals_2, primals_4, primals_5, buf2, buf6, buf5, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_5 buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 2, 2), (16, 4, 2, 1)) buf8 = buf7 del buf7 buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf13 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf12 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_convolution_native_group_norm_relu_1[grid(4)](buf8, primals_7, primals_8, primals_9, buf9, buf13, buf12, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_7 del primals_9 buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 4, 2, 2), (16, 4, 2, 1)) buf20 = extern_kernels.convolution(primals_3, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 4, 2, 2), (16, 4, 2, 1)) buf21 = buf20 del buf20 buf15 = buf14 del buf14 buf16 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf22 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf26 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf27 = buf26 del buf26 buf28 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) buf19 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf25 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_add_convolution_native_group_norm_relu_threshold_backward_2[ grid(4)](buf21, buf15, buf27, primals_15, primals_11, primals_12, primals_13, primals_16, primals_17, buf16, buf22, buf28, buf19, buf25, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_11 del primals_13 del primals_15 del primals_17 return (buf27, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, buf1, reinterpret_tensor(buf2, (4, 1), (1, 1), 0), reinterpret_tensor( buf5, (4, 1), (1, 1), 0), buf6, buf8, reinterpret_tensor(buf9, (4, 1), (1, 1), 0), reinterpret_tensor(buf12, (4, 1), (1, 1), 0), buf13, buf15, reinterpret_tensor(buf16, (4, 1), (1, 1), 0), reinterpret_tensor(buf19, (4, 1), (1, 1), 0), buf21, 
reinterpret_tensor(buf22, (4, 1), (1, 1), 0), reinterpret_tensor( buf25, (4, 1), (1, 1), 0), buf28) class unet_bottleneckNew(nn.Module): def __init__(self, in_ch, out_ch, ker=3): super(unet_bottleneckNew, self).__init__() self.relu = nn.ReLU(inplace=True) self.conv1 = nn.Conv2d(in_ch, out_ch, 1) self.bn1 = nn.GroupNorm(out_ch // 4, out_ch) self.conv2 = nn.Conv2d(out_ch, out_ch, 3) self.bn2 = nn.GroupNorm(out_ch // 4, out_ch) self.conv3 = nn.Conv2d(out_ch, out_ch, 1) self.bn3 = nn.GroupNorm(out_ch // 4, out_ch) self.s_conv = nn.Conv2d(in_ch, out_ch, 3) self.s_bn = nn.GroupNorm(out_ch // 4, out_ch) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.bn1.weight primals_5 = self.bn1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_8 = self.bn2.weight primals_9 = self.bn2.bias primals_10 = self.conv3.weight primals_11 = self.conv3.bias primals_12 = self.bn3.weight primals_13 = self.bn3.bias primals_14 = self.s_conv.weight primals_15 = self.s_conv.bias primals_16 = self.s_bn.weight primals_17 = self.s_bn.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
joeization/CycleGAN
unet_bottleneck
false
3,762
[ "MIT" ]
0
9635c8e3a7b1634b2e2eb5b5299f03a4e0786868
https://github.com/joeization/CycleGAN/tree/9635c8e3a7b1634b2e2eb5b5299f03a4e0786868
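A quick shape sketch (illustrative only, not part of the dataset row): in unet_bottleneck both the main path (1x1, unpadded 3x3, 1x1) and the shortcut (unpadded 3x3) shrink a 4x4 input to 2x2, which is why the residual add is valid and why the compiled call asserts (4, 4, 2, 2) buffers. GroupNorm and ReLU are omitted below because they preserve shape.

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
# Main path: 1x1 keeps 4x4, unpadded 3x3 shrinks to 2x2, final 1x1 keeps 2x2.
main = nn.Conv2d(4, 4, 1)(nn.Conv2d(4, 4, 3)(nn.Conv2d(4, 4, 1)(x)))
# Shortcut: a single unpadded 3x3 also lands on 2x2.
short = nn.Conv2d(4, 4, 3)(x)
print(main.shape, short.shape)  # both torch.Size([4, 4, 2, 2])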
LossCrossentropyAgg
import torch


class LossCrossentropyAgg(torch.nn.Module):

    def __init__(self):
        super(LossCrossentropyAgg, self).__init__()

    def forward(self, preds, target):
        """ Modified cross-entropy that aggregates allowed output classes into a single class. """
        preds = torch.clamp(preds, min=1e-10, max=1.0)
        ll_cce_agg = -torch.log(torch.mean(target * preds, dim=1, keepdim=False))
        return ll_cce_agg


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_log_mean_mul_neg_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp20 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = 1e-10 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = 1.0 tmp5 = triton_helpers.minimum(tmp3, tmp4) tmp6 = tmp0 * tmp5 tmp9 = triton_helpers.maximum(tmp8, tmp2) tmp10 = triton_helpers.minimum(tmp9, tmp4) tmp11 = tmp7 * tmp10 tmp12 = tmp6 + tmp11 tmp15 = triton_helpers.maximum(tmp14, tmp2) tmp16 = triton_helpers.minimum(tmp15, tmp4) tmp17 = tmp13 * tmp16 tmp18 = tmp12 + tmp17 tmp21 = triton_helpers.maximum(tmp20, tmp2) tmp22 = triton_helpers.minimum(tmp21, tmp4) tmp23 = tmp19 * tmp22 tmp24 = tmp18 + tmp23 tmp25 = 4.0 tmp26 = tmp24 / tmp25 tmp27 = tl_math.log(tmp26) tmp28 = -tmp27 tl.store(out_ptr0 + x2, tmp28, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_log_mean_mul_neg_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class LossCrossentropyAggNew(torch.nn.Module): def __init__(self): super(LossCrossentropyAggNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
johnmous/sfaira
LossCrossentropyAgg
false
3,763
[ "BSD-3-Clause" ]
0
c50240a74530e614ab7681bf9c63b04cb815b361
https://github.com/johnmous/sfaira/tree/c50240a74530e614ab7681bf9c63b04cb815b361
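The same aggregated loss written inline, as a minimal sketch of what the fused clamp/mul/mean/log/neg kernel computes: predictions are clamped, multiplied by the target, averaged over the class dimension, and the log is negated, reducing a (4, 4, 4, 4) pair to a (4, 4, 4) map.

import torch

preds = torch.clamp(torch.rand(4, 4, 4, 4), min=1e-10, max=1.0)
target = torch.rand(4, 4, 4, 4)
loss = -torch.log(torch.mean(target * preds, dim=1))  # shape (4, 4, 4), matching buf0 above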
CmapPafHead
import torch import torch.utils.data import torch.nn import torch.optim class UpsampleCBR(torch.nn.Sequential): def __init__(self, input_channels, output_channels, count=1, num_flat=0): layers = [] for i in range(count): if i == 0: inch = input_channels else: inch = output_channels layers += [torch.nn.ConvTranspose2d(inch, output_channels, kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d( output_channels), torch.nn.ReLU()] for i in range(num_flat): layers += [torch.nn.Conv2d(output_channels, output_channels, kernel_size=3, stride=1, padding=1), torch.nn. BatchNorm2d(output_channels), torch.nn.ReLU()] super(UpsampleCBR, self).__init__(*layers) class CmapPafHead(torch.nn.Module): def __init__(self, input_channels, cmap_channels, paf_channels, upsample_channels=256, num_upsample=0, num_flat=0): super(CmapPafHead, self).__init__() if num_upsample > 0: self.cmap_conv = torch.nn.Sequential(UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat), torch.nn.Conv2d (upsample_channels, cmap_channels, kernel_size=1, stride=1, padding=0)) self.paf_conv = torch.nn.Sequential(UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat), torch.nn.Conv2d (upsample_channels, paf_channels, kernel_size=1, stride=1, padding=0)) else: self.cmap_conv = torch.nn.Conv2d(input_channels, cmap_channels, kernel_size=1, stride=1, padding=0) self.paf_conv = torch.nn.Conv2d(input_channels, paf_channels, kernel_size=1, stride=1, padding=0) def forward(self, x): return self.cmap_conv(x), self.paf_conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channels': 4, 'cmap_channels': 4, 'paf_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf1, buf3, primals_1, primals_3, primals_4 class UpsampleCBR(torch.nn.Sequential): def __init__(self, input_channels, output_channels, count=1, num_flat=0): layers = [] for i in range(count): if i == 0: inch = input_channels else: inch = output_channels layers += [torch.nn.ConvTranspose2d(inch, output_channels, kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d( output_channels), torch.nn.ReLU()] for i in range(num_flat): layers += [torch.nn.Conv2d(output_channels, output_channels, kernel_size=3, stride=1, padding=1), torch.nn. 
BatchNorm2d(output_channels), torch.nn.ReLU()] super(UpsampleCBR, self).__init__(*layers) class CmapPafHeadNew(torch.nn.Module): def __init__(self, input_channels, cmap_channels, paf_channels, upsample_channels=256, num_upsample=0, num_flat=0): super(CmapPafHeadNew, self).__init__() if num_upsample > 0: self.cmap_conv = torch.nn.Sequential(UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat), torch.nn.Conv2d (upsample_channels, cmap_channels, kernel_size=1, stride=1, padding=0)) self.paf_conv = torch.nn.Sequential(UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat), torch.nn.Conv2d (upsample_channels, paf_channels, kernel_size=1, stride=1, padding=0)) else: self.cmap_conv = torch.nn.Conv2d(input_channels, cmap_channels, kernel_size=1, stride=1, padding=0) self.paf_conv = torch.nn.Conv2d(input_channels, paf_channels, kernel_size=1, stride=1, padding=0) def forward(self, input_0): primals_1 = self.cmap_conv.weight primals_2 = self.cmap_conv.bias primals_4 = self.paf_conv.weight primals_5 = self.paf_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
intflow/trt_openpose
CmapPafHead
false
3,764
[ "MIT" ]
0
526b1b0d463f1c86a45ca4d4cd77a41732c7654b
https://github.com/intflow/trt_openpose/tree/526b1b0d463f1c86a45ca4d4cd77a41732c7654b
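With num_upsample=0 the head reduces to two independent 1x1 convolutions over the same feature map, which is why the compiled call launches the same bias-add kernel twice. A rough sketch of that case (not the exported module itself):

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
cmap_conv = nn.Conv2d(4, 4, kernel_size=1)
paf_conv = nn.Conv2d(4, 4, kernel_size=1)
cmap, paf = cmap_conv(x), paf_conv(x)  # both (4, 4, 4, 4); 1x1 convs preserve spatial size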
NearestInterp
import torch class NearestInterp(torch.nn.Module): """ Nearest neighbor interpolation layer. note: From the source code, it appears that Darknet uses nearest neighbor method for its upsampling layer (darknet master-30 oct 2018). Internally calls torch.nn.functional.interpolate to suppress the warning on calling torch.nn.Upsample. """ def __init__(self, factor): """ Constructor. Parameters ---------- factor: float The interpolation factor. """ super(NearestInterp, self).__init__() self.factor = factor def forward(self, inp): """ Parameters ---------- inp: torch.tensor Input image as torch rank-4 tensor: batch x channels x height x width. """ return torch.nn.functional.interpolate(inp, scale_factor=self. factor, mode='nearest') def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'factor': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 16 x0 = xindex % 16 x2 = xindex // 256 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.25 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), None, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(4096)](arg0_1, buf0, 4096, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class NearestInterpNew(torch.nn.Module): """ Nearest neighbor interpolation layer. note: From the source code, it appears that Darknet uses nearest neighbor method for its upsampling layer (darknet master-30 oct 2018). Internally calls torch.nn.functional.interpolate to suppress the warning on calling torch.nn.Upsample. """ def __init__(self, factor): """ Constructor. Parameters ---------- factor: float The interpolation factor. """ super(NearestInterpNew, self).__init__() self.factor = factor def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
jonathanzjl/cam-vision
NearestInterp
false
3,765
[ "BSD-2-Clause" ]
0
d1bd865b147ea1137979b624c64a6baa4a4b0714
https://github.com/jonathanzjl/cam-vision/tree/d1bd865b147ea1137979b624c64a6baa4a4b0714
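A one-line check of the factor-4 case (illustrative): the interpolate call produces the (4, 4, 16, 16) tensor that the _unsafe_index kernel writes.

import torch

x = torch.rand(4, 4, 4, 4)
y = torch.nn.functional.interpolate(x, scale_factor=4, mode='nearest')
print(y.shape)  # torch.Size([4, 4, 16, 16])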
NoisyLinear
import math import torch import torch.nn as nn import torch.nn import torch.optim class NoisyLinear(nn.Linear): def __init__(self, in_dimension, out_dimension, std_dev_init=0.4) ->None: """ Noisy Networks for Exploration: https://arxiv.org/abs/1706.10295 Standard linear layer: y = wx + b Noise linear layer: y = wx + b, where: w = u1 + σ1 * ε and b = u2 + σ2 * ε ε ~ N(0, std) Using factorized Guassian Noise for performance. :param in_dimension: Number of in dimensions :param out_dimension: Number of out dimensions :param std_dev_init: Standard deviation initialization value """ self.in_dimension, self.out_dimension = in_dimension, out_dimension super(NoisyLinear, self).__init__(self.in_dimension, self. out_dimension, bias=True) sigma_init = std_dev_init / math.sqrt(in_dimension) self.sigma_weight = nn.Parameter(torch.Tensor(self.out_dimension, self.in_dimension).fill_(sigma_init)) self.sigma_bias = nn.Parameter(torch.Tensor(self.out_dimension). fill_(sigma_init)) def forward(self, input): epsilon_input = torch.randn(1, input.size()[1], device=input.device) epsilon_output = torch.randn(self.out_dimension - input.size()[1] + input.size()[1], 1, device=input.device) epsilon_in = torch.sign(epsilon_input) * torch.sqrt(torch.abs( epsilon_input)) epsilon_out = torch.sign(epsilon_output) * torch.sqrt(torch.abs( epsilon_output)) noise = torch.mul(epsilon_in, epsilon_out) bias = self.bias + self.sigma_bias * epsilon_out.t() weight = self.weight + self.sigma_weight * noise return input.matmul(weight.t()) + bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dimension': 4, 'out_dimension': 4}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_abs_add_mul_sign_sqrt_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp3 < tmp2 tmp5 = tmp4.to(tl.int8) tmp6 = tmp2 < tmp3 tmp7 = tmp6.to(tl.int8) tmp8 = tmp5 - tmp7 tmp9 = tmp8.to(tmp2.dtype) tmp10 = tl_math.abs(tmp2) tmp11 = libdevice.sqrt(tmp10) tmp12 = tmp9 * tmp11 tmp14 = tmp3 < tmp13 tmp15 = tmp14.to(tl.int8) tmp16 = tmp13 < tmp3 tmp17 = tmp16.to(tl.int8) tmp18 = tmp15 - tmp17 tmp19 = tmp18.to(tmp13.dtype) tmp20 = tl_math.abs(tmp13) tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp19 * tmp21 tmp23 = tmp12 * tmp22 tmp24 = tmp1 * tmp23 tmp25 = tmp0 + tmp24 tl.store(out_ptr0 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tl.full([1], 0, tl.int32) tmp5 = tmp4 < tmp3 tmp6 = tmp5.to(tl.int8) tmp7 = tmp3 < tmp4 tmp8 = tmp7.to(tl.int8) tmp9 = tmp6 - tmp8 tmp10 = tmp9.to(tmp3.dtype) tmp11 = tl_math.abs(tmp3) tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp10 * tmp12 tmp14 = tmp2 * tmp13 tmp15 = tmp1 + tmp14 tmp16 = tmp0 + tmp15 tl.store(in_out_ptr0 + x2, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.randn.default([1, 4], device=device(type= 'cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = torch.ops.aten.randn.default([4, 1], device=device(type= 'cuda', index=0), pin_memory=False) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_add_mul_sign_sqrt_0[grid(16)](primals_4, primals_5, buf1, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 del primals_5 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4), (1, 4), 0), 
out=buf5) del buf4 buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_add_mul_1[grid(256)](buf6, primals_2, primals_3, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf6, buf1, buf3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0) class NoisyLinearNew(nn.Linear): def __init__(self, in_dimension, out_dimension, std_dev_init=0.4) ->None: """ Noisy Networks for Exploration: https://arxiv.org/abs/1706.10295 Standard linear layer: y = wx + b Noise linear layer: y = wx + b, where: w = u1 + σ1 * ε and b = u2 + σ2 * ε ε ~ N(0, std) Using factorized Guassian Noise for performance. :param in_dimension: Number of in dimensions :param out_dimension: Number of out dimensions :param std_dev_init: Standard deviation initialization value """ self.in_dimension, self.out_dimension = in_dimension, out_dimension super(NoisyLinearNew, self).__init__(self.in_dimension, self. out_dimension, bias=True) sigma_init = std_dev_init / math.sqrt(in_dimension) self.sigma_weight = nn.Parameter(torch.Tensor(self.out_dimension, self.in_dimension).fill_(sigma_init)) self.sigma_bias = nn.Parameter(torch.Tensor(self.out_dimension). fill_(sigma_init)) def forward(self, input_0): primals_4 = self.weight primals_2 = self.bias primals_5 = self.sigma_weight primals_3 = self.sigma_bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
johncliu/Horizon
NoisyLinear
false
3,766
[ "BSD-3-Clause" ]
0
cfa7a873ada5de3bb01e78e2f237d9849b8270b2
https://github.com/johncliu/Horizon/tree/cfa7a873ada5de3bb01e78e2f237d9849b8270b2
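The factorised Gaussian noise from the forward pass, isolated as a sketch: two small noise vectors are squashed with sign(x) * sqrt(|x|) and broadcast-multiplied into a weight-shaped matrix, so only in + out samples are drawn instead of in * out.

import torch

def squash(eps):
    # f(x) = sign(x) * sqrt(|x|), applied to both factor vectors.
    return torch.sign(eps) * torch.sqrt(torch.abs(eps))

eps_in = torch.randn(1, 4)                 # one sample per input unit
eps_out = torch.randn(4, 1)                # one sample per output unit
noise = squash(eps_in) * squash(eps_out)   # (4, 4) weight-shaped noise from 4 + 4 samples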
ExpandNetLoss
import torch from torch import nn class ExpandNetLoss(nn.Module): def __init__(self, loss_lambda=5): super(ExpandNetLoss, self).__init__() self.similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-20) self.l1_loss = nn.L1Loss() self.loss_lambda = loss_lambda def forward(self, x, y): cosine_term = (1 - self.similarity(x, y)).mean() return self.l1_loss(x, y) + self.loss_lambda * cosine_term def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_clamp_min_div_linalg_vector_norm_mean_mul_sub_0( in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r3 = rindex // 64 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp7 = tl.load(in_ptr0 + (r1 + 64 * r3), None, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (16 + r1 + 64 * r3), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (32 + r1 + 64 * r3), None, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (48 + r1 + 64 * r3), None, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + (r1 + 64 * r3), None, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr1 + (16 + r1 + 64 * r3), None, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr1 + (32 + r1 + 64 * r3), None, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr1 + (48 + r1 + 64 * r3), None, eviction_policy= 'evict_last') tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp8 = tmp7 * tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp11 + tmp13 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = 1e-20 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp0 / tmp20 tmp23 = tmp22 * tmp22 tmp25 = tmp24 * tmp24 tmp26 = tmp23 + tmp25 tmp28 = tmp27 * tmp27 tmp29 = tmp26 + tmp28 tmp31 = tmp30 * tmp30 tmp32 = tmp29 + tmp31 tmp33 = libdevice.sqrt(tmp32) tmp34 = triton_helpers.maximum(tmp33, tmp19) tmp35 = tmp1 / tmp34 tmp36 = tmp21 * tmp35 tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp36, None) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None) @triton.jit def triton_per_fused_abs_add_mean_mul_rsub_sub_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp12 = tl.load(in_out_ptr0 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, 1]) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 64.0 tmp17 = tmp11 / tmp16 tmp18 = 5.0 tmp19 = tmp17 * tmp18 tmp20 = tmp15 + tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_abs_clamp_min_div_linalg_vector_norm_mean_mul_sub_0[ grid(1)](arg1_1, arg0_1, buf0, buf1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = buf0 del buf0 triton_per_fused_abs_add_mean_mul_rsub_sub_sum_1[grid(1)](buf3, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf1 return buf3, class ExpandNetLossNew(nn.Module): def __init__(self, loss_lambda=5): super(ExpandNetLossNew, self).__init__() self.similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-20) self.l1_loss = nn.L1Loss() self.loss_lambda = loss_lambda def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
jongwookyi/hdr-expandnet
ExpandNetLoss
false
3,767
[ "BSD-3-Clause-Clear" ]
0
0594605c8f2041bc592c20c1e7fd8615994c6b01
https://github.com/jongwookyi/hdr-expandnet/tree/0594605c8f2041bc592c20c1e7fd8615994c6b01
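The loss expanded inline (sketch only): an L1 term plus loss_lambda times the mean cosine distance, with similarity taken over the channel dimension.

import torch

x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
cos = torch.nn.CosineSimilarity(dim=1, eps=1e-20)
loss = torch.nn.L1Loss()(x, y) + 5 * (1 - cos(x, y)).mean()  # loss_lambda = 5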
ComplexConv2d
import torch import torch.nn as nn class ComplexConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, **kwargs): super().__init__() self.conv_re = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias, **kwargs) self.conv_im = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias, **kwargs) def forward(self, x): real = self.conv_re(x[..., 0]) - self.conv_im(x[..., 1]) imaginary = self.conv_re(x[..., 1]) + self.conv_im(x[..., 0]) output = torch.stack((real, imaginary), dim=-1) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr2 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr3 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp8 + tmp9 tmp11 = tmp7 - tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp17 = tl.load(in_ptr4 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp18 = tl.load(in_ptr1 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp19 = tmp17 + tmp18 tmp20 = tl.load(in_ptr5 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp21 = tl.load(in_ptr3 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp22 = tmp20 + tmp21 tmp23 = tmp19 + tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp14, tmp23, tmp24) tmp26 = tl.where(tmp4, tmp13, tmp25) tl.store(out_ptr0 + x2, tmp26, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 0), primals_2, stride=(1, 1), padding =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 1, 1), (4, 1, 1, 1)) buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 1), primals_4, stride=(1, 1), padding =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 1, 1), (4, 1, 1, 1)) buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 1), primals_2, stride=(1, 1), padding =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 1, 1), (4, 1, 1, 1)) buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 0), primals_4, stride=(1, 1), padding =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 1, 1), (4, 1, 1, 1)) buf4 = empty_strided_cuda((4, 1, 1, 2), (2, 2, 2, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_stack_0[grid(8)](buf0, primals_3, buf1, primals_5, buf2, buf3, buf4, 8, XBLOCK=8, num_warps=1, num_stages=1) del buf0 del buf1 del buf2 del buf3 del primals_3 del primals_5 return buf4, primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4, 4), (256, 64, 16, 4), 0), reinterpret_tensor(primals_1, (1, 4, 4, 4), (256, 64, 16, 4), 1) class ComplexConv2dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, **kwargs): super().__init__() self.conv_re = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias, **kwargs) self.conv_im = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias, **kwargs) def forward(self, input_0): primals_1 = self.conv_re.weight primals_3 = self.conv_re.bias primals_2 = self.conv_im.weight primals_5 = self.conv_im.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jonashaag/PhoneFortifiedPerceptualLoss
ComplexConv2d
false
3,768
[ "MIT" ]
0
1dabdd4203f59c2d1bfe22bffc4c63b204aa50bd
https://github.com/jonashaag/PhoneFortifiedPerceptualLoss/tree/1dabdd4203f59c2d1bfe22bffc4c63b204aa50bd
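The forward pass is the usual complex product (a + ib)(c + id) = (ac - bd) + i(ad + bc), with conv_re and conv_im supplying the real and imaginary filters. A sketch with an explicit batch dimension for clarity (the row's get_inputs actually feeds an unbatched (4, 4, 4, 4) tensor whose last dimension holds the real/imaginary parts):

import torch
import torch.nn as nn

x = torch.rand(2, 4, 8, 8, 2)                      # last dim holds [real, imag]
conv_re, conv_im = nn.Conv2d(4, 4, 4), nn.Conv2d(4, 4, 4)
real = conv_re(x[..., 0]) - conv_im(x[..., 1])
imag = conv_re(x[..., 1]) + conv_im(x[..., 0])
out = torch.stack((real, imag), dim=-1)            # (2, 4, 5, 5, 2)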
ComplexConvTranspose2d
import torch import torch.nn as nn class ComplexConvTranspose2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, dilation=1, groups=1, bias=True, **kwargs ): super().__init__() self.tconv_re = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, groups=groups, bias=bias, dilation=dilation, **kwargs) self.tconv_im = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, groups=groups, bias=bias, dilation=dilation, **kwargs) def forward(self, x): real = self.tconv_re(x[..., 0]) - self.tconv_im(x[..., 1]) imaginary = self.tconv_re(x[..., 1]) + self.tconv_im(x[..., 0]) output = torch.stack((real, imaginary), dim=-1) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 392 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x3 = xindex // 2 x2 = xindex // 98 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x2, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr2 + x3, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr3 + x2, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp8 + tmp9 tmp11 = tmp7 - tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp17 = tl.load(in_ptr4 + x3, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp18 = tl.load(in_ptr1 + x2, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp19 = tmp17 + tmp18 tmp20 = tl.load(in_ptr5 + x3, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp21 = tl.load(in_ptr3 + x2, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp22 = tmp20 + tmp21 tmp23 = tmp19 + tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp14, tmp23, tmp24) tmp26 = tl.where(tmp4, tmp13, tmp25) tl.store(out_ptr0 + x4, tmp26, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 0), primals_2, stride=(1, 1), padding =(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 7, 7), (196, 49, 7, 1)) buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 1), primals_4, stride=(1, 1), padding =(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 7, 7), (196, 49, 7, 1)) buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 1), primals_2, stride=(1, 1), padding =(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 7, 7), (196, 49, 7, 1)) buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 0), primals_4, stride=(1, 1), padding =(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 7, 7), (196, 49, 7, 1)) buf4 = empty_strided_cuda((4, 7, 7, 2), (98, 14, 2, 1), torch.float32) 
get_raw_stream(0) triton_poi_fused_stack_0[grid(392)](buf0, primals_3, buf1, primals_5, buf2, buf3, buf4, 392, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del buf2 del buf3 del primals_3 del primals_5 return buf4, primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4, 4), (256, 64, 16, 4), 0), reinterpret_tensor(primals_1, (1, 4, 4, 4), (256, 64, 16, 4), 1) class ComplexConvTranspose2dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, dilation=1, groups=1, bias=True, **kwargs ): super().__init__() self.tconv_re = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, groups=groups, bias=bias, dilation=dilation, **kwargs) self.tconv_im = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, groups=groups, bias=bias, dilation=dilation, **kwargs) def forward(self, input_0): primals_1 = self.tconv_re.weight primals_3 = self.tconv_re.bias primals_2 = self.tconv_im.weight primals_5 = self.tconv_im.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jonashaag/PhoneFortifiedPerceptualLoss
ComplexConvTranspose2d
false
3,769
[ "MIT" ]
0
1dabdd4203f59c2d1bfe22bffc4c63b204aa50bd
https://github.com/jonashaag/PhoneFortifiedPerceptualLoss/tree/1dabdd4203f59c2d1bfe22bffc4c63b204aa50bd
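Same complex-product structure as ComplexConv2d above, but with transposed convolutions, which grow the spatial size: kernel 4, stride 1, no padding turns a 4x4 map into 7x7, matching the (1, 4, 7, 7) buffers asserted in the compiled call. A minimal check:

import torch
import torch.nn as nn

t = nn.ConvTranspose2d(4, 4, kernel_size=4)   # stride 1, padding 0
print(t(torch.rand(1, 4, 4, 4)).shape)        # torch.Size([1, 4, 7, 7]); H_out = H_in + k - 1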
AddPositionalEncoding
import torch import torch.nn as nn import torch.onnx class AddPositionalEncoding(nn.Module): def __init__(self, hidden_size, max_sequence_length): super(AddPositionalEncoding, self).__init__() self.hidden_size = hidden_size self.max_sequence_length = max_sequence_length self.positional_encoding = nn.Parameter(torch.empty( max_sequence_length, hidden_size)) nn.init.normal_(self.positional_encoding) def forward(self, x): seq_len = x.size(1) return x + self.positional_encoding[:seq_len] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4, 'max_sequence_length': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class AddPositionalEncodingNew(nn.Module): def __init__(self, hidden_size, max_sequence_length): super(AddPositionalEncodingNew, self).__init__() self.hidden_size = hidden_size self.max_sequence_length = max_sequence_length self.positional_encoding = nn.Parameter(torch.empty( max_sequence_length, hidden_size)) nn.init.normal_(self.positional_encoding) def forward(self, input_0): primals_2 = self.positional_encoding primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
jonndoe/Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch
AddPositionalEncoding
false
3,770
[ "MIT" ]
0
d27d2d390f0831330405c16bd29c7f331ad2007a
https://github.com/jonndoe/Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch/tree/d27d2d390f0831330405c16bd29c7f331ad2007a
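The slice-and-broadcast add on its own (a sketch, not the module): the first seq_len rows of the learned table are added to the input, broadcasting over the leading dimensions, which is all the fused add kernel does.

import torch

x = torch.rand(4, 4, 4, 4)     # x.size(1) == 4 is read as the sequence length
pe = torch.randn(4, 4)         # (max_sequence_length, hidden_size) table
out = x + pe[:x.size(1)]       # (4, 4) broadcasts against the trailing dims of x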
GatedConv1d
import torch import torch.nn as nn import torch.onnx class MaskedConv1d(nn.Conv1d): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1, bias=True, causal=True): if causal: padding = (kernel_size - 1) * dilation else: padding = (kernel_size - 1) * dilation // 2 super(MaskedConv1d, self).__init__(in_channels, out_channels, kernel_size, stride=1, padding=padding, dilation=dilation, groups=groups, bias=bias) def forward(self, inputs): output = super(MaskedConv1d, self).forward(inputs) return output[:, :, :inputs.size(2)] class GatedConv1d(MaskedConv1d): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1, bias=True, causal=True): super(GatedConv1d, self).__init__(in_channels, 2 * out_channels, kernel_size, dilation, groups, bias, causal) self.sigmoid = nn.Sigmoid() def forward(self, inputs): output = super(GatedConv1d, self).forward(inputs) mask, output = output.chunk(2, 1) mask = self.sigmoid(mask) return output * mask def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 224 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 7 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 7 * x1 + 56 * x2), xmask) tmp2 = tl.load(in_ptr0 + (28 + x0 + 7 * x1 + 56 * x2), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp2 * tmp1 tl.store(out_ptr0 + x3, tmp1, xmask) tl.store(out_ptr1 + x3, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 7), (56, 7, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(224)](buf1, primals_2, 224, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf3, primals_1, primals_3, reinterpret_tensor(buf1, (4, 4, 4), (56, 7, 1), 28), buf2 class MaskedConv1d(nn.Conv1d): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1, bias=True, causal=True): if causal: padding = (kernel_size - 1) * dilation else: padding = (kernel_size - 1) * dilation // 2 super(MaskedConv1d, self).__init__(in_channels, out_channels, kernel_size, stride=1, padding=padding, dilation=dilation, groups=groups, bias=bias) def forward(self, inputs): output = super(MaskedConv1d, self).forward(inputs) return output[:, :, :inputs.size(2)] class GatedConv1dNew(MaskedConv1d): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1, bias=True, causal=True): super(GatedConv1dNew, self).__init__(in_channels, 2 * out_channels, kernel_size, dilation, groups, bias, causal) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
jonndoe/Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch
GatedConv1d
false
3,771
[ "MIT" ]
0
d27d2d390f0831330405c16bd29c7f331ad2007a
https://github.com/jonndoe/Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch/tree/d27d2d390f0831330405c16bd29c7f331ad2007a
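The gating step after the doubled-width causal convolution, isolated as a sketch: the 2 * out_channels output is chunked in half along the channel dimension and one half gates the other through a sigmoid, which is what the fused mul_sigmoid kernel computes.

import torch

y = torch.rand(4, 8, 4)               # conv output with 2 * out_channels = 8
mask, out = y.chunk(2, dim=1)         # two (4, 4, 4) halves
gated = out * torch.sigmoid(mask)     # (4, 4, 4)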
MaskedConv1d
import torch import torch.nn as nn import torch.onnx class MaskedConv1d(nn.Conv1d): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1, bias=True, causal=True): if causal: padding = (kernel_size - 1) * dilation else: padding = (kernel_size - 1) * dilation // 2 super(MaskedConv1d, self).__init__(in_channels, out_channels, kernel_size, stride=1, padding=padding, dilation=dilation, groups=groups, bias=bias) def forward(self, inputs): output = super(MaskedConv1d, self).forward(inputs) return output[:, :, :inputs.size(2)] def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 7 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 7), (28, 7, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(112)](buf1, primals_2, 112, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4, 4), (28, 7, 1), 0 ), primals_1, primals_3 class MaskedConv1dNew(nn.Conv1d): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1, bias=True, causal=True): if causal: padding = (kernel_size - 1) * dilation else: padding = (kernel_size - 1) * dilation // 2 super(MaskedConv1dNew, self).__init__(in_channels, out_channels, kernel_size, stride=1, padding=padding, dilation=dilation, groups=groups, bias=bias) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
jonndoe/Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch
MaskedConv1d
false
3,772
[ "MIT" ]
0
d27d2d390f0831330405c16bd29c7f331ad2007a
https://github.com/jonndoe/Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch/tree/d27d2d390f0831330405c16bd29c7f331ad2007a
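Causal masking here is just padding by (kernel_size - 1) * dilation and then truncating the output back to the input length, so position t never sees inputs beyond t. A minimal sketch; the (4, 4, 7) intermediate matches the buffer asserted in the compiled call.

import torch
import torch.nn as nn

conv = nn.Conv1d(4, 4, kernel_size=4, padding=3)   # padding = (kernel_size - 1) * dilation
x = torch.rand(4, 4, 4)
y = conv(x)                                        # (4, 4, 7) with symmetric padding
out = y[:, :, :x.size(2)]                          # keep the first 4 positions -> (4, 4, 4)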
Swish
import torch from torch import nn class Swish(nn.Module): def forward(self, x): return torch.sigmoid(x) * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = tmp1 * tmp0 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SwishNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
jseppanen/sacking
Swish
false
3,773
[ "Apache-2.0" ]
0
ff16d9a0cbec2661bc84be33ee4b3987be22228e
https://github.com/jseppanen/sacking/tree/ff16d9a0cbec2661bc84be33ee4b3987be22228e
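Swish is sigmoid(x) * x, the function PyTorch also exposes as SiLU; a two-line equivalence check (illustrative):

import torch

x = torch.rand(4, 4, 4, 4)
assert torch.allclose(torch.sigmoid(x) * x, torch.nn.functional.silu(x))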
Actor
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Actor(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=512, fc2_units=384): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(Actor, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, action_size) self.reset_parameters() def reset_parameters(self): self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, state): """Build an actor (policy) network that maps states -> actions.""" x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) return F.tanh(self.fc3(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 384 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (384, 512), (512, 1)) assert_size_stride(primals_5, (384,), (1,)) assert_size_stride(primals_6, (4, 384), (384, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 512), (8192, 2048, 512, 1), 0 ) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(32768)](buf1, primals_2, buf7, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 384), (384, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 384), (1, 512), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 384), (6144, 1536, 384, 1), 0 ) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 384), (6144, 1536, 384, 1), torch.bool) 
triton_poi_fused_relu_threshold_backward_1[grid(24576)](buf3, primals_5, buf6, 24576, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 384), (384, 1), 0), reinterpret_tensor(primals_6, (384, 4), (1, 384), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 512), (512, 1), 0 ), reinterpret_tensor(buf3, (64, 384), (384, 1), 0 ), buf5, primals_6, buf6, primals_4, buf7 def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class ActorNew(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=512, fc2_units=384): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(ActorNew, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, action_size) self.reset_parameters() def reset_parameters(self): self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
joyce-fang/deep-reinforcement-learning
Actor
false
3,774
[ "MIT" ]
0
62cedab584465bd1c3ef112eb149e8fc611546e3
https://github.com/joyce-fang/deep-reinforcement-learning/tree/62cedab584465bd1c3ef112eb149e8fc611546e3
SDPAttention
import torch
import torch.nn as nn
import torch.onnx
import torch.nn.functional as F


class SDPAttention(nn.Module):
    """
    Scaled Dot-Product Attention
    """

    def __init__(self, dropout=0, causal=False):
        super(SDPAttention, self).__init__()
        self.causal = causal
        self.dropout = nn.Dropout(dropout)
        self.mask_q = None
        self.mask_k = None

    def set_mask_q(self, masked_tq):
        self.mask_q = masked_tq

    def set_mask_k(self, masked_tk):
        self.mask_k = masked_tk

    def forward(self, q, k, v):
        b_q, t_q, dim_q = list(q.size())
        b_k, t_k, dim_k = list(k.size())
        b_v, t_v, _dim_v = list(v.size())
        assert b_q == b_k and b_k == b_v
        assert dim_q == dim_k
        assert t_k == t_v
        b = b_q
        qk = torch.bmm(q, k.transpose(1, 2))
        qk.div_(dim_k ** 0.5)
        mask = None
        with torch.no_grad():
            if self.causal and t_q > 1:
                causal_mask = q.data.new(t_q, t_k).byte().fill_(1).triu_(1)
                mask = causal_mask.unsqueeze(0).expand(b, t_q, t_k)
            if self.mask_k is not None:
                mask_k = self.mask_k.unsqueeze(1).expand(b, t_q, t_k)
                mask = mask_k if mask is None else mask | mask_k
            if self.mask_q is not None:
                mask_q = self.mask_q.unsqueeze(2).expand(b, t_q, t_k)
                mask = mask_q if mask is None else mask | mask_q
        if mask is not None:
            qk.masked_fill_(mask, -1000000000.0)
        sm_qk = F.softmax(qk, dim=2)
        sm_qk = self.dropout(sm_qk)
        return torch.bmm(sm_qk, v), sm_qk


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return buf3, buf2 class SDPAttentionNew(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, dropout=0, causal=False): super(SDPAttentionNew, self).__init__() self.causal = causal self.dropout = nn.Dropout(dropout) self.mask_q = None self.mask_k = None def set_mask_q(self, masked_tq): self.mask_q = masked_tq def set_mask_k(self, masked_tk): self.mask_k = masked_tk def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], 
output[1]
jonndoe/Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch
SDPAttention
false
3,775
[ "MIT" ]
0
d27d2d390f0831330405c16bd29c7f331ad2007a
https://github.com/jonndoe/Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch/tree/d27d2d390f0831330405c16bd29c7f331ad2007a
ResidualBlock
import torch
import torch.nn as nn


class CausalConv1d(torch.nn.Conv1d):
    """Causal 1d convolution"""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        dilation=1, groups=1, bias=True):
        self.__padding = (kernel_size - 1) * dilation
        super(CausalConv1d, self).__init__(in_channels, out_channels,
            kernel_size=kernel_size, stride=stride, padding=self.__padding,
            dilation=dilation, groups=groups, bias=bias)

    def forward(self, input):
        result = super(CausalConv1d, self).forward(input)
        if self.__padding != 0:
            return result[:, :, :-self.__padding]
        return result


class ResidualBlock(nn.Module):
    """Residual block of WaveNet architecture"""

    def __init__(self, residual_channels, dilation_channels, skip_channels,
        n_globals, kernel_size=1, dilation=1):
        super(ResidualBlock, self).__init__()
        self.residual_channels = residual_channels
        self.dilation_channels = dilation_channels
        self.skip_channels = skip_channels
        self.n_globals = n_globals
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.filter_conv = CausalConv1d(in_channels=self.residual_channels,
            out_channels=self.dilation_channels,
            kernel_size=self.kernel_size, dilation=self.dilation)
        self.gated_conv = CausalConv1d(in_channels=self.residual_channels,
            out_channels=self.dilation_channels,
            kernel_size=self.kernel_size, dilation=self.dilation)
        self.filter_linear = nn.Linear(in_features=self.n_globals,
            out_features=self.dilation_channels)
        self.gated_linear = nn.Linear(in_features=self.n_globals,
            out_features=self.dilation_channels)
        self._1x1_conv_res = nn.Conv1d(in_channels=self.dilation_channels,
            out_channels=self.residual_channels, kernel_size=1, dilation=1)
        self._1x1_conv_skip = nn.Conv1d(in_channels=self.dilation_channels,
            out_channels=self.skip_channels, kernel_size=1, dilation=1)

    def forward(self, x, h):
        h_f = self.filter_linear(h).unsqueeze(2)
        h_g = self.gated_linear(h).unsqueeze(2)
        x_f = self.filter_conv(x)
        x_g = self.gated_conv(x)
        z_f = torch.tanh(x_f + h_f)
        z_g = torch.sigmoid(x_g + h_g)
        z = torch.mul(z_f, z_g)
        skip = self._1x1_conv_skip(z)
        residual = x + self._1x1_conv_res(z)
        return skip, residual


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'residual_channels': 4, 'dilation_channels': 1,
        'skip_channels': 4, 'n_globals': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp6 = tmp4 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp8 = tmp3 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x4 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + x4, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x4, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_10, (1,), (1,)) assert_size_stride(primals_11, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_14, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_2, 
primals_3, reinterpret_tensor( primals_1, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, primals_3, reinterpret_tensor( primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_4 del primals_5 buf4 = extern_kernels.convolution(reinterpret_tensor(primals_8, (1, 4, 4), (16, 4, 1), 0), primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf4, (1, 1, 4), (4, 4, 1)) buf5 = buf4 del buf4 get_raw_stream(0) triton_poi_fused_convolution_0[grid(4)](buf5, primals_7, 4, XBLOCK= 4, num_warps=1, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(reinterpret_tensor(primals_8, (1, 4, 4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf6, (1, 1, 4), (4, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_0[grid(4)](buf7, primals_10, 4, XBLOCK =4, num_warps=1, num_stages=1) del primals_10 buf8 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_tanh_1[grid(16)](buf5, buf1, buf7, buf3, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) buf9 = extern_kernels.convolution(buf8, primals_11, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4), (16, 4, 1)) buf10 = buf9 del buf9 triton_poi_fused_convolution_2[grid(64)](buf10, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf11 = extern_kernels.convolution(buf8, primals_13, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 4), (16, 4, 1)) buf12 = buf11 del buf11 triton_poi_fused_add_convolution_3[grid(64)](buf12, primals_8, primals_14, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 return (buf10, buf12, primals_3, primals_6, primals_9, primals_11, primals_13, buf1, buf3, reinterpret_tensor(primals_8, (1, 4, 4), ( 16, 4, 1), 0), buf5, buf7, buf8) class CausalConv1d(torch.nn.Conv1d): """Causal 1d convolution""" def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True): self.__padding = (kernel_size - 1) * dilation super(CausalConv1d, self).__init__(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=self.__padding, dilation=dilation, groups=groups, bias=bias) def forward(self, input): result = super(CausalConv1d, self).forward(input) if self.__padding != 0: return result[:, :, :-self.__padding] return result class ResidualBlockNew(nn.Module): """Residual block of WaveNet architecture""" def __init__(self, residual_channels, dilation_channels, skip_channels, n_globals, kernel_size=1, dilation=1): super(ResidualBlockNew, self).__init__() self.residual_channels = residual_channels self.dilation_channels = dilation_channels self.skip_channels = skip_channels self.n_globals = n_globals self.kernel_size = kernel_size self.dilation = dilation self.filter_conv = CausalConv1d(in_channels=self.residual_channels, out_channels=self.dilation_channels, kernel_size=self. kernel_size, dilation=self.dilation) self.gated_conv = CausalConv1d(in_channels=self.residual_channels, out_channels=self.dilation_channels, kernel_size=self. 
kernel_size, dilation=self.dilation) self.filter_linear = nn.Linear(in_features=self.n_globals, out_features=self.dilation_channels) self.gated_linear = nn.Linear(in_features=self.n_globals, out_features=self.dilation_channels) self._1x1_conv_res = nn.Conv1d(in_channels=self.dilation_channels, out_channels=self.residual_channels, kernel_size=1, dilation=1) self._1x1_conv_skip = nn.Conv1d(in_channels=self.dilation_channels, out_channels=self.skip_channels, kernel_size=1, dilation=1) def forward(self, input_0, input_1): primals_6 = self.filter_conv.weight primals_2 = self.filter_conv.bias primals_9 = self.gated_conv.weight primals_5 = self.gated_conv.bias primals_1 = self.filter_linear.weight primals_7 = self.filter_linear.bias primals_4 = self.gated_linear.weight primals_10 = self.gated_linear.bias primals_11 = self._1x1_conv_res.weight primals_12 = self._1x1_conv_res.bias primals_13 = self._1x1_conv_skip.weight primals_14 = self._1x1_conv_skip.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return output[0], output[1]
jonasvj/protein-generation
ResidualBlock
false
3,776
[ "MIT" ]
0
ad716f2dba6f6642a6d54571571e6f539cee3644
https://github.com/jonasvj/protein-generation/tree/ad716f2dba6f6642a6d54571571e6f539cee3644
Critic
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn


def hidden_init(layer):
    fan_in = layer.weight.data.size()[0]
    lim = 1.0 / np.sqrt(fan_in)
    return -lim, lim


class Critic(nn.Module):
    """Critic (Value) Model."""

    def __init__(self, state_size, action_size, seed, fcs1_units=512,
        fc2_units=384):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fcs1_units (int): Number of nodes in the first hidden layer
            fc2_units (int): Number of nodes in the second hidden layer
        """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fcs1 = nn.Linear(state_size, fcs1_units)
        self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units)
        self.fc3 = nn.Linear(fc2_units, 1)
        self.reset_parameters()

    def reset_parameters(self):
        self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-0.003, 0.003)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        xs = F.relu(self.fcs1(state))
        x = torch.cat((xs, action), dim=1)
        x = F.relu(self.fc2(x))
        return self.fc3(x)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2064 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 516 x1 = xindex // 516 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 512, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (512 * x1 + x0), tmp4 & xmask, eviction_policy ='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 516, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-512 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 384 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (384, 516), (516, 1)) assert_size_stride(primals_6, (384,), (1,)) assert_size_stride(primals_7, (1, 384), (384, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 516), (516, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(2064)](buf0, primals_2, primals_4, buf1, 2064, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 384), (384, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (516, 384), ( 1, 516), 0), 
out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(1536)](buf3, primals_6, 1536, XBLOCK= 128, num_warps=4, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (384, 1), (1, 384), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 512), (512, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(2048)](buf0, primals_2, buf6, 2048, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 return buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6 def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class CriticNew(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=512, fc2_units=384): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(CriticNew, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0, input_1): primals_1 = self.fcs1.weight primals_2 = self.fcs1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
joyce-fang/deep-reinforcement-learning
Critic
false
3,777
[ "MIT" ]
0
62cedab584465bd1c3ef112eb149e8fc611546e3
https://github.com/joyce-fang/deep-reinforcement-learning/tree/62cedab584465bd1c3ef112eb149e8fc611546e3
QNetwork
import torch
import torch.nn as nn
import torch.nn.functional as F


class QNetwork(nn.Module):
    """Actor (Policy) Model."""

    def __init__(self, state_size, action_size, seed):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
        """
        super(QNetwork, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.in_fc1 = nn.Linear(state_size, 32)
        self.hidden = nn.Linear(32, 16)
        self.output = nn.Linear(16, action_size)

    def forward(self, state):
        """Build a network that maps state -> action values."""
        state = F.relu(self.in_fc1(state))
        state = F.relu(self.hidden(state))
        state = self.output(state)
        return state


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 32), (32, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1, primals_2, buf6, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 16), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(1024)](buf3, primals_5, buf5, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor( 
buf3, (64, 16), (16, 1), 0), primals_6, buf5, primals_4, buf6 class QNetworkNew(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed """ super(QNetworkNew, self).__init__() self.seed = torch.manual_seed(seed) self.in_fc1 = nn.Linear(state_size, 32) self.hidden = nn.Linear(32, 16) self.output = nn.Linear(16, action_size) def forward(self, input_0): primals_1 = self.in_fc1.weight primals_2 = self.in_fc1.bias primals_4 = self.hidden.weight primals_5 = self.hidden.bias primals_6 = self.output.weight primals_7 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
jsztompka/DuelDQN
QNetwork
false
3,778
[ "MIT" ]
0
3b1234425b66034ef233ac988305dc13ffbf7ace
https://github.com/jsztompka/DuelDQN/tree/3b1234425b66034ef233ac988305dc13ffbf7ace
LeakyClamp
import torch
import torch.nn as nn


class LeakyClamp(nn.Module):

    def __init__(self, cap):
        super(LeakyClamp, self).__init__()
        self.cap = cap
        self.leakyrelu = nn.LeakyReLU(inplace=False)
        self.leakyrelu2 = nn.LeakyReLU(inplace=False)

    def forward(self, x):
        x = self.leakyrelu(x)
        x_ret = -self.leakyrelu2(-x + self.cap) + self.cap
        return x_ret


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'cap': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_leaky_relu_neg_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.01
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = -tmp5
    tmp7 = 4.0
    tmp8 = tmp6 + tmp7
    tmp9 = tmp8 > tmp1
    tmp10 = tmp8 * tmp3
    tmp11 = tl.where(tmp9, tmp8, tmp10)
    tmp12 = -tmp11
    tmp13 = tmp12 + tmp7
    tl.store(out_ptr0 + x0, tmp13, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_leaky_relu_neg_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class LeakyClampNew(nn.Module):

    def __init__(self, cap):
        super(LeakyClampNew, self).__init__()
        self.cap = cap
        self.leakyrelu = nn.LeakyReLU(inplace=False)
        self.leakyrelu2 = nn.LeakyReLU(inplace=False)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
junweima/pytorch-cnn-visualizations
LeakyClamp
false
3,779
[ "MIT" ]
0
c535e76e0a169d02a17ec5c8cc109ea687d698c1
https://github.com/junweima/pytorch-cnn-visualizations/tree/c535e76e0a169d02a17ec5c8cc109ea687d698c1
MultiHeadAttention
import torch
import numpy as np
from torch import nn
import torch.nn.parallel


class MultiHeadAttention(nn.Module):

    def __init__(self, heads_count, d_model, dropout_prob):
        super().__init__()
        assert d_model % heads_count == 0, f'model dim {d_model} not divisible by {heads_count} heads'
        self.d_head = d_model // heads_count
        self.heads_count = heads_count
        self.query_projection = nn.Linear(d_model, heads_count * self.d_head)
        self.key_projection = nn.Linear(d_model, heads_count * self.d_head)
        self.value_projection = nn.Linear(d_model, heads_count * self.d_head)
        self.final_projection = nn.Linear(d_model, heads_count * self.d_head)
        self.dropout = nn.Dropout(dropout_prob)
        self.softmax = nn.Softmax(dim=3)
        self.attention = None

    def forward(self, query, key, value, mask=None):
        batch_size, query_len, d_model = query.size()
        d_head = d_model // self.heads_count
        query_projected = self.query_projection(query)
        key_projected = self.key_projection(key)
        value_projected = self.value_projection(value)
        batch_size, key_len, d_model = key_projected.size()
        batch_size, value_len, d_model = value_projected.size()
        query_heads = query_projected.view(batch_size, query_len,
            self.heads_count, d_head).transpose(1, 2)
        key_heads = key_projected.view(batch_size, key_len,
            self.heads_count, d_head).transpose(1, 2)
        value_heads = value_projected.view(batch_size, value_len,
            self.heads_count, d_head).transpose(1, 2)
        attention_weights = self.scaled_dot_product(query_heads, key_heads)
        if mask is not None:
            mask_expanded = mask.unsqueeze(1).expand_as(attention_weights)
            attention_weights = attention_weights.masked_fill(mask_expanded,
                -1e+18)
        attention = self.softmax(attention_weights)
        attention_dropped = self.dropout(attention)
        context_heads = torch.matmul(attention_dropped, value_heads)
        context_sequence = context_heads.transpose(1, 2)
        context = context_sequence.reshape(batch_size, query_len, d_model)
        final_output = self.final_projection(context)
        return final_output

    def scaled_dot_product(self, query_heads, key_heads):
        key_heads_transposed = key_heads.transpose(2, 3)
        dot_product = torch.matmul(query_heads, key_heads_transposed)
        attention_weights = dot_product / np.sqrt(self.d_head)
        return attention_weights


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'heads_count': 4, 'd_model': 4, 'dropout_prob': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np from torch import nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_8, buf8, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 
1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf11) buf12 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0) del buf11 triton_poi_fused_add_4[grid(64)](buf12, primals_11, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 return buf12, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0 ), primals_10, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class MultiHeadAttentionNew(nn.Module): def __init__(self, heads_count, d_model, dropout_prob): super().__init__() assert d_model % heads_count == 0, f'model dim {d_model} not divisible by {heads_count} heads' self.d_head = d_model // heads_count self.heads_count = heads_count self.query_projection = nn.Linear(d_model, heads_count * self.d_head) self.key_projection = nn.Linear(d_model, heads_count * self.d_head) self.value_projection = nn.Linear(d_model, heads_count * self.d_head) self.final_projection = nn.Linear(d_model, heads_count * self.d_head) self.dropout = nn.Dropout(dropout_prob) self.softmax = nn.Softmax(dim=3) self.attention = None def scaled_dot_product(self, query_heads, key_heads): key_heads_transposed = key_heads.transpose(2, 3) dot_product = torch.matmul(query_heads, key_heads_transposed) attention_weights = dot_product / np.sqrt(self.d_head) return attention_weights def forward(self, input_0, input_1, input_2): primals_2 = self.query_projection.weight primals_3 = self.query_projection.bias primals_4 = self.key_projection.weight primals_5 = self.key_projection.bias primals_7 = self.value_projection.weight primals_8 = self.value_projection.bias primals_10 = self.final_projection.weight primals_11 = self.final_projection.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
junchen14/video_language
MultiHeadAttention
false
3,780
[ "Apache-2.0" ]
0
1d6d304b795501d1e0d56351047a259d992fab23
https://github.com/junchen14/video_language/tree/1d6d304b795501d1e0d56351047a259d992fab23
AttentionLayer
import torch import torch.nn as nn import torch.onnx import torch.nn.functional as F class AttentionLayer(nn.Module): """ Attention layer according to https://arxiv.org/abs/1409.0473. Params: num_units: Number of units used in the attention layer """ def __init__(self, query_size, key_size, value_size=None, mode= 'bahdanau', normalize=False, dropout=0, batch_first=False, weight_norm=False, bias=True, query_transform=True, output_transform=True, output_nonlinearity='tanh', output_size=None): super(AttentionLayer, self).__init__() assert mode == 'bahdanau' or mode == 'dot_prod' value_size = value_size or key_size self.mode = mode self.query_size = query_size self.key_size = key_size self.value_size = value_size self.normalize = normalize wn_func = wn if weight_norm else lambda x: x if mode == 'bahdanau': self.linear_att = nn.Linear(key_size, 1, bias=bias) if normalize: self.linear_att = nn.utils.weight_norm(self.linear_att) elif normalize: self.scale = nn.Parameter(torch.Tensor([1])) if output_transform: output_size = output_size or query_size self.linear_out = wn_func(nn.Linear(query_size + value_size, output_size, bias=bias)) self.output_size = output_size else: self.output_size = value_size if query_transform: self.linear_q = wn_func(nn.Linear(query_size, key_size, bias=bias)) self.dropout = nn.Dropout(dropout) self.batch_first = batch_first self.output_nonlinearity = output_nonlinearity self.mask = None def set_mask(self, mask): self.mask = mask if mask is not None and not self.batch_first: self.mask = self.mask.t() def calc_score(self, att_query, att_keys): """ att_query is: b x t_q x n att_keys is b x t_k x n return b x t_q x t_k scores """ b, t_k, n = list(att_keys.size()) t_q = att_query.size(1) if self.mode == 'bahdanau': att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n) att_keys = att_keys.unsqueeze(1).expand(b, t_q, t_k, n) sum_qk = att_query + att_keys sum_qk = sum_qk.view(b * t_k * t_q, n) out = self.linear_att(F.tanh(sum_qk)).view(b, t_q, t_k) elif self.mode == 'dot_prod': out = torch.bmm(att_query, att_keys.transpose(1, 2)) if hasattr(self, 'scale'): out = out * self.scale return out def forward(self, query, keys, values=None): if not self.batch_first: keys = keys.transpose(0, 1) if values is not None: values = values.transpose(0, 1) if query.dim() == 3: query = query.transpose(0, 1) if query.dim() == 2: single_query = True query = query.unsqueeze(1) else: single_query = False values = keys if values is None else values b = query.size(0) t_k = keys.size(1) t_q = query.size(1) if hasattr(self, 'linear_q'): att_query = self.linear_q(query) else: att_query = query scores = self.calc_score(att_query, keys) if self.mask is not None: mask = self.mask.unsqueeze(1).expand(b, t_q, t_k) scores.masked_fill_(mask, -1000000000000.0) scores_normalized = F.softmax(scores, dim=2) scores_normalized = self.dropout(scores_normalized) context = torch.bmm(scores_normalized, values) if hasattr(self, 'linear_out'): context = self.linear_out(torch.cat([query, context], 2)) if self.output_nonlinearity == 'tanh': context = F.tanh(context) elif self.output_nonlinearity == 'relu': context = F.relu(context, inplace=True) if single_query: context = context.squeeze(1) scores_normalized = scores_normalized.squeeze(1) elif not self.batch_first: context = context.transpose(0, 1) scores_normalized = scores_normalized.transpose(0, 1) return context, scores_normalized def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'query_size': 4, 'key_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.onnx import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 // 4)), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 4 * (x1 // 16) + 16 * (x1 % 4)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(out_ptr0 + x2, tmp5, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 4 x2 = xindex // 32 x3 = xindex // 8 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x2 + 16 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x3 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x4, tmp10, xmask) @triton.jit def triton_poi_fused_tanh_tanh_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp3 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tl.store(in_out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4, 8), (8, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_tanh_1[grid(256)](buf1, primals_4, primals_1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf4 = reinterpret_tensor(buf1, (64, 1), (1, 1), 0) del buf1 extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0) del buf4 triton_poi_fused__softmax_3[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = buf5 del buf5 extern_kernels.bmm(buf6, reinterpret_tensor(primals_1, (4, 4, 4), ( 4, 16, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_4[grid(128)](primals_2, buf7, buf8, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.mm(reinterpret_tensor(buf8, (16, 8), (8, 1), 0), reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0) del buf9 buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_tanh_tanh_backward_5[grid(64)](buf10, primals_8, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_8 return reinterpret_tensor(buf10, 
(4, 4, 4), (4, 16, 1), 0 ), reinterpret_tensor(buf6, (4, 4, 4), (4, 16, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0 ), buf2, buf6, reinterpret_tensor(buf8, (16, 8), (8, 1), 0 ), buf11, primals_7, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0), primals_5 class AttentionLayerNew(nn.Module): """ Attention layer according to https://arxiv.org/abs/1409.0473. Params: num_units: Number of units used in the attention layer """ def __init__(self, query_size, key_size, value_size=None, mode= 'bahdanau', normalize=False, dropout=0, batch_first=False, weight_norm=False, bias=True, query_transform=True, output_transform=True, output_nonlinearity='tanh', output_size=None): super(AttentionLayerNew, self).__init__() assert mode == 'bahdanau' or mode == 'dot_prod' value_size = value_size or key_size self.mode = mode self.query_size = query_size self.key_size = key_size self.value_size = value_size self.normalize = normalize wn_func = wn if weight_norm else lambda x: x if mode == 'bahdanau': self.linear_att = nn.Linear(key_size, 1, bias=bias) if normalize: self.linear_att = nn.utils.weight_norm(self.linear_att) elif normalize: self.scale = nn.Parameter(torch.Tensor([1])) if output_transform: output_size = output_size or query_size self.linear_out = wn_func(nn.Linear(query_size + value_size, output_size, bias=bias)) self.output_size = output_size else: self.output_size = value_size if query_transform: self.linear_q = wn_func(nn.Linear(query_size, key_size, bias=bias)) self.dropout = nn.Dropout(dropout) self.batch_first = batch_first self.output_nonlinearity = output_nonlinearity self.mask = None def set_mask(self, mask): self.mask = mask if mask is not None and not self.batch_first: self.mask = self.mask.t() def calc_score(self, att_query, att_keys): """ att_query is: b x t_q x n att_keys is b x t_k x n return b x t_q x t_k scores """ b, t_k, n = list(att_keys.size()) t_q = att_query.size(1) if self.mode == 'bahdanau': att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n) att_keys = att_keys.unsqueeze(1).expand(b, t_q, t_k, n) sum_qk = att_query + att_keys sum_qk = sum_qk.view(b * t_k * t_q, n) out = self.linear_att(F.tanh(sum_qk)).view(b, t_q, t_k) elif self.mode == 'dot_prod': out = torch.bmm(att_query, att_keys.transpose(1, 2)) if hasattr(self, 'scale'): out = out * self.scale return out def forward(self, input_0, input_1): primals_5 = self.linear_att.weight primals_6 = self.linear_att.bias primals_7 = self.linear_out.weight primals_4 = self.linear_out.bias primals_3 = self.linear_q.weight primals_8 = self.linear_q.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
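The calc_score docstring in the entry above describes the 'bahdanau' scoring: queries of shape (b, t_q, n) and keys of shape (b, t_k, n) are broadcast to (b, t_q, t_k, n) and scored with a single additive projection. A minimal standalone sketch of that step in plain PyTorch, using a hypothetical linear_att layer rather than the compiled kernels above:

import torch
import torch.nn as nn

b, t_q, t_k, n = 4, 4, 4, 4
att_query = torch.rand(b, t_q, n)
att_keys = torch.rand(b, t_k, n)
linear_att = nn.Linear(n, 1)  # hypothetical stand-in for self.linear_att

# Broadcast every query against every key, then score each pair additively.
q = att_query.unsqueeze(2).expand(b, t_q, t_k, n)
k = att_keys.unsqueeze(1).expand(b, t_q, t_k, n)
scores = linear_att(torch.tanh(q + k)).squeeze(-1)  # (b, t_q, t_k)
attn = torch.softmax(scores, dim=-1)                # one distribution per query
assert attn.shape == (b, t_q, t_k)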
jonndoe/Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch
AttentionLayer
false
3781
[ "MIT" ]
0
d27d2d390f0831330405c16bd29c7f331ad2007a
https://github.com/jonndoe/Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch/tree/d27d2d390f0831330405c16bd29c7f331ad2007a
FourierConv1d
import torch class FourierConv1d(torch.nn.Module): def __init__(self, in_channels, out_channels, size, bias=True, periodic =False): super(FourierConv1d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels if not periodic: self.size = size else: self.size = size // 2 self.weights = torch.nn.Parameter(torch.view_as_real(1 / ( in_channels * out_channels) * torch.rand(in_channels, out_channels, self.size, dtype=torch.cfloat))) self.biases = torch.nn.Parameter(torch.view_as_real(1 / out_channels * torch.rand(out_channels, self.size, dtype=torch. cfloat))) self.bias = bias self.periodic = periodic def forward(self, x): if not self.periodic: padding = self.size x = torch.nn.functional.pad(x, [0, padding]) x_ft = torch.fft.rfft(x) out_ft = torch.zeros_like(x_ft) out_ft[:, :, :self.size] = torch.einsum('bix,iox->box', x_ft[:, :, :self.size], torch.view_as_complex(self.weights)) if self.bias: out_ft[:, :, :self.size] += torch.view_as_complex(self.biases) out = torch.fft.irfft(out_ft, n=x.size(-1)) if not self.periodic: out = out[..., :-padding] return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'size': 4}]
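A minimal usage sketch for the FourierConv1d module above, with the same toy sizes as get_inputs() and get_init_inputs(); it only checks that the non-periodic path pads internally and returns an output with the input's length:

import torch

# Assumes the FourierConv1d class defined above is in scope.
conv = FourierConv1d(in_channels=4, out_channels=4, size=4)
x = torch.rand(4, 4, 4)  # (batch, channels, length)
y = conv(x)
# Non-periodic mode pads the signal by `size` before the rfft, multiplies the
# first `size` Fourier modes by the learned complex weights, then trims the
# padding after the inverse transform, so the output length matches the input.
assert y.shape == x.shape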
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x3 = xindex // 8 x4 = xindex % 32 x5 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 10 * x3), xmask) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x5, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 2), (32, 8, 2, 1)) assert_size_stride(primals_3, (4, 4, 2), (8, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(128)](primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = torch.ops.aten._fft_r2c.default(buf0, [2], 0, True) buf2 = buf1 del buf1 buf3 = torch.ops.aten.full.default([4, 4, 5], 0, dtype=torch. 
complex64, layout=torch.strided, device=device(type='cuda', index=0), pin_memory=False) buf4 = buf3 del buf3 buf5 = torch.ops.aten.slice.Tensor(buf2, 2, 0, 4) buf6 = buf5 buf7 = torch.ops.aten.view_as_complex.default(primals_2) buf8 = buf7 buf9 = torch.ops.aten.unsqueeze.default(buf6, 3) buf10 = buf9 buf11 = torch.ops.aten.permute.default(buf10, [0, 3, 2, 1]) buf12 = buf11 buf13 = torch.ops.aten.unsqueeze.default(buf8, 3) buf14 = buf13 buf15 = torch.ops.aten.permute.default(buf14, [3, 1, 2, 0]) buf16 = buf15 buf17 = torch.ops.aten.permute.default(buf12, [2, 0, 3, 1]) buf18 = buf17 buf19 = torch.ops.aten.reshape.default(buf18, [4, 4, 4]) buf20 = buf19 buf21 = torch.ops.aten.permute.default(buf16, [2, 3, 1, 0]) buf22 = buf21 buf23 = torch.ops.aten.reshape.default(buf22, [4, 4, 4]) buf24 = buf23 buf25 = torch.ops.aten.bmm.default(buf20, buf24) del buf13 del buf14 del buf15 del buf16 del buf21 del buf22 del buf23 del buf24 del buf7 del buf8 del primals_2 buf26 = buf25 del buf25 buf27 = torch.ops.aten.reshape.default(buf26, [4, 4, 1, 4]) buf28 = buf27 buf29 = torch.ops.aten.permute.default(buf28, [1, 3, 0, 2]) buf30 = buf29 buf31 = torch.ops.aten.reshape.default(buf30, [4, 4, 4]) buf32 = buf31 buf33 = torch.ops.aten.slice.Tensor(buf4, 2, 0, 4) buf34 = buf33 buf35 = torch.ops.aten.copy.default(buf34, buf32) del buf26 del buf27 del buf28 del buf29 del buf30 del buf31 del buf32 buf36 = buf35 del buf35 buf37 = torch.ops.aten.slice_scatter.default(buf4, buf36, 2, 0, 4) del buf36 buf38 = buf37 del buf37 buf39 = torch.ops.aten.view_as_complex.default(primals_3) buf40 = buf39 buf41 = torch.ops.aten.slice.Tensor(buf38, 2, 0, 4) buf42 = buf41 buf43 = torch.ops.aten.view.dtype(buf42, torch.float32) buf44 = buf43 buf45 = torch.ops.aten.view.dtype(buf40, torch.float32) buf46 = buf45 buf47 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf0 triton_poi_fused_add_1[grid(128)](buf44, buf46, buf47, 128, XBLOCK= 128, num_warps=4, num_stages=1) del buf39 del buf40 del buf41 del buf42 del buf43 del buf44 del buf45 del buf46 del primals_3 buf48 = torch.ops.aten.view.dtype(reinterpret_tensor(buf47, (4, 4, 8), (32, 8, 1), 0), torch.complex64) buf49 = buf48 buf50 = torch.ops.aten.slice_scatter.default(buf38, buf49, 2, 0, 4) del buf38 del buf47 del buf48 del buf49 buf51 = buf50 del buf50 buf52 = torch.ops.aten.slice.Tensor(buf51, 2, 0, 4) buf53 = buf52 buf54 = torch.ops.aten.slice_scatter.default(buf51, buf53, 2, 0, 4) del buf51 del buf52 del buf53 buf55 = buf54 del buf54 buf56 = torch.ops.aten._fft_c2r.default(buf55, [2], 2, 8) del buf55 buf57 = buf56 del buf56 buf58 = torch.ops.aten.permute.default(buf20, [0, 2, 1]) buf59 = buf58 buf60 = torch.ops.aten._conj.default(buf59) buf61 = buf60 return reinterpret_tensor(buf57, (4, 4, 4), (32, 8, 1), 0), buf4, buf61 class FourierConv1dNew(torch.nn.Module): def __init__(self, in_channels, out_channels, size, bias=True, periodic =False): super(FourierConv1dNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels if not periodic: self.size = size else: self.size = size // 2 self.weights = torch.nn.Parameter(torch.view_as_real(1 / ( in_channels * out_channels) * torch.rand(in_channels, out_channels, self.size, dtype=torch.cfloat))) self.biases = torch.nn.Parameter(torch.view_as_real(1 / out_channels * torch.rand(out_channels, self.size, dtype=torch. 
cfloat))) self.bias = bias self.periodic = periodic def forward(self, input_0): primals_2 = self.weights primals_3 = self.biases primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
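A hedged sanity check comparing the compiled FourierConv1dNew against the eager FourierConv1d above. This is only a sketch: it assumes a CUDA device (call() launches Triton kernels and GPU FFT ops) and copies the parameters so both modules share the same weights.

import torch

torch.manual_seed(0)
ref = FourierConv1d(4, 4, 4).cuda()
new = FourierConv1dNew(4, 4, 4).cuda()
new.load_state_dict(ref.state_dict())  # both classes expose 'weights' and 'biases'

x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    expected = ref(x)
    actual = new(x)
torch.testing.assert_close(actual, expected, rtol=1e-4, atol=1e-4)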
julian-parker/DAFX22_FNO
FourierConv1d
false
3782
[ "MIT" ]
0
72f30144317a3f8ba8ea23ecf9a0333c81fc87db
https://github.com/julian-parker/DAFX22_FNO/tree/72f30144317a3f8ba8ea23ecf9a0333c81fc87db
Duel_QNetwork
import torch import torch.nn as nn import torch.nn.functional as F class Duel_QNetwork(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed """ super(Duel_QNetwork, self).__init__() self.seed = torch.manual_seed(seed) self.in_fc1 = nn.Linear(state_size, 64) self.hidden = nn.Linear(64, 64) self.value = nn.Linear(64, action_size) self.advantage = nn.Linear(64, action_size) self.dropout = nn.Dropout(0.4) def forward(self, state): """Build a network that maps state -> action values.""" state = F.relu(self.in_fc1(state)) state = F.relu(self.hidden(state)) value = self.value(state) advantage = self.advantage(state) state = value + (advantage - torch.mean(advantage)) return state def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
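The forward pass above merges the two streams as value + (advantage - torch.mean(advantage)). Note that torch.mean(advantage) is a single scalar over the whole tensor, whereas the dueling DQN paper subtracts a per-state mean over the action dimension. A small illustration of the difference with made-up numbers:

import torch

# Two states (rows), four actions (columns); the numbers are made up.
value = torch.tensor([[1.0, 1.0, 1.0, 1.0],
                      [2.0, 2.0, 2.0, 2.0]])
advantage = torch.tensor([[0.0, 2.0, 4.0, 6.0],
                          [10.0, 10.0, 10.0, 10.0]])

# As written in the module above: one scalar mean shared by every state.
q_global = value + (advantage - torch.mean(advantage))
# Per-state mean over actions, as in Wang et al. (2016).
q_per_state = value + (advantage - advantage.mean(dim=-1, keepdim=True))

print(q_global)     # both rows are shifted by the shared mean 6.5
print(q_per_state)  # each row is centred around its own value estimate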
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_per_fused_add_mean_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 4 tmp0 = tl.load(in_ptr0 + r0, None) tmp4 = tl.load(in_out_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp6 = tmp4 + tmp5 tmp7 = 256.0 tmp8 = tmp3 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp6 + tmp9 tl.store(in_out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp10, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 64), (64, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1, primals_2, buf9, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf3, primals_5, buf8, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 64), (64, 1), 0), 
reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), out=buf4) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf5) del primals_9 buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_per_fused_add_mean_sub_1[grid(1)](buf7, buf5, primals_7, 1, 256, num_warps=2, num_stages=1) del buf5 del primals_7 return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor( buf3, (64, 64), (64, 1), 0 ), primals_8, primals_6, buf8, primals_4, buf9 class Duel_QNetworkNew(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed """ super(Duel_QNetworkNew, self).__init__() self.seed = torch.manual_seed(seed) self.in_fc1 = nn.Linear(state_size, 64) self.hidden = nn.Linear(64, 64) self.value = nn.Linear(64, action_size) self.advantage = nn.Linear(64, action_size) self.dropout = nn.Dropout(0.4) def forward(self, input_0): primals_1 = self.in_fc1.weight primals_2 = self.in_fc1.bias primals_4 = self.hidden.weight primals_5 = self.hidden.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_8 = self.advantage.weight primals_9 = self.advantage.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
jsztompka/DuelDQN
Duel_QNetwork
false
3783
[ "MIT" ]
0
3b1234425b66034ef233ac988305dc13ffbf7ace
https://github.com/jsztompka/DuelDQN/tree/3b1234425b66034ef233ac988305dc13ffbf7ace
VAE
import torch from torch import nn from torch.nn import functional as F class Encoder(nn.Module): def __init__(self, latent_size): super().__init__() self.latent_size = latent_size self.conv1 = nn.Conv2d(3, 32, 4, stride=2) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 128, 4, stride=2) self.conv4 = nn.Conv2d(128, 256, 4, stride=2) self.flatten = nn.Flatten() self.fc_mu = nn.Linear(2 * 2 * 256, latent_size) self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = F.relu(self.conv4(x)) x = self.flatten(x) mu = self.fc_mu(x) logsigma = self.fc_logsigma(x) return mu, logsigma class Decoder(nn.Module): def __init__(self, latent_size): super().__init__() self.latent_size = latent_size self.fc1 = nn.Linear(latent_size, 1024) self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2) self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2) self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2) self.deconv4 = nn.ConvTranspose2d(32, 3, 6, stride=2) def forward(self, x): x = F.relu(self.fc1(x)) x = x.view(x.size(0), x.size(1), 1, 1) x = F.relu(self.deconv1(x)) x = F.relu(self.deconv2(x)) x = F.relu(self.deconv3(x)) x = torch.sigmoid(self.deconv4(x)) return x class VAE(nn.Module): def __init__(self, latent_size): super().__init__() self.encoder = Encoder(latent_size) self.decoder = Decoder(latent_size) def encode(self, x, reparameterize=False): mu, logsigma = self.encoder(x) if reparameterize: sigma = logsigma.exp() eps = torch.randn_like(sigma) z = eps.mul(sigma).add_(mu) return z return mu def forward(self, x): mu, logsigma = self.encoder(x) sigma = logsigma.exp() eps = torch.randn_like(sigma) z = eps.mul(sigma).add_(mu) recon_x = self.decoder(z) return recon_x, mu, logsigma def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {'latent_size': 4}]
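VAE.forward and VAE.encode above use the reparameterization trick: the encoder predicts mu and logsigma, and a latent sample is formed as z = mu + eps * exp(logsigma) with eps drawn from a standard normal, so gradients flow into mu and logsigma but not into the noise. A minimal standalone sketch of just that step (note the second head is treated as log sigma, not log variance):

import torch

torch.manual_seed(0)
mu = torch.zeros(4, 4, requires_grad=True)
logsigma = torch.zeros(4, 4, requires_grad=True)

sigma = logsigma.exp()
eps = torch.randn_like(sigma)   # sampled outside the autograd graph
z = eps.mul(sigma).add_(mu)     # z = mu + eps * sigma, same ops as the module above

z.sum().backward()
assert mu.grad is not None and logsigma.grad is not None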
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 96 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 96 xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 108 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1024 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 256 * x2 + 1024 * y1), tmp6, xmask) @triton.jit def triton_poi_fused_add_exp_mul_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp2 = tl_math.exp(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 43264 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def 
triton_poi_fused_convolution_sigmoid_18(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 3 y1 = yindex // 3 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 12288 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 ) = args args.clear() assert_size_stride(primals_1, (32, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (4, 1024), (1024, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 1024), (1024, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (1024, 4), (4, 1)) assert_size_stride(primals_15, (1024,), (1,)) assert_size_stride(primals_16, (1024, 128, 5, 5), (3200, 25, 5, 1)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_19, (64,), (1,)) assert_size_stride(primals_20, (64, 32, 6, 6), (1152, 36, 6, 1)) assert_size_stride(primals_21, (32,), (1,)) assert_size_stride(primals_22, (32, 3, 6, 6), (108, 36, 6, 1)) assert_size_stride(primals_23, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 3, 4, 4), (48, 1, 12, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(96, 16)](primals_1, buf0, 96, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch. 
float32) triton_poi_fused_2[grid(2048, 16)](primals_4, buf2, 2048, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32) triton_poi_fused_4[grid(32768, 16)](primals_8, buf4, 32768, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128), torch.float32) triton_poi_fused_5[grid(131072, 25)](primals_16, buf5, 131072, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_16 buf6 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_6[grid(8192, 25)](primals_18, buf6, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_18 buf7 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32), torch .float32) triton_poi_fused_7[grid(2048, 36)](primals_20, buf7, 2048, 36, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_20 buf8 = empty_strided_cuda((32, 3, 6, 6), (108, 1, 18, 3), torch.float32 ) triton_poi_fused_8[grid(96, 36)](primals_22, buf8, 96, 36, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_22 buf9 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 32, 31, 31), (30752, 1, 992, 32)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_9[grid(123008)](buf10, primals_2, 123008, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf11 = extern_kernels.convolution(buf10, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 14, 14), (12544, 1, 896, 64)) buf12 = buf11 del buf11 triton_poi_fused_convolution_relu_10[grid(50176)](buf12, primals_5, 50176, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf13 = extern_kernels.convolution(buf12, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 128, 6, 6), (4608, 1, 768, 128)) buf14 = buf13 del buf13 triton_poi_fused_convolution_relu_11[grid(18432)](buf14, primals_7, 18432, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf15 = extern_kernels.convolution(buf14, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 256, 2, 2), (1024, 1, 512, 256)) buf16 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch. 
float32) buf33 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_12[grid(1024, 4)]( buf15, primals_9, buf16, buf33, 1024, 4, XBLOCK=1, YBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf16, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf17) del primals_11 buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf16, (4, 1024 ), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf18) del primals_13 buf19 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf20 = buf19 del buf19 buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_exp_mul_13[grid(16)](buf20, buf18, buf17, buf21, 16, XBLOCK=16, num_warps=1, num_stages=1) buf22 = reinterpret_tensor(buf15, (4, 1024), (1024, 1), 0) del buf15 extern_kernels.mm(buf21, reinterpret_tensor(primals_14, (4, 1024), (1, 4), 0), out=buf22) buf23 = buf22 del buf22 buf32 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool) triton_poi_fused_relu_threshold_backward_14[grid(4096)](buf23, primals_15, buf32, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf24 = extern_kernels.convolution(reinterpret_tensor(buf23, (4, 1024, 1, 1), (1024, 1, 0, 0), 0), buf5, stride=(2, 2), padding= (0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 5, 5), (3200, 1, 640, 128)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_15[grid(12800)](buf25, primals_17, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 buf26 = extern_kernels.convolution(buf25, buf6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 64, 13, 13), (10816, 1, 832, 64)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_16[grid(43264)](buf27, primals_19, 43264, XBLOCK=256, num_warps=4, num_stages=1) del primals_19 buf28 = extern_kernels.convolution(buf27, buf7, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 32, 30, 30), (28800, 1, 960, 32)) buf29 = buf28 del buf28 triton_poi_fused_convolution_relu_17[grid(115200)](buf29, primals_21, 115200, XBLOCK=1024, num_warps=4, num_stages=1) del primals_21 buf30 = extern_kernels.convolution(buf29, buf8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 3, 64, 64), (12288, 1, 192, 3)) buf31 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_sigmoid_18[grid(12, 4096)](buf30, primals_23, buf31, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del buf30 del primals_23 return (buf31, buf17, buf18, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf10, buf12, buf14, reinterpret_tensor(buf16, (4, 1024 ), (1024, 1), 0), buf18, buf20, buf21, reinterpret_tensor(buf23, (4, 1024, 1, 1), (1024, 1, 1, 1), 0), buf25, buf27, buf29, buf31, buf32, primals_14, primals_12, primals_10, buf33) class Encoder(nn.Module): def __init__(self, latent_size): super().__init__() self.latent_size = latent_size self.conv1 = nn.Conv2d(3, 
32, 4, stride=2) self.conv2 = nn.Conv2d(32, 64, 4, stride=2) self.conv3 = nn.Conv2d(64, 128, 4, stride=2) self.conv4 = nn.Conv2d(128, 256, 4, stride=2) self.flatten = nn.Flatten() self.fc_mu = nn.Linear(2 * 2 * 256, latent_size) self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = F.relu(self.conv4(x)) x = self.flatten(x) mu = self.fc_mu(x) logsigma = self.fc_logsigma(x) return mu, logsigma class Decoder(nn.Module): def __init__(self, latent_size): super().__init__() self.latent_size = latent_size self.fc1 = nn.Linear(latent_size, 1024) self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2) self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2) self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2) self.deconv4 = nn.ConvTranspose2d(32, 3, 6, stride=2) def forward(self, x): x = F.relu(self.fc1(x)) x = x.view(x.size(0), x.size(1), 1, 1) x = F.relu(self.deconv1(x)) x = F.relu(self.deconv2(x)) x = F.relu(self.deconv3(x)) x = torch.sigmoid(self.deconv4(x)) return x class VAENew(nn.Module): def __init__(self, latent_size): super().__init__() self.encoder = Encoder(latent_size) self.decoder = Decoder(latent_size) def encode(self, x, reparameterize=False): mu, logsigma = self.encoder(x) if reparameterize: sigma = logsigma.exp() eps = torch.randn_like(sigma) z = eps.mul(sigma).add_(mu) return z return mu def forward(self, input_0): primals_1 = self.encoder.conv1.weight primals_2 = self.encoder.conv1.bias primals_4 = self.encoder.conv2.weight primals_5 = self.encoder.conv2.bias primals_6 = self.encoder.conv3.weight primals_7 = self.encoder.conv3.bias primals_8 = self.encoder.conv4.weight primals_9 = self.encoder.conv4.bias primals_10 = self.encoder.fc_mu.weight primals_11 = self.encoder.fc_mu.bias primals_12 = self.encoder.fc_logsigma.weight primals_13 = self.encoder.fc_logsigma.bias primals_14 = self.decoder.fc1.weight primals_15 = self.decoder.fc1.bias primals_16 = self.decoder.deconv1.weight primals_17 = self.decoder.deconv1.bias primals_18 = self.decoder.deconv2.weight primals_19 = self.decoder.deconv2.bias primals_20 = self.decoder.deconv3.weight primals_21 = self.decoder.deconv3.bias primals_22 = self.decoder.deconv4.weight primals_23 = self.decoder.deconv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23]) return output[0], output[1], output[2]
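One caveat when exercising VAENew above: call() draws the reparameterization noise internally (the torch.ops.aten.randn node), so the reconstruction is stochastic and an element-wise comparison against the eager VAE is not meaningful without controlling the random state. A hedged shape-and-range check instead, assuming a CUDA device and that the class above is in scope:

import torch

torch.manual_seed(0)
model = VAENew(latent_size=4).cuda()
x = torch.rand(4, 3, 64, 64, device='cuda')

with torch.no_grad():
    recon_x, mu, logsigma = model(x)

assert recon_x.shape == (4, 3, 64, 64)
assert mu.shape == (4, 4) and logsigma.shape == (4, 4)
assert recon_x.min() >= 0.0 and recon_x.max() <= 1.0  # final sigmoid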
jinyeom/ga-plastic-models
VAE
false
3784
[ "MIT" ]
0
e38b245ae51c35a5f32679cc9f215463a3d58f1a
https://github.com/jinyeom/ga-plastic-models/tree/e38b245ae51c35a5f32679cc9f215463a3d58f1a
ContrastiveLoss
import torch from torchvision import transforms as transforms import torch.nn as nn import torch.nn.functional as F class ContrastiveLoss(nn.Module): """ Contrastive loss function. Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf """ def __init__(self, margin): super(ContrastiveLoss, self).__init__() self.margin = margin def forward(self, output1, output2, label): euclidean_distance = F.pairwise_distance(output1, output2, keepdim=True ) None loss_contrastive = torch.mean((1 - label) * torch.pow( euclidean_distance, 2) + label * torch.pow(torch.clamp(self. margin - euclidean_distance, min=0.0), 2)) return loss_contrastive def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'margin': 4}]
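The loss above is the Hadsell-Chopra-LeCun contrastive loss: for a pair with Euclidean distance d and label y (here y = 1 marks a dissimilar pair), it averages (1 - y) * d^2 + y * max(margin - d, 0)^2. A small sketch computing the same quantity by hand and comparing it with the module, assuming the ContrastiveLoss class above is in scope:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
out1 = torch.rand(4, 8)
out2 = torch.rand(4, 8)
label = torch.tensor([0.0, 1.0, 0.0, 1.0])  # 0 = similar pair, 1 = dissimilar pair
margin = 4.0

d = F.pairwise_distance(out1, out2)
loss_manual = torch.mean((1 - label) * d.pow(2)
                         + label * torch.clamp(margin - d, min=0.0).pow(2))

criterion = ContrastiveLoss(margin=margin)
# The module keeps a trailing distance dim, so give the label one too before broadcasting.
loss_module = criterion(out1, out2, label.unsqueeze(1))
torch.testing.assert_close(loss_module, loss_manual)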
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torchvision import transforms as transforms import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.store(out_ptr0 + x0, tmp24, xmask) @triton.jit def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, None) tmp3 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 * tmp3 tmp5 = tmp2 * tmp4 tmp6 = 4.0 tmp7 = tmp6 - tmp3 tmp8 = 0.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp9 * tmp9 tmp11 = tmp0 * tmp10 tmp12 = tmp5 + tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2, arg2_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf0 return buf2, class ContrastiveLossNew(nn.Module): """ Contrastive loss function. 
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf """ def __init__(self, margin): super(ContrastiveLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
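A hedged equivalence sketch between the eager ContrastiveLoss and the Triton-backed ContrastiveLossNew above, using the same toy shapes as get_inputs(). It assumes a CUDA device, since call() launches the fused kernels on the GPU, and that both classes are in scope; note the fused kernel hard-codes margin = 4.

import torch

torch.manual_seed(0)
out1, out2, label = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))

ref = ContrastiveLoss(margin=4)
new = ContrastiveLossNew(margin=4)

with torch.no_grad():
    expected = ref(out1, out2, label)
actual = new(out1, out2, label)
torch.testing.assert_close(actual, expected, rtol=1e-4, atol=1e-4)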
justinluyao/phd_thesis
ContrastiveLoss
false
3785
[ "MIT" ]
0
0a61f5deaac86dd34839ce24c2ad89e1411a8540
https://github.com/justinluyao/phd_thesis/tree/0a61f5deaac86dd34839ce24c2ad89e1411a8540
MultiHeadSelfAttention
from torch.nn import Module import torch from torch.nn import Dropout from torch.nn import Linear def masked_softmax(vector: 'torch.Tensor', mask: 'torch.Tensor', dim: 'int'=-1 ) ->torch.Tensor: """ ``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be masked. This performs a softmax on just the non-masked portions of ``vector``. Passing ``None`` in for the mask is also acceptable; you'll just get a regular softmax. ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask, do it yourself before passing the mask into this function. In the case that the input vector is completely masked, this function returns an array of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model that uses categorical cross-entropy loss. """ if mask is None: result = torch.nn.functional.softmax(vector, dim=dim) else: mask = mask.float() while mask.dim() < vector.dim(): mask = mask.unsqueeze(1) result = torch.nn.functional.softmax(vector + (1 - mask) * - 10000000000.0, dim=dim) return result def weighted_sum(matrix: 'torch.Tensor', attention: 'torch.Tensor' ) ->torch.Tensor: """ Takes a matrix of vectors and a set of weights over the rows in the matrix (which we call an "attention" vector), and returns a weighted sum of the rows in the matrix. This is the typical computation performed after an attention mechanism. Note that while we call this a "matrix" of vectors and an attention "vector", we also handle higher-order tensors. We always sum over the second-to-last dimension of the "matrix", and we assume that all dimensions in the "matrix" prior to the last dimension are matched in the "vector". Non-matched dimensions in the "vector" must be `directly after the batch dimension`. For example, say I have a "matrix" with dimensions ``(batch_size, num_queries, num_words, embedding_dim)``. The attention "vector" then must have at least those dimensions, and could have more. Both: - ``(batch_size, num_queries, num_words)`` (distribution over words for each query) - ``(batch_size, num_documents, num_queries, num_words)`` (distribution over words in a query for each document) are valid input "vectors", producing tensors of shape: ``(batch_size, num_queries, embedding_dim)`` and ``(batch_size, num_documents, num_queries, embedding_dim)`` respectively. """ if attention.dim() == 2 and matrix.dim() == 3: return attention.unsqueeze(1).bmm(matrix).squeeze(1) if attention.dim() == 3 and matrix.dim() == 3: return attention.bmm(matrix) if matrix.dim() - 1 < attention.dim(): expanded_size = list(matrix.size()) for i in range(attention.dim() - matrix.dim() + 1): matrix = matrix.unsqueeze(1) expanded_size.insert(i + 1, attention.size(i + 1)) matrix = matrix.expand(*expanded_size) intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix return intermediate.sum(dim=-2) class MultiHeadSelfAttention(Module): """ This class implements the key-value scaled dot product attention mechanism detailed in the paper `Attention is all you Need <https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077>`_ . 
The attention mechanism is a weighted sum of a projection V of the inputs, with respect to the scaled, normalised dot product of Q and K, which are also both linear projections of the input. This procedure is repeated for each attention head, using different parameters. Parameters ---------- num_heads : ``int``, required. The number of attention heads to use. input_dim : ``int``, required. The size of the last dimension of the input tensor. attention_dim ``int``, required. The total dimension of the query and key projections which comprise the dot product attention function. Must be divisible by ``num_heads``. values_dim : ``int``, required. The total dimension which the input is projected to for representing the values, which are combined using the attention. Must be divisible by ``num_heads``. output_projection_dim : ``int``, optional (default = None) The dimensionality of the final output projection. If this is not passed explicitly, the projection has size `input_size`. attention_dropout_prob : ``float``, optional (default = 0.1). The dropout probability applied to the normalised attention distributions. """ def __init__(self, input_dim: 'int', attention_dim: 'int', values_dim: 'int', num_heads: 'int'=1, output_projection_dim: 'int'=None, attention_dropout_prob: 'float'=0.1) ->None: super(MultiHeadSelfAttention, self).__init__() self._num_heads = num_heads self._input_dim = input_dim self._output_dim = output_projection_dim or input_dim self._attention_dim = attention_dim self._values_dim = values_dim if attention_dim % num_heads != 0: raise ValueError( f'Key size ({attention_dim}) must be divisible by the number of attention heads ({num_heads}).' ) if values_dim % num_heads != 0: raise ValueError( f'Value size ({values_dim}) must be divisible by the number of attention heads ({num_heads}).' ) self._combined_projection = Linear(input_dim, 2 * attention_dim + values_dim) self._scale = (input_dim // num_heads) ** 0.5 self._output_projection = Linear(values_dim, self._output_dim) self._attention_dropout = Dropout(attention_dropout_prob) def get_input_dim(self): return self._input_dim def get_output_dim(self): return self._output_dim def is_bidirectional(self): return False def forward(self, inputs: 'torch.Tensor', mask: 'torch.LongTensor'=None ) ->torch.FloatTensor: """ Parameters ---------- inputs : ``torch.FloatTensor``, required. A tensor of shape (batch_size, timesteps, input_dim) mask : ``torch.FloatTensor``, optional (default = None). A tensor of shape (batch_size, timesteps). Returns ------- A tensor of shape (batch_size, timesteps, output_projection_dim), where output_projection_dim = input_dim by default. """ num_heads = self._num_heads batch_size, timesteps, _ = inputs.size() if mask is None: mask = inputs.new_ones(batch_size, timesteps) combined_projection = self._combined_projection(inputs) queries, keys, *values = combined_projection.split(self. 
_attention_dim, -1) queries = queries.contiguous() keys = keys.contiguous() values = torch.cat(values, -1).contiguous() values_per_head = values.view(batch_size, timesteps, num_heads, int (self._values_dim / num_heads)) values_per_head = values_per_head.transpose(1, 2).contiguous() values_per_head = values_per_head.view(batch_size * num_heads, timesteps, int(self._values_dim / num_heads)) queries_per_head = queries.view(batch_size, timesteps, num_heads, int(self._attention_dim / num_heads)) queries_per_head = queries_per_head.transpose(1, 2).contiguous() queries_per_head = queries_per_head.view(batch_size * num_heads, timesteps, int(self._attention_dim / num_heads)) keys_per_head = keys.view(batch_size, timesteps, num_heads, int( self._attention_dim / num_heads)) keys_per_head = keys_per_head.transpose(1, 2).contiguous() keys_per_head = keys_per_head.view(batch_size * num_heads, timesteps, int(self._attention_dim / num_heads)) scaled_similarities = torch.bmm(queries_per_head / self._scale, keys_per_head.transpose(1, 2)) attention = masked_softmax(scaled_similarities, mask.repeat(1, num_heads).view(batch_size * num_heads, timesteps)) attention = self._attention_dropout(attention) outputs = weighted_sum(values_per_head, attention) outputs = outputs.view(batch_size, num_heads, timesteps, int(self. _values_dim / num_heads)) outputs = outputs.transpose(1, 2).contiguous() outputs = outputs.view(batch_size, timesteps, self._values_dim) outputs = self._output_projection(outputs) return outputs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'attention_dim': 4, 'values_dim': 4}]
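masked_softmax above implements masking by adding (1 - mask) * -1e10 to the logits before the softmax, so padded positions receive (numerically) zero probability and the remaining positions renormalise among themselves. A small demonstration, assuming the masked_softmax function above is in scope:

import torch

logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
mask = torch.tensor([[1, 1, 0, 0]])  # the last two positions are padding

probs = masked_softmax(logits, mask)
print(probs)
assert torch.allclose(probs[:, 2:], torch.zeros(1, 2))   # masked positions get ~0
assert torch.allclose(probs.sum(dim=-1), torch.ones(1))  # the rest renormalises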
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch.nn import Dropout from torch.nn import Linear assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() 
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(64)](buf0, primals_3, buf1, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_1[grid(64)](buf0, primals_3, buf2, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(64)](buf0, primals_3, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del primals_3 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_4[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = buf5 del buf5 extern_kernels.bmm(buf6, buf1, out=buf7) buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf7, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_5 return reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0 ), primals_4, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), buf3 def masked_softmax(vector: 'torch.Tensor', mask: 'torch.Tensor', dim: 'int'=-1 ) ->torch.Tensor: """ ``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be masked. This performs a softmax on just the non-masked portions of ``vector``. Passing ``None`` in for the mask is also acceptable; you'll just get a regular softmax. ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask, do it yourself before passing the mask into this function. In the case that the input vector is completely masked, this function returns an array of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model that uses categorical cross-entropy loss. """ if mask is None: result = torch.nn.functional.softmax(vector, dim=dim) else: mask = mask.float() while mask.dim() < vector.dim(): mask = mask.unsqueeze(1) result = torch.nn.functional.softmax(vector + (1 - mask) * - 10000000000.0, dim=dim) return result def weighted_sum(matrix: 'torch.Tensor', attention: 'torch.Tensor' ) ->torch.Tensor: """ Takes a matrix of vectors and a set of weights over the rows in the matrix (which we call an "attention" vector), and returns a weighted sum of the rows in the matrix. This is the typical computation performed after an attention mechanism. 
Note that while we call this a "matrix" of vectors and an attention "vector", we also handle higher-order tensors. We always sum over the second-to-last dimension of the "matrix", and we assume that all dimensions in the "matrix" prior to the last dimension are matched in the "vector". Non-matched dimensions in the "vector" must be `directly after the batch dimension`. For example, say I have a "matrix" with dimensions ``(batch_size, num_queries, num_words, embedding_dim)``. The attention "vector" then must have at least those dimensions, and could have more. Both: - ``(batch_size, num_queries, num_words)`` (distribution over words for each query) - ``(batch_size, num_documents, num_queries, num_words)`` (distribution over words in a query for each document) are valid input "vectors", producing tensors of shape: ``(batch_size, num_queries, embedding_dim)`` and ``(batch_size, num_documents, num_queries, embedding_dim)`` respectively. """ if attention.dim() == 2 and matrix.dim() == 3: return attention.unsqueeze(1).bmm(matrix).squeeze(1) if attention.dim() == 3 and matrix.dim() == 3: return attention.bmm(matrix) if matrix.dim() - 1 < attention.dim(): expanded_size = list(matrix.size()) for i in range(attention.dim() - matrix.dim() + 1): matrix = matrix.unsqueeze(1) expanded_size.insert(i + 1, attention.size(i + 1)) matrix = matrix.expand(*expanded_size) intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix return intermediate.sum(dim=-2) class MultiHeadSelfAttentionNew(Module): """ This class implements the key-value scaled dot product attention mechanism detailed in the paper `Attention is all you Need <https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077>`_ . The attention mechanism is a weighted sum of a projection V of the inputs, with respect to the scaled, normalised dot product of Q and K, which are also both linear projections of the input. This procedure is repeated for each attention head, using different parameters. Parameters ---------- num_heads : ``int``, required. The number of attention heads to use. input_dim : ``int``, required. The size of the last dimension of the input tensor. attention_dim ``int``, required. The total dimension of the query and key projections which comprise the dot product attention function. Must be divisible by ``num_heads``. values_dim : ``int``, required. The total dimension which the input is projected to for representing the values, which are combined using the attention. Must be divisible by ``num_heads``. output_projection_dim : ``int``, optional (default = None) The dimensionality of the final output projection. If this is not passed explicitly, the projection has size `input_size`. attention_dropout_prob : ``float``, optional (default = 0.1). The dropout probability applied to the normalised attention distributions. """ def __init__(self, input_dim: 'int', attention_dim: 'int', values_dim: 'int', num_heads: 'int'=1, output_projection_dim: 'int'=None, attention_dropout_prob: 'float'=0.1) ->None: super(MultiHeadSelfAttentionNew, self).__init__() self._num_heads = num_heads self._input_dim = input_dim self._output_dim = output_projection_dim or input_dim self._attention_dim = attention_dim self._values_dim = values_dim if attention_dim % num_heads != 0: raise ValueError( f'Key size ({attention_dim}) must be divisible by the number of attention heads ({num_heads}).' 
) if values_dim % num_heads != 0: raise ValueError( f'Value size ({values_dim}) must be divisible by the number of attention heads ({num_heads}).' ) self._combined_projection = Linear(input_dim, 2 * attention_dim + values_dim) self._scale = (input_dim // num_heads) ** 0.5 self._output_projection = Linear(values_dim, self._output_dim) self._attention_dropout = Dropout(attention_dropout_prob) def get_input_dim(self): return self._input_dim def get_output_dim(self): return self._output_dim def is_bidirectional(self): return False def forward(self, input_0): primals_2 = self._combined_projection.weight primals_3 = self._combined_projection.bias primals_4 = self._output_projection.weight primals_5 = self._output_projection.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jsonW0/StrokeOrderEmbeddings
MultiHeadSelfAttention
false
3,786
[ "Apache-2.0" ]
0
aa73b216a118de2efba1d299b96990ba9244fa3f
https://github.com/jsonW0/StrokeOrderEmbeddings/tree/aa73b216a118de2efba1d299b96990ba9244fa3f
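A minimal usage sketch for this record, assuming the code above is importable in one script and a CUDA device is available (the generated call() allocates CUDA buffers); it builds MultiHeadSelfAttentionNew from the record's get_init_inputs() and runs it on get_inputs().

import torch

if torch.cuda.is_available():
    init_args, init_kwargs = get_init_inputs()
    model = MultiHeadSelfAttentionNew(*init_args, **init_kwargs).cuda()
    x, = [t.cuda() for t in get_inputs()]
    out = model(x)
    print(out.shape)  # (4, 4, 4) per the final reinterpret_tensor in call()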
CustomGruCell
import torch
import numpy as np
from torch import nn


class CustomGruCell(nn.Module):
    """
    A forward only GRU cell.
    Input should be: (sequence length x batch size x input_size).
    The output is the output of the final forward call.
    It's not clear if it would be possible to use the output from each cell in a Plan
    because of the assumptions of 2D tensors in backprop.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(CustomGruCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias)
        self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias)
        self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias)
        self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias)
        self.fc_in = nn.Linear(input_size, hidden_size, bias=bias)
        self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias)
        self.init_parameters()

    def init_parameters(self):
        std = 1.0 / np.sqrt(self.hidden_size)
        for w in self.parameters():
            w.data.uniform_(-std, std)

    def forward(self, x, h):
        i_r = self.fc_ir(x)
        h_r = self.fc_hr(h)
        i_z = self.fc_iz(x)
        h_z = self.fc_hz(h)
        i_n = self.fc_in(x)
        h_n = self.fc_hn(h)
        resetgate = (i_r + h_r).sigmoid()
        inputgate = (i_z + h_z).sigmoid()
        newgate = (i_n + resetgate * h_n).tanh()
        hy = newgate + inputgate * (h - newgate)
        return hy


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numpy as np from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_sub_tanh_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_out_ptr1 + x2, xmask) tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x2, xmask) tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr6 + x2, xmask) tmp17 = tl.load(in_ptr7 + x2, xmask) tmp21 = tl.load(in_ptr8 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp18 = tmp7 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = libdevice.tanh(tmp19) tmp22 = tmp21 - tmp20 tmp23 = tmp15 * tmp22 tmp24 = tmp20 + tmp23 tl.store(in_out_ptr0 + x2, tmp7, xmask) tl.store(in_out_ptr1 + x2, tmp15, xmask) tl.store(out_ptr0 + x2, tmp24, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf3) del 
primals_9 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_12, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_11 del primals_12 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_14, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_13 del primals_14 buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf7 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sub_tanh_0[grid(256)](buf6, buf7, primals_2, buf1, primals_5, primals_8, buf3, primals_10, buf4, buf5, primals_6, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del buf3 del primals_10 del primals_2 del primals_5 del primals_8 return buf8, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf4, buf5, buf6, buf7 class CustomGruCellNew(nn.Module): """ A forward only GRU cell. Input should be: (sequence length x batch size x input_size). The output is the output of the final forward call. It's not clear if it would be possible to use the output from each cell in a Plan because of the assumptions of 2D tensors in backprop. """ def __init__(self, input_size, hidden_size, bias=True): super(CustomGruCellNew, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias) self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias) self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias) self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias) self.fc_in = nn.Linear(input_size, hidden_size, bias=bias) self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias) self.init_parameters() def init_parameters(self): std = 1.0 / np.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, input_0, input_1): primals_1 = self.fc_ir.weight primals_2 = self.fc_ir.bias primals_4 = self.fc_hr.weight primals_5 = self.fc_hr.bias primals_7 = self.fc_iz.weight primals_8 = self.fc_iz.bias primals_9 = self.fc_hz.weight primals_10 = self.fc_hz.bias primals_11 = self.fc_in.weight primals_12 = self.fc_in.bias primals_13 = self.fc_hn.weight primals_14 = self.fc_hn.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return output[0]
juharris/PySyft
CustomGruCell
false
3,787
[ "Apache-2.0" ]
0
dbb70f24cc55a7dca032fb06f1a8662cb15092a9
https://github.com/juharris/PySyft/tree/dbb70f24cc55a7dca032fb06f1a8662cb15092a9
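A sketch of how the two variants in this record could be cross-checked, assuming both classes are imported into one script and a CUDA device is available; exact numerical agreement is not guaranteed, so the comparison uses a tolerance.

import torch

if torch.cuda.is_available():
    _, kwargs = get_init_inputs()
    eager = CustomGruCell(**kwargs).cuda()
    fused = CustomGruCellNew(**kwargs).cuda()
    fused.load_state_dict(eager.state_dict())  # both define the same six Linear layers
    x, h = [t.cuda() for t in get_inputs()]
    print(torch.allclose(eager(x, h), fused(x, h), atol=1e-5))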
EncoderImagePrecomp
import torch
import numpy as np
from collections import OrderedDict
import torch.nn as nn
import torch.nn.init


def l2norm(X):
    """L2-normalize columns of X
    """
    norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()
    a = norm.expand_as(X)
    X = torch.div(X, a)
    return X


class EncoderImagePrecomp(nn.Module):

    def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False):
        super(EncoderImagePrecomp, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.use_abs = use_abs
        self.fc = False
        if img_dim != embed_size or True:
            self.fc = nn.Linear(img_dim, embed_size)
            self.init_weights()

    def init_weights(self):
        """Xavier initialization for the fully connected layer
        """
        if self.fc:
            r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)
            self.fc.weight.data.uniform_(-r, r)
            self.fc.bias.data.fill_(0)

    def forward(self, images):
        """Extract image feature vectors."""
        features = images
        if self.fc:
            features = self.fc(images)
        if not self.no_imgnorm:
            features = l2norm(features)
        if self.use_abs:
            features = torch.abs(features)
        return features

    def load_state_dict(self, state_dict):
        """Copies parameters. Overwriting the default one to
        accept state_dict from Full model
        """
        own_state = self.state_dict()
        new_state = OrderedDict()
        for name, param in state_dict.items():
            if name in own_state:
                new_state[name] = param
        super(EncoderImagePrecomp, self).load_state_dict(new_state)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'img_dim': 4, 'embed_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numpy as np from collections import OrderedDict import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0 def l2norm(X): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt() a = norm.expand_as(X) X = torch.div(X, a) return X class EncoderImagePrecompNew(nn.Module): def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False): super(EncoderImagePrecompNew, self).__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.use_abs = use_abs self.fc = False if img_dim != embed_size or True: self.fc = nn.Linear(img_dim, embed_size) self.init_weights() def init_weights(self): """Xavier initialization for the fully connected layer """ if self.fc: r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc. out_features) self.fc.weight.data.uniform_(-r, r) self.fc.bias.data.fill_(0) def load_state_dict(self, state_dict): """Copies parameters. overwritting the default one to accept state_dict from Full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super(EncoderImagePrecompNew, self).load_state_dict(new_state) def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
jwehrmann/seamretrieval
EncoderImagePrecomp
false
3,788
[ "Apache-2.0" ]
0
ff94dccc28d56ffbbb7813832c0adbab7b7c6107
https://github.com/jwehrmann/seamretrieval/tree/ff94dccc28d56ffbbb7813832c0adbab7b7c6107
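A small check that is consistent with the module's definition (a sketch, assuming a CUDA device): since l2norm divides by the norm taken over dim=1, the squared output features should sum to roughly one along that dimension.

import torch

if torch.cuda.is_available():
    _, kwargs = get_init_inputs()
    model = EncoderImagePrecompNew(**kwargs).cuda()
    images, = [t.cuda() for t in get_inputs()]
    feats = model(images)
    print(feats.pow(2).sum(dim=1).mean())  # expected to be close to 1.0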
ATANLoss
import torch
import torch.nn as nn


class ATANLoss(nn.Module):

    def __init__(self):
        super(ATANLoss, self).__init__()

    def forward(self, inputs, targets):
        loss = torch.mean(torch.atan(torch.abs(inputs - targets)))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_atan_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = libdevice.atan(tmp3) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 256.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_atan_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class ATANLossNew(nn.Module): def __init__(self): super(ATANLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
kamomehz/waveletCodingCNN
ATANLoss
false
3,789
[ "MIT" ]
0
50c7db9d986039ded38999b7e4f4265e2250fb90
https://github.com/kamomehz/waveletCodingCNN/tree/50c7db9d986039ded38999b7e4f4265e2250fb90
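A quick sanity check, assuming a CUDA device: the fused reduction should reproduce the eager formula torch.mean(torch.atan(torch.abs(inputs - targets))) up to float32 rounding.

import torch

if torch.cuda.is_available():
    a, b = [t.cuda() for t in get_inputs()]
    fused = ATANLossNew()(a, b)
    eager = torch.atan(torch.abs(a - b)).mean()
    print(fused.item(), eager.item())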
Hidden2DiscreteDeal
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init


class Hidden2DiscreteDeal(nn.Module):

    def __init__(self, input_size, z_size, is_lstm=False, has_bias=True):
        super(Hidden2DiscreteDeal, self).__init__()
        self.z_size = z_size
        latent_size = self.z_size
        if is_lstm:
            self.p_h = nn.Linear(input_size, latent_size, bias=has_bias)
            self.p_c = nn.Linear(input_size, latent_size, bias=has_bias)
        else:
            self.p_h = nn.Linear(input_size, latent_size, bias=has_bias)
        self.is_lstm = is_lstm

    def forward(self, inputs, mask=None):
        """
        :param inputs: batch_size x input_size
        :return:
        """
        if self.is_lstm:
            h, c = inputs
            if h.dim() == 3:
                h = h.squeeze(0)
                c = c.squeeze(0)
            logits = self.p_h(h) + self.p_c(c)
        else:
            logits = self.p_h(inputs)
        log_pz = F.log_softmax(logits, dim=-1)
        return logits, log_pz


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'z_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf1 return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2 class Hidden2DiscreteDealNew(nn.Module): def __init__(self, input_size, z_size, is_lstm=False, has_bias=True): super(Hidden2DiscreteDealNew, self).__init__() self.z_size = z_size latent_size = self.z_size if is_lstm: self.p_h = nn.Linear(input_size, latent_size, bias=has_bias) self.p_c = nn.Linear(input_size, latent_size, bias=has_bias) else: self.p_h = 
nn.Linear(input_size, latent_size, bias=has_bias) self.is_lstm = is_lstm def forward(self, input_0): primals_1 = self.p_h.weight primals_2 = self.p_h.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
justinchiu/NeuralDialog
Hidden2DiscreteDeal
false
3,790
[ "Apache-2.0" ]
0
f272cc2e12ffdd44c94263ee373208a22c057129
https://github.com/justinchiu/NeuralDialog/tree/f272cc2e12ffdd44c94263ee373208a22c057129
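A minimal sketch of the two outputs, assuming a CUDA device: the first return value is the raw logits, the second their log-softmax, so exponentiating it should give rows that sum to about one.

import torch

if torch.cuda.is_available():
    _, kwargs = get_init_inputs()
    model = Hidden2DiscreteDealNew(**kwargs).cuda()
    x, = [t.cuda() for t in get_inputs()]
    logits, log_pz = model(x)
    print(logits.shape, log_pz.exp().sum(dim=-1).mean())  # mean should be ~1.0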
ConvDenoiser
import torch
import torch.nn.init
import torch.nn as nn
import torch.nn.functional as F


class ConvDenoiser(nn.Module):

    def __init__(self):
        super(ConvDenoiser, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 16, 3, padding=1)
        self.conv3 = nn.Conv2d(16, 8, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.t_conv1 = nn.ConvTranspose2d(8, 8, 2, stride=2)
        self.t_conv2 = nn.ConvTranspose2d(8, 16, 2, stride=2)
        self.t_conv3 = nn.ConvTranspose2d(16, 32, 3, stride=2)
        self.conv_out = nn.Conv2d(32, 3, 3, padding=1)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = F.relu(self.conv3(x))
        x = self.pool(x)
        x = F.relu(self.t_conv1(x))
        x = F.relu(self.t_conv2(x))
        x = F.relu(self.t_conv3(x))
        x = F.sigmoid(self.conv_out(x))
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn.init import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = 
tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 8 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 540800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4225 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50700 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4225 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (8, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (8, 8, 2, 2), (32, 4, 2, 1)) assert_size_stride(primals_9, (8,), (1,)) assert_size_stride(primals_10, (8, 16, 2, 2), (64, 4, 2, 1)) assert_size_stride(primals_11, (16,), (1,)) assert_size_stride(primals_12, (16, 32, 3, 3), (288, 9, 3, 1)) 
assert_size_stride(primals_13, (32,), (1,)) assert_size_stride(primals_14, (3, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_15, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf1, buf2, buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(65536)](buf5, primals_5, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(16384)](buf5, buf6, buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 8, 16, 16), (2048, 256, 16, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(8192)](buf9, primals_7, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.float32 ) buf11 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(2048)](buf9, buf10, buf11, 2048, XBLOCK=256, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 8, 16, 16), (2048, 256, 16, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_4[grid(8192)](buf13, primals_9, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf14 = extern_kernels.convolution(buf13, primals_10, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_2[grid(65536)](buf15, primals_11, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_11 buf16 = extern_kernels.convolution(buf15, primals_12, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 32, 65, 65), (135200, 4225, 65, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_6[grid(540800)](buf17, primals_13, 540800, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf18 = extern_kernels.convolution(buf17, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 3, 65, 65), (12675, 4225, 65, 1)) buf19 = buf18 del buf18 
triton_poi_fused_convolution_sigmoid_7[grid(50700)](buf19, primals_15, 50700, XBLOCK=512, num_warps=4, num_stages=1) del primals_15 return (buf19, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19) class ConvDenoiserNew(nn.Module): def __init__(self): super(ConvDenoiserNew, self).__init__() self.conv1 = nn.Conv2d(3, 32, 3, padding=1) self.conv2 = nn.Conv2d(32, 16, 3, padding=1) self.conv3 = nn.Conv2d(16, 8, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.t_conv1 = nn.ConvTranspose2d(8, 8, 2, stride=2) self.t_conv2 = nn.ConvTranspose2d(8, 16, 2, stride=2) self.t_conv3 = nn.ConvTranspose2d(16, 32, 3, stride=2) self.conv_out = nn.Conv2d(32, 3, 3, padding=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.t_conv1.weight primals_9 = self.t_conv1.bias primals_10 = self.t_conv2.weight primals_11 = self.t_conv2.bias primals_12 = self.t_conv3.weight primals_13 = self.t_conv3.bias primals_14 = self.conv_out.weight primals_15 = self.conv_out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
joydeba/autocount
ConvDenoiser
false
3,791
[ "MIT" ]
0
52ddb47726fa34d5f54e2850dc6690b67c768728
https://github.com/joydeba/autocount/tree/52ddb47726fa34d5f54e2850dc6690b67c768728
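A smoke-test sketch, assuming a CUDA device. Note that the traced graph ends in a (4, 3, 65, 65) sigmoid output: t_conv3 (kernel 3, stride 2) maps the 32x32 feature map to 65x65, and the final 3x3 convolution with padding 1 keeps that size.

import torch

if torch.cuda.is_available():
    model = ConvDenoiserNew().cuda()
    x, = [t.cuda() for t in get_inputs()]
    out = model(x)
    print(out.shape)  # (4, 3, 65, 65) per the assert_size_stride on buf18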
SelfAttn
import torch
from torch import nn
from torch.nn import functional as F


class SelfAttn(nn.Module):
    """
    self-attention with learnable parameters
    """

    def __init__(self, dhid):
        super().__init__()
        self.scorer = nn.Linear(dhid, 1)

    def forward(self, inp):
        scores = F.softmax(self.scorer(inp), dim=1)
        cont = scores.transpose(1, 2).bmm(inp).squeeze(1)
        return cont


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dhid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0 ), primals_3, out=buf4) del buf3 return reinterpret_tensor(buf4, (4, 4), (4, 1), 0), primals_3, buf1 class SelfAttnNew(nn.Module): """ self-attention with learnable parameters """ def __init__(self, dhid): super().__init__() self.scorer = nn.Linear(dhid, 1) def forward(self, input_0): primals_1 = self.scorer.weight primals_2 = self.scorer.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
jzhanson/alfred
SelfAttn
false
3,792
[ "MIT" ]
0
d5b540e7c9b53d3f70cc2907503935fecff00018
https://github.com/jzhanson/alfred/tree/d5b540e7c9b53d3f70cc2907503935fecff00018
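A minimal usage sketch, assuming a CUDA device: the module pools a (batch, time, dhid) sequence into one attention-weighted vector per example.

import torch

if torch.cuda.is_available():
    _, kwargs = get_init_inputs()
    model = SelfAttnNew(**kwargs).cuda()
    inp, = [t.cuda() for t in get_inputs()]
    cont = model(inp)
    print(cont.shape)  # (4, 4): one pooled vector per sequence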
FourierConv2d
import torch


class FourierConv2d(torch.nn.Module):

    def __init__(self, in_channels, out_channels, size_x, size_y, bias=True,
                 periodic=False):
        super(FourierConv2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        if not periodic:
            self.size_x = size_x
            self.size_y = size_y
        else:
            self.size_x = size_x // 2
            self.size_y = size_y // 2
        self.weights = torch.nn.Parameter(torch.view_as_real(
            1 / (in_channels * out_channels) * torch.rand(
                in_channels, out_channels, self.size_x, self.size_y,
                dtype=torch.cfloat)))
        self.biases = torch.nn.Parameter(torch.view_as_real(
            1 / out_channels * torch.rand(
                out_channels, self.size_x, self.size_y, dtype=torch.cfloat)))
        self.bias = bias
        self.periodic = periodic

    def forward(self, x):
        if not self.periodic:
            x = torch.nn.functional.pad(x, [0, self.size_y, 0, self.size_x])
        x_ft = torch.fft.rfft2(x)
        out_ft = torch.zeros_like(x_ft)
        out_ft[:, :, :self.size_x, :self.size_y] = torch.einsum(
            'bixy,ioxy->boxy', x_ft[:, :, :self.size_x, :self.size_y],
            torch.view_as_complex(self.weights))
        if self.bias:
            out_ft[:, :, :self.size_x, :self.size_y] += torch.view_as_complex(
                self.biases)
        out = torch.fft.irfft2(out_ft)
        if not self.periodic:
            out = out[..., :self.size_x, :self.size_y]
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'size_x': 4, 'size_y': 4}]
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 4 x4 = xindex // 32 x5 = xindex % 128 x6 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 10 * x1 + 80 * x4), xmask) tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x6, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4, 2), (128, 32, 8, 2, 1)) assert_size_stride(primals_3, (4, 4, 4, 2), (32, 8, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = torch.ops.aten._fft_r2c.default(buf0, [2, 3], 0, True) del buf0 buf2 = buf1 del buf1 buf3 = torch.ops.aten.full.default([4, 4, 8, 5], 0, dtype=torch. complex64, layout=torch.strided, device=device(type='cuda', index=0), pin_memory=False) buf4 = buf3 del buf3 buf5 = torch.ops.aten.slice.Tensor(buf2, 2, 0, 4) buf6 = buf5 buf7 = torch.ops.aten.slice.Tensor(buf6, 3, 0, 4) buf8 = buf7 buf9 = torch.ops.aten.view_as_complex.default(primals_2) buf10 = buf9 buf11 = torch.ops.aten.unsqueeze.default(buf8, 4) buf12 = buf11 buf13 = torch.ops.aten.permute.default(buf12, [0, 4, 2, 3, 1]) buf14 = buf13 buf15 = torch.ops.aten.unsqueeze.default(buf10, 4) buf16 = buf15 buf17 = torch.ops.aten.permute.default(buf16, [4, 1, 2, 3, 0]) buf18 = buf17 buf19 = torch.ops.aten.permute.default(buf14, [2, 3, 0, 4, 1]) buf20 = buf19 buf21 = torch.ops.aten.clone.default(buf20, memory_format=torch. 
contiguous_format) del buf11 del buf12 del buf13 del buf14 del buf19 del buf2 del buf20 del buf5 del buf6 del buf7 del buf8 buf22 = buf21 del buf21 buf23 = torch.ops.aten.reshape.default(buf22, [16, 4, 4]) buf24 = buf23 buf25 = torch.ops.aten.permute.default(buf18, [2, 3, 4, 1, 0]) buf26 = buf25 buf27 = torch.ops.aten.reshape.default(buf26, [16, 4, 4]) buf28 = buf27 buf29 = torch.ops.aten.bmm.default(buf24, buf28) del buf10 del buf15 del buf16 del buf17 del buf18 del buf25 del buf26 del buf27 del buf28 del buf9 del primals_2 buf30 = buf29 del buf29 buf31 = torch.ops.aten.reshape.default(buf30, [4, 4, 4, 1, 4]) buf32 = buf31 buf33 = torch.ops.aten.permute.default(buf32, [2, 4, 0, 1, 3]) buf34 = buf33 buf35 = torch.ops.aten.reshape.default(buf34, [4, 4, 4, 4]) buf36 = buf35 buf37 = torch.ops.aten.slice.Tensor(buf4, 2, 0, 4) buf38 = buf37 buf39 = torch.ops.aten.slice.Tensor(buf38, 3, 0, 4) buf40 = buf39 buf41 = torch.ops.aten.copy.default(buf40, buf36) del buf30 del buf31 del buf32 del buf33 del buf34 del buf35 del buf36 buf42 = buf41 del buf41 buf43 = torch.ops.aten.slice.Tensor(buf4, 2, 0, 4) buf44 = buf43 buf45 = torch.ops.aten.slice_scatter.default(buf44, buf42, 3, 0, 4) del buf42 buf46 = buf45 del buf45 buf47 = torch.ops.aten.slice_scatter.default(buf4, buf46, 2, 0, 4) del buf46 buf48 = buf47 del buf47 buf49 = torch.ops.aten.view_as_complex.default(primals_3) buf50 = buf49 buf51 = torch.ops.aten.slice.Tensor(buf48, 2, 0, 4) buf52 = buf51 buf53 = torch.ops.aten.slice.Tensor(buf52, 3, 0, 4) buf54 = buf53 buf55 = torch.ops.aten.view.dtype(buf54, torch.float32) buf56 = buf55 buf57 = torch.ops.aten.view.dtype(buf50, torch.float32) buf58 = buf57 buf59 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1), torch.float32) triton_poi_fused_add_1[grid(512)](buf56, buf58, buf59, 512, XBLOCK= 256, num_warps=4, num_stages=1) del buf49 del buf50 del buf51 del buf52 del buf53 del buf54 del buf55 del buf56 del buf57 del buf58 del primals_3 buf60 = torch.ops.aten.view.dtype(reinterpret_tensor(buf59, (4, 4, 4, 8), (128, 32, 8, 1), 0), torch.complex64) buf61 = buf60 buf62 = torch.ops.aten.slice.Tensor(buf48, 2, 0, 4) buf63 = buf62 buf64 = torch.ops.aten.slice_scatter.default(buf63, buf61, 3, 0, 4) del buf59 del buf60 del buf61 del buf62 del buf63 buf65 = buf64 del buf64 buf66 = torch.ops.aten.slice_scatter.default(buf48, buf65, 2, 0, 4) del buf48 del buf65 buf67 = buf66 del buf66 buf68 = torch.ops.aten.slice.Tensor(buf67, 2, 0, 4) buf69 = buf68 buf70 = torch.ops.aten.slice.Tensor(buf69, 3, 0, 4) buf71 = buf70 buf72 = torch.ops.aten.slice.Tensor(buf67, 2, 0, 4) buf73 = buf72 buf74 = torch.ops.aten.slice_scatter.default(buf73, buf71, 3, 0, 4) del buf68 del buf69 del buf70 del buf71 del buf72 del buf73 buf75 = buf74 del buf74 buf76 = torch.ops.aten.slice_scatter.default(buf67, buf75, 2, 0, 4) del buf67 del buf75 buf77 = buf76 del buf76 buf78 = torch.ops.aten._fft_c2r.default(buf77, [2, 3], 2, 8) del buf77 buf79 = buf78 del buf78 buf80 = torch.ops.aten.permute.default(buf24, [0, 2, 1]) buf81 = buf80 buf82 = torch.ops.aten._conj.default(buf81) buf83 = buf82 return reinterpret_tensor(buf79, (4, 4, 4, 4), (256, 64, 8, 1), 0 ), buf4, buf83 class FourierConv2dNew(torch.nn.Module): def __init__(self, in_channels, out_channels, size_x, size_y, bias=True, periodic=False): super(FourierConv2dNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels if not periodic: self.size_x = size_x self.size_y = size_y else: self.size_x = size_x // 2 self.size_y = size_y // 2 self.weights = 
torch.nn.Parameter(torch.view_as_real(1 / ( in_channels * out_channels) * torch.rand(in_channels, out_channels, self.size_x, self.size_y, dtype=torch.cfloat))) self.biases = torch.nn.Parameter(torch.view_as_real(1 / out_channels * torch.rand(out_channels, self.size_x, self. size_y, dtype=torch.cfloat))) self.bias = bias self.periodic = periodic def forward(self, input_0): primals_2 = self.weights primals_3 = self.biases primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
julian-parker/DAFX22_FNO
FourierConv2d
false
3,793
[ "MIT" ]
0
72f30144317a3f8ba8ea23ecf9a0333c81fc87db
https://github.com/julian-parker/DAFX22_FNO/tree/72f30144317a3f8ba8ea23ecf9a0333c81fc87db
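A smoke-test sketch, assuming a CUDA device with complex FFT support; the weights and biases are stored via view_as_real as real-valued parameters, so the module moves to CUDA like any other.

import torch

if torch.cuda.is_available():
    _, kwargs = get_init_inputs()
    model = FourierConv2dNew(**kwargs).cuda()
    x, = [t.cuda() for t in get_inputs()]
    out = model(x)
    print(out.shape)  # (4, 4, 4, 4) real output per the final reinterpret_tensor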
RMSELoss
import torch
import torch.nn as nn


class RMSELoss(nn.Module):

    def __init__(self):
        super(RMSELoss, self).__init__()

    def forward(self, inputs, targets):
        tmp = (inputs - targets) ** 2
        loss = torch.mean(tmp)
        return torch.sqrt(loss)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_pow_sqrt_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class RMSELossNew(nn.Module): def __init__(self): super(RMSELossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
kamomehz/waveletCodingCNN
RMSELoss
false
3,794
[ "MIT" ]
0
50c7db9d986039ded38999b7e4f4265e2250fb90
https://github.com/kamomehz/waveletCodingCNN/tree/50c7db9d986039ded38999b7e4f4265e2250fb90
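A quick cross-check, assuming a CUDA device: the fused reduction should match torch.sqrt(torch.mean((inputs - targets) ** 2)) up to float32 rounding.

import torch

if torch.cuda.is_available():
    a, b = [t.cuda() for t in get_inputs()]
    fused = RMSELossNew()(a, b)
    eager = torch.sqrt(torch.mean((a - b) ** 2))
    print(fused.item(), eager.item())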
Net_L2
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net_L2(nn.Module):

    def __init__(self, inputSize, kernel=64):
        super(Net_L2, self).__init__()
        self.inputSize = inputSize
        self.kernel = kernel
        self.fc1 = nn.Linear(self.inputSize, 256)
        self.fc2 = nn.Linear(256, 32)
        self.fc3 = nn.Linear(32, 2)
        self.drop1 = nn.Dropout(0.1)

    def forward(self, x):
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'inputSize': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (256, 4), (4, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (32, 256), (256, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (2, 32), (32, 1)) assert_size_stride(primals_7, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 256), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 32), (1, 256), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(128)](buf3, primals_5, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (32, 2), (1, 32), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, primals_1, buf1, buf3, primals_6, primals_4 class Net_L2New(nn.Module): def __init__(self, inputSize, kernel=64): super(Net_L2New, self).__init__() self.inputSize = inputSize self.kernel = kernel self.fc1 = nn.Linear(self.inputSize, 256) self.fc2 = nn.Linear(256, 32) self.fc3 = nn.Linear(32, 2) self.drop1 = nn.Dropout(0.1) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
kamomehz/waveletCodingCNN
Net_L2
false
3,795
[ "MIT" ]
0
50c7db9d986039ded38999b7e4f4265e2250fb90
https://github.com/kamomehz/waveletCodingCNN/tree/50c7db9d986039ded38999b7e4f4265e2250fb90
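The eager and generated modules initialize their weights independently, so a like-for-like comparison needs a state_dict copy first. A minimal sketch, assuming a CUDA device and the definitions above:

import torch

# Sketch only: copy weights from the eager module into the compiled variant
# before comparing forward passes on the reference 4x4 input.
eager = Net_L2(inputSize=4).cuda().eval()
compiled = Net_L2New(inputSize=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())
x = get_inputs()[0].cuda()
print(torch.allclose(eager(x), compiled(x), atol=1e-5))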
ToContinuous
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data


class ToContinuous(nn.Module):

    def __init__(self):
        super(ToContinuous, self).__init__()

    def forward(self, x):
        """
        :param x: tensor with dimension opt(batch x _ x bins x H x W
        :return:
        """
        assert len(x.shape) >= 3 and x.shape[-3] >= 2
        *other_dims, C, H, W = x.shape
        x = x.reshape(-1, C, H, W)
        x = torch.max(x, dim=1).indices
        sil = x > 0
        sil_float = sil.float()
        x = (x.float() - 1) / (C - 2)
        x = 2 * x - 1
        x[~sil] = -1
        x = torch.stack((x, sil_float), dim=0).permute(1, 0, 2, 3).contiguous()
        x = x.reshape(other_dims + [2, H, W])
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tmp47 = tmp46 > tmp10 tmp48 = tmp47 == 0 tmp49 = tmp46.to(tl.float32) tmp50 = 1.0 tmp51 = tmp49 - tmp50 tmp52 = 0.5 tmp53 = tmp51 * tmp52 tmp54 = 2.0 tmp55 = tmp53 * tmp54 tmp56 = tmp55 - tmp50 tmp57 = -1.0 tmp58 = tl.where(tmp48, tmp57, tmp56) tl.store(out_ptr0 + x2, tmp46, xmask) tl.store(out_ptr1 + x2, tmp58, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 2 x2 = xindex // 32 x0 = xindex % 16 x3 = xindex tmp0 = x2 + 4 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * (x2 + 4 * x1)), tmp4 & xmask, other=0.0 ) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x2 + 4 * x1)), tmp6 & xmask, other=0.0) tmp10 = tmp9 > tmp1 tmp11 = tmp10.to(tl.float32) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x3, tmp14, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0[grid (64)](arg0_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 buf2 = 
empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(128)](buf1, buf0, buf2, 128, XBLOCK= 128, num_warps=4, num_stages=1) del buf0 del buf1 return buf2, class ToContinuousNew(nn.Module): def __init__(self): super(ToContinuousNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
kampta/multiview-shapes
ToContinuous
false
3,796
[ "MIT" ]
0
a79eb4b492be8c2c279e2c69b13d5a19dff1621b
https://github.com/kampta/multiview-shapes/tree/a79eb4b492be8c2c279e2c69b13d5a19dff1621b
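ToContinuous is parameterless and deterministic (argmax over the bins dimension, rescaled to [-1, 1], plus a silhouette channel), so the two versions can be compared directly. A minimal sketch, assuming a CUDA device and the definitions above:

import torch

# Sketch: the output has shape (4, 2, 4, 4) - continuous value plus silhouette mask.
x = get_inputs()[0].cuda()
ref = ToContinuous()(x)
out = ToContinuousNew()(x)
print(torch.allclose(ref, out))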
DGMNConv3DLayer
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.init as init


class DGMNConv3DLayer(nn.Module):

    def __init__(self, args):
        self.args = args
        super(DGMNConv3DLayer, self).__init__()
        self.conv1 = nn.Conv3d(in_channels=1, out_channels=32, kernel_size=(3,
            3, 3), stride=(1, 1, 1), padding=(1, 1, 1))
        self.pool1 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(3, 3, 3),
            padding=(1, 0, 0))
        self.conv2 = nn.Conv3d(in_channels=32, out_channels=16, kernel_size=(3,
            3, 3), stride=(1, 1, 1), padding=(1, 1, 1))
        self.pool2 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(3, 3, 3),
            padding=(1, 0, 0))
        self.flatten = nn.Flatten()
        self.init_weight()

    def init_weight(self):
        init.xavier_uniform_(self.conv1.weight)
        init.constant_(self.conv1.bias, 0.0)
        init.xavier_uniform_(self.conv2.weight)
        init.constant_(self.conv2.bias, 0.0)

    def forward(self, cube):
        outputs = self.pool1(torch.relu(self.conv1(cube)))
        outputs = self.pool2(torch.relu(self.conv2(outputs)))
        outputs = self.flatten(outputs)
        return outputs


def get_inputs():
    return [torch.rand([4, 1, 64, 64, 64])]


def get_init_inputs():
    return [[], {'args': _mock_config()}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 262144 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 620928 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 9702 % 16 x0 = xindex % 9702 x4 = xindex // 9702 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 9728 * x4), tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (32, 1, 3, 3, 3), (27, 27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) assert_size_stride(primals_4, (16, 32, 3, 3, 3), (864, 27, 9, 3, 1)) assert_size_stride(primals_5, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64, 64), (8388608, 262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(33554432)](buf1, primals_2, 33554432, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [3, 3, 3], [3, 3, 3], [1, 0, 0]) buf3 = buf2[0] buf4 = buf2[1] del buf2 buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 16, 22, 21, 21), (155232, 9702, 441, 21, 1)) buf6 = empty_strided_cuda((4, 16, 22, 21, 21), (155648, 9728, 441, 21, 1), torch.float32) triton_poi_fused_convolution_relu_1[grid(620928)](buf5, primals_5, buf6, 620928, XBLOCK=512, num_warps=8, num_stages=1) del buf5 del primals_5 buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [3, 3, 3], [3, 3, 3], [1, 0, 0]) buf8 = buf7[0] buf9 = buf7[1] del buf7 return reinterpret_tensor(buf8, (4, 6272), (6272, 1), 0 ), primals_1, primals_3, primals_4, buf1, buf3, buf4, buf6, buf9 class DGMNConv3DLayerNew(nn.Module): def __init__(self, args): self.args = args super(DGMNConv3DLayerNew, self).__init__() self.conv1 = nn.Conv3d(in_channels=1, out_channels=32, kernel_size= (3, 3, 3), stride=(1, 1, 1), 
padding=(1, 1, 1)) self.pool1 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(3, 3, 3), padding=(1, 0, 0)) self.conv2 = nn.Conv3d(in_channels=32, out_channels=16, kernel_size =(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)) self.pool2 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(3, 3, 3), padding=(1, 0, 0)) self.flatten = nn.Flatten() self.init_weight() def init_weight(self): init.xavier_uniform_(self.conv1.weight) init.constant_(self.conv1.bias, 0.0) init.xavier_uniform_(self.conv2.weight) init.constant_(self.conv2.bias, 0.0) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Coldog2333/DGMN-pytorch
DGMNConv3DLayer
false
3,797
[ "Apache-2.0" ]
0
c34248afca516625c2ac2fc6d6f4ce8fe2988c99
https://github.com/Coldog2333/DGMN-pytorch/tree/c34248afca516625c2ac2fc6d6f4ce8fe2988c99
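In the generated code the Conv3d and MaxPool3d calls stay as extern/aten ops; only the bias add and ReLU are fused into Triton kernels. A rough comparison sketch, assuming a CUDA device and that `args` is only stored, never read, so a bare namespace can stand in for `_mock_config`:

import types
import torch

# Rough sketch; the SimpleNamespace below is an assumption standing in for
# _mock_config, since `args` is never used after being stored.
args = types.SimpleNamespace()
eager = DGMNConv3DLayer(args).cuda()
compiled = DGMNConv3DLayerNew(args).cuda()
compiled.load_state_dict(eager.state_dict())
cube = get_inputs()[0].cuda()                  # 4 x 1 x 64 x 64 x 64
print(torch.allclose(eager(cube), compiled(cube), atol=1e-4))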
teacherNet
import torch
import torch.nn as nn
import torch.nn.functional as F


class teacherNet(nn.Module):

    def __init__(self):
        super(teacherNet, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 1200)
        self.fc2 = nn.Linear(1200, 1200)
        self.fc3 = nn.Linear(1200, 10)

    def forward(self, x):
        x = x.view(-1, 28 * 28)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=0.8, training=self.training)
        x = F.relu(self.fc2(x))
        x = F.dropout(x, p=0.8, training=self.training)
        x = self.fc3(x)
        return x


def get_inputs():
    return [torch.rand([4, 784])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 1200 x1 = xindex // 1200 tmp0 = tl.load(in_out_ptr0 + (x0 + 1216 * x1), xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x0 + 1216 * x1), tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (1200, 784), (784, 1)) assert_size_stride(primals_3, (1200,), (1,)) assert_size_stride(primals_4, (1200, 1200), (1200, 1)) assert_size_stride(primals_5, (1200,), (1,)) assert_size_stride(primals_6, (10, 1200), (1200, 1)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1200), (1216, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 1200), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(4800)](buf1, primals_3, 4800, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 1200), (1216, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1200, 1200), (1, 1200), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(4800)](buf3, primals_5, 4800, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (1200, 10), (1, 1200), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, primals_1, buf1, buf3, primals_6, primals_4 class teacherNetNew(nn.Module): def __init__(self): super(teacherNetNew, self).__init__() self.fc1 = nn.Linear(28 * 28, 1200) self.fc2 = nn.Linear(1200, 1200) self.fc3 = nn.Linear(1200, 10) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
kamiyakenta/knowledge-distillation-pytorch
teacherNet
false
3,798
[ "MIT" ]
0
749c6bb353961147718371b2b694046af0a6e3f1
https://github.com/kamiyakenta/knowledge-distillation-pytorch/tree/749c6bb353961147718371b2b694046af0a6e3f1
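The generated graph contains no dropout, so it corresponds to the eval-mode behaviour of teacherNet (where F.dropout with p=0.8 is inactive). A minimal comparison sketch, assuming a CUDA device and the definitions above:

import torch

# Sketch: compare in eval mode, after copying weights, since the compiled
# variant never applies dropout.
eager = teacherNet().cuda().eval()
compiled = teacherNetNew().cuda().eval()
compiled.load_state_dict(eager.state_dict())
x = get_inputs()[0].cuda()                     # 4 x 784
print(torch.allclose(eager(x), compiled(x), atol=1e-5))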
ToRGB
from torch.autograd import Function import math import torch import torch.nn as nn from torch.nn import functional as F import torch.nn.parallel import torch.utils.data def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class Blur(nn.Module): 
def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = 
self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class ToRGB(nn.Module): def __init__(self, in_channel, style_dim, output_channels=1, upsample= True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, output_channels, 1, style_dim, demodulate=False) self.bias = nn.Parameter(torch.zeros(1, output_channels, 1, 1)) def forward(self, input, style, skip=None): out = self.conv(input, style) out = out + self.bias if skip is not None: skip = self.upsample(skip) out = out + skip return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math import torch.nn as nn from torch.nn import functional as F import torch.nn.parallel import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 1, 4, 1, 1), (4, 4, 1, 1, 1)) assert_size_stride(primals_6, (1, 1, 1, 1), (1, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = reinterpret_tensor(buf0, (4, 1, 4, 1, 1), (4, 1, 1, 4, 4), 0) del buf0 triton_poi_fused_mul_2[grid(16)](primals_5, buf2, buf3, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 4, 4, 4), (64, 16, 4, 1)) buf5 = 
reinterpret_tensor(buf4, (4, 1, 4, 4), (16, 16, 4, 1), 0) del buf4 triton_poi_fused_add_3[grid(64)](buf5, primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 return buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4 ), (256, 16, 4, 1), 0) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 
= p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, 
{self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class ToRGBNew(nn.Module): def __init__(self, in_channel, style_dim, output_channels=1, upsample= True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, output_channels, 1, style_dim, demodulate=False) self.bias = nn.Parameter(torch.zeros(1, output_channels, 1, 1)) def forward(self, input_0, input_1): primals_6 = self.bias primals_5 = self.conv.weight primals_2 = self.conv.modulation.weight primals_3 = self.conv.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
kampta/multiview-shapes
ToRGB
false
3,799
[ "MIT" ]
0
a79eb4b492be8c2c279e2c69b13d5a19dff1621b
https://github.com/kampta/multiview-shapes/tree/a79eb4b492be8c2c279e2c69b13d5a19dff1621b
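ToRGB takes a feature map and a style vector (see get_inputs / get_init_inputs above); with demodulate=False the modulated 1x1 convolution reduces to a per-sample scaled convolution plus bias. A minimal comparison sketch, assuming a CUDA device:

import torch

# Sketch: copy parameters (conv weight, modulation layer, bias) so both
# instances compute the same styled 1x1 convolution.
eager = ToRGB(in_channel=4, style_dim=4).cuda()
compiled = ToRGBNew(in_channel=4, style_dim=4).cuda()
compiled.load_state_dict(eager.state_dict())
feat, style = [t.cuda() for t in get_inputs()]
print(torch.allclose(eager(feat, style), compiled(feat, style), atol=1e-5))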
Actor
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn


def hidden_init(layer):
    fan_in = layer.weight.data.size()[0]
    lim = 1.0 / np.sqrt(fan_in)
    return -lim, lim


class Actor(nn.Module):
    """Actor (Policy) Model."""

    def __init__(self, input_size, output_size, seed, fc1_units=200,
            fc2_units=150):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
        """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(input_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, output_size)
        self.reset_parameters()

    def reset_parameters(self):
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-0.003, 0.003)

    def forward(self, state):
        """Build an actor (policy) network that maps states -> actions."""
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        return F.tanh(self.fc3(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'output_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 9600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 150 x2 = xindex % 2400 x3 = xindex // 2400 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 2432 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (200, 4), (4, 1)) assert_size_stride(primals_2, (200,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (150, 200), (200, 1)) assert_size_stride(primals_5, (150,), (1,)) assert_size_stride(primals_6, (4, 150), (150, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 200), (3200, 800, 200, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(12800)](buf1, primals_2, buf7, 12800, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 150), (150, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 150), (1, 200), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 150), (2400, 600, 150, 1), 0) del buf2 buf6 = 
empty_strided_cuda((4, 4, 4, 150), (2432, 600, 150, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(9600)](buf3, primals_5, buf6, 9600, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 150), (150, 1), 0), reinterpret_tensor(primals_6, (150, 4), (1, 150), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 200), (200, 1), 0 ), reinterpret_tensor(buf3, (64, 150), (150, 1), 0 ), buf5, primals_6, buf6, primals_4, buf7 def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class ActorNew(nn.Module): """Actor (Policy) Model.""" def __init__(self, input_size, output_size, seed, fc1_units=200, fc2_units=150): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(ActorNew, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(input_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, output_size) self.reset_parameters() def reset_parameters(self): self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
kangjie-chen/deep-reinforcement-learning
Actor
false
3,801
[ "MIT" ]
0
0706f136834ecafc7391f483a6b3c84365a349eb
https://github.com/kangjie-chen/deep-reinforcement-learning/tree/0706f136834ecafc7391f483a6b3c84365a349eb
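Both constructors call torch.manual_seed(seed) before building their layers, so with the same seed the two instances should start from identical weights and no state_dict copy is needed. A minimal sketch, assuming a CUDA device and the definitions above:

import torch

# Sketch: identical seeding at construction time should give identical weights.
eager = Actor(input_size=4, output_size=4, seed=4).cuda()
compiled = ActorNew(input_size=4, output_size=4, seed=4).cuda()
state = get_inputs()[0].cuda()                 # 4 x 4 x 4 x 4 batch of states
print(torch.allclose(eager(state), compiled(state), atol=1e-5))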
Feature_extraction
import torch
from torchvision import transforms as transforms
import torch.nn as nn


class Feature_extraction(nn.Module):

    def __init__(self, k, p):
        super(Feature_extraction, self).__init__()
        self.conv_1 = nn.Conv2d(3, 64, kernel_size=5, padding=2)
        self.conv_2 = nn.Conv2d(64, 64, kernel_size=k, padding=p)
        self.conv_3 = nn.Conv2d(64, 128, kernel_size=k, padding=p)
        self.conv_4 = nn.Conv2d(128, 128, kernel_size=k, padding=p)
        self.conv_5 = nn.Conv2d(128, 256, kernel_size=k, padding=p)
        self.conv_6 = nn.Conv2d(256, 256, kernel_size=k, padding=p)
        self.conv_7 = nn.Conv2d(256, 128, 1)
        self.conv_8 = nn.Conv2d(128, 22, 1)
        self.pool = nn.MaxPool2d(3, stride=2, padding=1)

    def forward(self, x):
        out1 = self.conv_1(x)
        out2 = self.conv_2(out1)
        pool1 = self.pool(out2)
        out3 = self.conv_3(pool1)
        out4 = self.conv_4(out3)
        pool2 = self.pool(out4)
        out5 = self.conv_5(pool2)
        out6 = self.conv_6(out5)
        out7 = self.conv_7(out6)
        out = self.conv_8(out7)
        return out


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {'k': 4, 'p': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torchvision import transforms as transforms import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 75 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 256 * x2 + 4096 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1218816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 313600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 2240 % 35 x1 = xindex // 64 % 35 x0 = xindex % 64 x3 = xindex // 78400 x6 = xindex tmp0 = -1 + 2 * x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 69, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-4480 + x0 + 128 * x1 + 8832 * x2 + 304704 * x3), tmp10 & xmask, other=float('-inf')) tmp12 = 2 * x1 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4416 + x0 + 128 * x1 + 8832 * x2 + 304704 * x3), tmp16 & xmask, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + 2 * x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-4352 + x0 + 128 * x1 + 8832 * x2 + 304704 * x3), tmp23 & xmask, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2 * x2 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-64 + x0 + 128 * x1 + 8832 * x2 + 304704 * x3), tmp30 & xmask, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8832 * x2 + 304704 * x3), tmp33 & xmask, other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) 
tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8832 * x2 + 304704 * x3 ), tmp36 & xmask, other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + 2 * x2 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (4352 + x0 + 128 * x1 + 8832 * x2 + 304704 * x3), tmp43 & xmask, other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4416 + x0 + 128 * x1 + 8832 * x2 + 304704 * x3), tmp46 & xmask, other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (4480 + x0 + 128 * x1 + 8832 * x2 + 304704 * x3), tmp49 & xmask, other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tmp17 > tmp11 tmp53 = tl.full([1], 1, tl.int8) tmp54 = tl.full([1], 0, tl.int8) tmp55 = tl.where(tmp52, tmp53, tmp54) tmp56 = tmp24 > tmp18 tmp57 = tl.full([1], 2, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp31 > tmp25 tmp60 = tl.full([1], 3, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp34 > tmp32 tmp63 = tl.full([1], 4, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp37 > tmp35 tmp66 = tl.full([1], 5, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp44 > tmp38 tmp69 = tl.full([1], 6, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp47 > tmp45 tmp72 = tl.full([1], 7, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp50 > tmp48 tmp75 = tl.full([1], 8, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tl.store(out_ptr0 + x6, tmp51, xmask) tl.store(out_ptr1 + x6, tmp76, xmask) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1036800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 270848 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 2944 % 23 x1 = xindex // 128 % 23 x0 = xindex % 128 x3 = xindex // 67712 x6 = xindex tmp0 = -1 + 2 * x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 45, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5888 + x0 + 256 * x1 + 11520 * x2 + 259200 * x3), tmp10 & xmask, other=float('-inf')) tmp12 = 2 * x1 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-5760 + x0 + 256 * x1 + 11520 * x2 + 259200 * x3), tmp16 & xmask, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + 2 * x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 
+ (-5632 + x0 + 256 * x1 + 11520 * x2 + 259200 * x3), tmp23 & xmask, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2 * x2 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-128 + x0 + 256 * x1 + 11520 * x2 + 259200 * x3), tmp30 & xmask, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + (x0 + 256 * x1 + 11520 * x2 + 259200 * x3), tmp33 & xmask, other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 11520 * x2 + 259200 * x3), tmp36 & xmask, other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + 2 * x2 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (5632 + x0 + 256 * x1 + 11520 * x2 + 259200 * x3), tmp43 & xmask, other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (5760 + x0 + 256 * x1 + 11520 * x2 + 259200 * x3), tmp46 & xmask, other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5888 + x0 + 256 * x1 + 11520 * x2 + 259200 * x3), tmp49 & xmask, other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tmp17 > tmp11 tmp53 = tl.full([1], 1, tl.int8) tmp54 = tl.full([1], 0, tl.int8) tmp55 = tl.where(tmp52, tmp53, tmp54) tmp56 = tmp24 > tmp18 tmp57 = tl.full([1], 2, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp31 > tmp25 tmp60 = tl.full([1], 3, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp34 > tmp32 tmp63 = tl.full([1], 4, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp37 > tmp35 tmp66 = tl.full([1], 5, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp44 > tmp38 tmp69 = tl.full([1], 6, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp47 > tmp45 tmp72 = tl.full([1], 7, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp50 > tmp48 tmp75 = tl.full([1], 8, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tl.store(out_ptr0 + x6, tmp51, xmask) tl.store(out_ptr1 + x6, tmp76, xmask) @triton.jit def triton_poi_fused_convolution_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_poi_fused_convolution_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 557568 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_16(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, 
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 88 xnumel = 1089 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 22 y1 = yindex // 22 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 22 * x2 + 23958 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 1089 * y3), tmp2, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (64, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (22, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_17, (22,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 5, 5), (75, 1, 15, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 25)](primals_1, buf0, 192, 25, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch .float32) triton_poi_fused_2[grid(4096, 16)](primals_4, buf2, 4096, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((128, 128, 4, 4), (2048, 1, 512, 128), torch.float32) triton_poi_fused_4[grid(16384, 16)](primals_8, buf4, 16384, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32) triton_poi_fused_5[grid(32768, 16)](primals_10, buf5, 32768, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = empty_strided_cuda((256, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) triton_poi_fused_6[grid(65536, 16)](primals_12, buf6, 65536, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf7 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf8 = buf7 del buf7 
triton_poi_fused_convolution_7[grid(1048576)](buf8, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf9 = extern_kernels.convolution(buf8, buf2, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 64, 69, 69), (304704, 1, 4416, 64)) buf10 = buf9 del buf9 triton_poi_fused_convolution_8[grid(1218816)](buf10, primals_5, 1218816, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf11 = empty_strided_cuda((4, 64, 35, 35), (78400, 1, 2240, 64), torch.float32) buf12 = empty_strided_cuda((4, 64, 35, 35), (78400, 1, 2240, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_9[grid(313600)](buf10, buf11, buf12, 313600, XBLOCK=512, num_warps=8, num_stages=1) buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 128, 40, 40), (204800, 1, 5120, 128)) buf14 = buf13 del buf13 triton_poi_fused_convolution_10[grid(819200)](buf14, primals_7, 819200, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf15 = extern_kernels.convolution(buf14, buf4, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 128, 45, 45), (259200, 1, 5760, 128)) buf16 = buf15 del buf15 triton_poi_fused_convolution_11[grid(1036800)](buf16, primals_9, 1036800, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf17 = empty_strided_cuda((4, 128, 23, 23), (67712, 1, 2944, 128), torch.float32) buf18 = empty_strided_cuda((4, 128, 23, 23), (67712, 1, 2944, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_12[grid(270848)](buf16, buf17, buf18, 270848, XBLOCK=512, num_warps=8, num_stages=1) buf19 = extern_kernels.convolution(buf17, buf5, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 256, 28, 28), (200704, 1, 7168, 256)) buf20 = buf19 del buf19 triton_poi_fused_convolution_13[grid(802816)](buf20, primals_11, 802816, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf21 = extern_kernels.convolution(buf20, buf6, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 256, 33, 33), (278784, 1, 8448, 256)) buf22 = buf21 del buf21 triton_poi_fused_convolution_14[grid(1115136)](buf22, primals_13, 1115136, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf23 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 128, 33, 33), (139392, 1, 4224, 128)) buf24 = buf23 del buf23 triton_poi_fused_convolution_15[grid(557568)](buf24, primals_15, 557568, XBLOCK=512, num_warps=8, num_stages=1) del primals_15 buf25 = extern_kernels.convolution(buf24, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 22, 33, 33), (23958, 1, 726, 22)) buf26 = empty_strided_cuda((4, 22, 33, 33), (23958, 1089, 33, 1), torch.float32) triton_poi_fused_convolution_16[grid(88, 1089)](buf25, primals_17, buf26, 88, 1089, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf25 del primals_17 return (buf26, buf0, buf1, buf2, buf3, buf4, buf5, buf6, primals_14, primals_16, buf8, buf10, buf11, buf12, buf14, buf16, 
buf17, buf18, buf20, buf22, buf24) class Feature_extractionNew(nn.Module): def __init__(self, k, p): super(Feature_extractionNew, self).__init__() self.conv_1 = nn.Conv2d(3, 64, kernel_size=5, padding=2) self.conv_2 = nn.Conv2d(64, 64, kernel_size=k, padding=p) self.conv_3 = nn.Conv2d(64, 128, kernel_size=k, padding=p) self.conv_4 = nn.Conv2d(128, 128, kernel_size=k, padding=p) self.conv_5 = nn.Conv2d(128, 256, kernel_size=k, padding=p) self.conv_6 = nn.Conv2d(256, 256, kernel_size=k, padding=p) self.conv_7 = nn.Conv2d(256, 128, 1) self.conv_8 = nn.Conv2d(128, 22, 1) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, input_0): primals_1 = self.conv_1.weight primals_2 = self.conv_1.bias primals_4 = self.conv_2.weight primals_5 = self.conv_2.bias primals_6 = self.conv_3.weight primals_7 = self.conv_3.bias primals_8 = self.conv_4.weight primals_9 = self.conv_4.bias primals_10 = self.conv_5.weight primals_11 = self.conv_5.bias primals_12 = self.conv_6.weight primals_13 = self.conv_6.bias primals_14 = self.conv_7.weight primals_15 = self.conv_7.bias primals_16 = self.conv_8.weight primals_17 = self.conv_8.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
justinluyao/phd_thesis
Feature_extraction
false
3,803
[ "MIT" ]
0
0a61f5deaac86dd34839ce24c2ad89e1411a8540
https://github.com/justinluyao/phd_thesis/tree/0a61f5deaac86dd34839ce24c2ad89e1411a8540
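A minimal eager-mode sketch of the pooling step that the triton_poi_fused_max_pool2d_with_indices_* kernels in this row implement. The (4, 64, 69, 69) input shape and the MaxPool2d(3, stride=2, padding=1) settings are read off the generated call() and the module definition above; the random input is only illustrative. Note that the fused kernel stores the argmax as a compact int8 position inside the 3x3 window, whereas the eager call below returns flat indices.

import torch
import torch.nn.functional as F

# Same shape that buf10 has before triton_poi_fused_max_pool2d_with_indices_9 runs.
x = torch.randn(4, 64, 69, 69)
# MaxPool2d(3, stride=2, padding=1): (69 + 2*1 - 3) // 2 + 1 = 35 per spatial dim.
vals, idx = F.max_pool2d(x, kernel_size=3, stride=2, padding=1, return_indices=True)
print(vals.shape, idx.shape)  # torch.Size([4, 64, 35, 35]) for both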
CmapPafHeadAttention
import torch import torch.utils.data import torch.nn import torch.optim class UpsampleCBR(torch.nn.Sequential): def __init__(self, input_channels, output_channels, count=1, num_flat=0): layers = [] for i in range(count): if i == 0: inch = input_channels else: inch = output_channels layers += [torch.nn.ConvTranspose2d(inch, output_channels, kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d( output_channels), torch.nn.ReLU()] for i in range(num_flat): layers += [torch.nn.Conv2d(output_channels, output_channels, kernel_size=3, stride=1, padding=1), torch.nn. BatchNorm2d(output_channels), torch.nn.ReLU()] super(UpsampleCBR, self).__init__(*layers) class CmapPafHeadAttention(torch.nn.Module): def __init__(self, input_channels, cmap_channels, paf_channels, upsample_channels=256, num_upsample=0, num_flat=0): super(CmapPafHeadAttention, self).__init__() self.cmap_up = UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat) self.paf_up = UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat) self.cmap_att = torch.nn.Conv2d(upsample_channels, upsample_channels, kernel_size=3, stride=1, padding=1) self.paf_att = torch.nn.Conv2d(upsample_channels, upsample_channels, kernel_size=3, stride=1, padding=1) self.cmap_conv = torch.nn.Conv2d(upsample_channels, cmap_channels, kernel_size=1, stride=1, padding=0) self.paf_conv = torch.nn.Conv2d(upsample_channels, paf_channels, kernel_size=1, stride=1, padding=0) def forward(self, x): xc = self.cmap_up(x) ac = torch.sigmoid(self.cmap_att(xc)) xp = self.paf_up(x) ap = torch.tanh(self.paf_att(xp)) return self.cmap_conv(xc * ac), self.paf_conv(xp * ap) def get_inputs(): return [torch.rand([4, 256, 64, 64])] def get_init_inputs(): return [[], {'input_channels': 4, 'cmap_channels': 4, 'paf_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 256 * x2 + 1048576 * y1), tmp0, None) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_mul_sigmoid_tanh_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x2, None) tmp4 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x2, None) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tl.sigmoid(tmp5) tmp8 = tmp6 * tmp7 tmp9 = libdevice.tanh(tmp2) tmp10 = tmp6 * tmp9 tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(in_out_ptr1 + x2, tmp5, None) tl.store(out_ptr0 + x2, tmp8, None) tl.store(out_ptr1 + x2, tmp10, None) @triton.jit def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_3, (256,), (1,)) 
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (4, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(1024, 4096)](primals_1, buf0, 1024, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_1[grid(65536, 9)](primals_2, buf1, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_1[grid(65536, 9)](primals_4, buf2, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf5 = extern_kernels.convolution(buf0, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf6 = buf5 del buf5 buf4 = buf3 del buf3 buf7 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256 ), torch.float32) buf10 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32) triton_poi_fused_convolution_mul_sigmoid_tanh_2[grid(4194304)](buf6, buf4, primals_5, primals_3, buf0, buf7, buf10, 4194304, XBLOCK= 512, num_warps=8, num_stages=1) del primals_3 del primals_5 buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 64, 64), (16384, 1, 256, 4)) buf9 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_3[grid(16, 4096)](buf8, primals_7, buf9, 16, 4096, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1) del primals_7 buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 64, 64), (16384, 1, 256, 4)) buf12 = reinterpret_tensor(buf8, (4, 4, 64, 64), (16384, 4096, 64, 1), 0) del buf8 triton_poi_fused_convolution_3[grid(16, 4096)](buf11, primals_9, buf12, 16, 4096, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1) del buf11 del primals_9 return (buf9, buf12, buf0, buf1, buf2, primals_6, primals_8, buf4, buf6, buf7, buf10) class UpsampleCBR(torch.nn.Sequential): def __init__(self, input_channels, output_channels, count=1, num_flat=0): layers = [] for i in range(count): if i == 0: inch = input_channels else: inch = output_channels layers += [torch.nn.ConvTranspose2d(inch, output_channels, kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d( output_channels), torch.nn.ReLU()] for i in range(num_flat): layers += [torch.nn.Conv2d(output_channels, output_channels, kernel_size=3, stride=1, padding=1), torch.nn. 
BatchNorm2d(output_channels), torch.nn.ReLU()] super(UpsampleCBR, self).__init__(*layers) class CmapPafHeadAttentionNew(torch.nn.Module): def __init__(self, input_channels, cmap_channels, paf_channels, upsample_channels=256, num_upsample=0, num_flat=0): super(CmapPafHeadAttentionNew, self).__init__() self.cmap_up = UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat) self.paf_up = UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat) self.cmap_att = torch.nn.Conv2d(upsample_channels, upsample_channels, kernel_size=3, stride=1, padding=1) self.paf_att = torch.nn.Conv2d(upsample_channels, upsample_channels, kernel_size=3, stride=1, padding=1) self.cmap_conv = torch.nn.Conv2d(upsample_channels, cmap_channels, kernel_size=1, stride=1, padding=0) self.paf_conv = torch.nn.Conv2d(upsample_channels, paf_channels, kernel_size=1, stride=1, padding=0) def forward(self, input_0): primals_2 = self.cmap_att.weight primals_3 = self.cmap_att.bias primals_4 = self.paf_att.weight primals_5 = self.paf_att.bias primals_6 = self.cmap_conv.weight primals_7 = self.cmap_conv.bias primals_8 = self.paf_conv.weight primals_9 = self.paf_conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
intflow/trt_openpose
CmapPafHeadAttention
false
3,805
[ "MIT" ]
0
526b1b0d463f1c86a45ca4d4cd77a41732c7654b
https://github.com/intflow/trt_openpose/tree/526b1b0d463f1c86a45ca4d4cd77a41732c7654b
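A small eager-mode sketch of the gating that triton_poi_fused_convolution_mul_sigmoid_tanh_2 fuses in this row: a sigmoid attention map for the cmap branch and a tanh map for the paf branch, each multiplied back onto the shared features. The 4x256x64x64 input comes from get_inputs() above; the freshly initialised convolutions are only stand-ins for the trained cmap_att/paf_att layers.

import torch
import torch.nn as nn

x = torch.randn(4, 256, 64, 64)
cmap_att = nn.Conv2d(256, 256, kernel_size=3, padding=1)
paf_att = nn.Conv2d(256, 256, kernel_size=3, padding=1)

ac = torch.sigmoid(cmap_att(x))  # gate in (0, 1), multiplied into buf7 in call()
ap = torch.tanh(paf_att(x))      # signed gate in (-1, 1), multiplied into buf10
print((x * ac).shape, (x * ap).shape)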
KLNormal
import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed class KLNormal(nn.Module): def __init__(self): super(KLNormal, self).__init__() def forward(self, qm, qv, pm, pv): element_wise = 0.5 * (torch.log(pv) - torch.log(qv) + qv / pv + (qm - pm).pow(2) / pv - 1) kl = element_wise.sum(-1) return kl def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_log_mul_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp24 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp33 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp35 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp40 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp41 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp49 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp51 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp56 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp57 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tl_math.log(tmp0) tmp3 = tl_math.log(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tmp2 / tmp0 tmp6 = tmp4 + tmp5 tmp9 = tmp7 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp10 / tmp0 tmp12 = tmp6 + tmp11 tmp13 = 1.0 tmp14 = tmp12 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp18 = tl_math.log(tmp17) tmp20 = tl_math.log(tmp19) tmp21 = tmp18 - tmp20 tmp22 = tmp19 / tmp17 tmp23 = tmp21 + tmp22 tmp26 = tmp24 - tmp25 tmp27 = tmp26 * tmp26 tmp28 = tmp27 / tmp17 tmp29 = tmp23 + tmp28 tmp30 = tmp29 - tmp13 tmp31 = tmp30 * tmp15 tmp32 = tmp16 + tmp31 tmp34 = tl_math.log(tmp33) tmp36 = tl_math.log(tmp35) tmp37 = tmp34 - tmp36 tmp38 = tmp35 / tmp33 tmp39 = tmp37 + tmp38 tmp42 = tmp40 - tmp41 tmp43 = tmp42 * tmp42 tmp44 = tmp43 / tmp33 tmp45 = tmp39 + tmp44 tmp46 = tmp45 - tmp13 tmp47 = tmp46 * tmp15 tmp48 = tmp32 + tmp47 tmp50 = tl_math.log(tmp49) tmp52 = tl_math.log(tmp51) tmp53 = tmp50 - tmp52 tmp54 = tmp51 / tmp49 tmp55 = tmp53 + tmp54 tmp58 = tmp56 - tmp57 tmp59 = tmp58 * tmp58 tmp60 = tmp59 / tmp49 tmp61 = tmp55 + tmp60 tmp62 = tmp61 - tmp13 tmp63 = tmp62 * tmp15 tmp64 = tmp48 + tmp63 tl.store(out_ptr0 + x0, tmp64, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_add_div_log_mul_pow_sub_sum_0[grid(64)](arg0_1, arg1_1, arg2_1, arg3_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf0, class KLNormalNew(nn.Module): def __init__(self): super(KLNormalNew, self).__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
kayburns/craftassist
KLNormal
false
3,806
[ "MIT" ]
0
07909493d320afc2c9ff428d0891bc3acd4dc68f
https://github.com/kayburns/craftassist/tree/07909493d320afc2c9ff428d0891bc3acd4dc68f
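A quick numerical check, not part of the row itself, that the closed form computed by KLNormal (and fused into triton_poi_fused_add_div_log_mul_pow_sub_sum_0) is the per-element KL divergence between diagonal Gaussians q = N(qm, qv) and p = N(pm, pv), with qv and pv read as variances. The +0.1 offset only keeps the sampled variances positive.

import torch
from torch.distributions import Normal, kl_divergence

qm, qv, pm, pv = (torch.rand(4, 4, 4, 4) + 0.1 for _ in range(4))
element_wise = 0.5 * (torch.log(pv) - torch.log(qv) + qv / pv
                      + (qm - pm).pow(2) / pv - 1)
# torch.distributions takes standard deviations, hence the sqrt on the variances.
reference = kl_divergence(Normal(qm, qv.sqrt()), Normal(pm, pv.sqrt()))
print(torch.allclose(element_wise, reference, atol=1e-5))  # True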
LabelSmoothingBCE
import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed class LabelSmoothingBCE(nn.Module): def __init__(self, smoothing=0.0): super(LabelSmoothingBCE, self).__init__() self.criterion = nn.BCEWithLogitsLoss(reduction='none') self.confidence = 1.0 - smoothing self.smoothing = smoothing def forward(self, x, target): smooth_target = target.clone().masked_fill(target == 1, self.confidence ) smooth_target = smooth_target.masked_fill(target == 0, self.smoothing) return self.criterion(x, smooth_target) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp8 = tl.load(in_ptr1 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = tmp0 == tmp3 tmp5 = tl.where(tmp4, tmp3, tmp0) tmp6 = tl.where(tmp2, tmp1, tmp5) tmp7 = tmp3 - tmp6 tmp9 = tmp7 * tmp8 tmp10 = triton_helpers.minimum(tmp1, tmp8) tmp11 = tl_math.abs(tmp8) tmp12 = -tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = libdevice.log1p(tmp13) tmp15 = tmp10 - tmp14 tmp16 = tmp9 - tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0[grid (256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class LabelSmoothingBCENew(nn.Module): def __init__(self, smoothing=0.0): super(LabelSmoothingBCENew, self).__init__() self.criterion = nn.BCEWithLogitsLoss(reduction='none') self.confidence = 1.0 - smoothing self.smoothing = smoothing def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
kayburns/craftassist
LabelSmoothingBCE
false
3,810
[ "MIT" ]
0
07909493d320afc2c9ff428d0891bc3acd4dc68f
https://github.com/kayburns/craftassist/tree/07909493d320afc2c9ff428d0891bc3acd4dc68f
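A short sketch of what LabelSmoothingBCE does before the loss: binary targets are remapped to {smoothing, 1 - smoothing} and then fed to an element-wise BCE-with-logits loss. The smoothing value of 0.1 and the 0/1 targets are illustrative choices, not values from the row.

import torch
import torch.nn as nn

smoothing = 0.1
logits = torch.randn(4, 4, 4, 4)
target = torch.randint(0, 2, (4, 4, 4, 4)).float()

smooth = target.clone().masked_fill(target == 1, 1.0 - smoothing)
smooth = smooth.masked_fill(target == 0, smoothing)
loss = nn.BCEWithLogitsLoss(reduction='none')(logits, smooth)
print(loss.shape)  # per-element losses, same shape as the logits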
HighwayLayer
import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed def my_xavier_init(m, gain=1): for p in m.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p, gain) else: nn.init.constant_(p, 0) class HighwayLayer(torch.nn.Module): def __init__(self, dim): super(HighwayLayer, self).__init__() self.gate_proj = nn.Linear(dim, dim, bias=True) self.nlin_proj = nn.Linear(dim, dim, bias=True) my_xavier_init(self.nlin_proj) my_xavier_init(self.gate_proj) nn.init.constant_(self.gate_proj.bias, -1) def forward(self, x): gate = torch.sigmoid(self.gate_proj(x)) nlin = torch.tanh(self.nlin_proj(x)) res = gate * nlin + (1 - gate) * x return res def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp7 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = libdevice.tanh(tmp2) tmp4 = tmp1 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf0, buf1, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, primals_3, buf0, buf1 def my_xavier_init(m, gain=1): for p in m.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p, gain) else: nn.init.constant_(p, 0) class HighwayLayerNew(torch.nn.Module): def __init__(self, dim): super(HighwayLayerNew, self).__init__() self.gate_proj = nn.Linear(dim, dim, bias=True) self.nlin_proj = nn.Linear(dim, dim, bias=True) my_xavier_init(self.nlin_proj) my_xavier_init(self.gate_proj) nn.init.constant_(self.gate_proj.bias, -1) def forward(self, input_0): primals_1 = self.gate_proj.weight primals_2 = self.gate_proj.bias primals_4 = self.nlin_proj.weight primals_5 = self.nlin_proj.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
kayburns/craftassist
HighwayLayer
false
3,811
[ "MIT" ]
0
07909493d320afc2c9ff428d0891bc3acd4dc68f
https://github.com/kayburns/craftassist/tree/07909493d320afc2c9ff428d0891bc3acd4dc68f
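An eager-mode sketch of the highway combination that triton_poi_fused_add_mul_rsub_sigmoid_tanh_0 fuses after the two addmm calls in this row: res = g * tanh(nlin_proj(x)) + (1 - g) * x with g = sigmoid(gate_proj(x)). Shapes follow get_inputs()/get_init_inputs() above; the untrained Linear layers are placeholders.

import torch
import torch.nn as nn

dim = 4
x = torch.rand(4, 4, 4, dim)
gate_proj = nn.Linear(dim, dim)
nlin_proj = nn.Linear(dim, dim)

gate = torch.sigmoid(gate_proj(x))
res = gate * torch.tanh(nlin_proj(x)) + (1 - gate) * x
print(res.shape)  # torch.Size([4, 4, 4, 4])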
HighwayNetwork
import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed class HighwayNetwork(nn.Module): def __init__(self, in_dim, out_dim): super(HighwayNetwork, self).__init__() self.gate_proj = nn.Linear(in_dim, out_dim) self.lin_proj = nn.Linear(in_dim, out_dim) self.nonlin_proj = nn.Linear(in_dim, out_dim) for p in self.parameters(): if p.dim() > 1: torch.nn.init.xavier_normal_(p) else: torch.nn.init.constant_(p, 0) def forward(self, x): gate = torch.sigmoid(self.gate_proj(x) - 2) lin = self.lin_proj(x) nonlin = torch.relu(self.nonlin_proj(x)) res = gate * nonlin + (1 - gate) * lin return res def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask) tmp10 = tl.load(in_ptr2 + x0, xmask) tmp1 = 2.0 tmp2 = tmp0 - tmp1 tmp3 = tl.sigmoid(tmp2) tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp3 * tmp6 tmp8 = 1.0 tmp9 = tmp8 - tmp3 tmp11 = tmp9 * tmp10 tmp12 = tmp7 + tmp11 tl.store(out_ptr0 + x0, tmp12, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0[grid(256)](buf0, buf2, buf1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, buf1, buf2 class HighwayNetworkNew(nn.Module): def __init__(self, in_dim, out_dim): super(HighwayNetworkNew, self).__init__() self.gate_proj = nn.Linear(in_dim, out_dim) self.lin_proj = nn.Linear(in_dim, out_dim) self.nonlin_proj = nn.Linear(in_dim, out_dim) for p in self.parameters(): if p.dim() > 1: torch.nn.init.xavier_normal_(p) else: torch.nn.init.constant_(p, 0) def forward(self, input_0): primals_1 = self.gate_proj.weight primals_2 = self.gate_proj.bias primals_4 = self.lin_proj.weight primals_5 = self.lin_proj.bias primals_6 = self.nonlin_proj.weight primals_7 = self.nonlin_proj.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
kayburns/craftassist
HighwayNetwork
false
3,813
[ "MIT" ]
0
07909493d320afc2c9ff428d0891bc3acd4dc68f
https://github.com/kayburns/craftassist/tree/07909493d320afc2c9ff428d0891bc3acd4dc68f
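A sketch of HighwayNetwork's forward pass as fused in this row: the gate logits are shifted by -2 (so with freshly initialised weights the gate sits near sigmoid(-2) ≈ 0.12 and the mix initially favours the linear branch), a ReLU branch supplies the non-linear path, and a plain linear projection supplies the carry path. Sizes follow get_init_inputs() above.

import torch
import torch.nn as nn

in_dim = out_dim = 4
x = torch.rand(4, 4, 4, in_dim)
gate_proj, lin_proj, nonlin_proj = (nn.Linear(in_dim, out_dim) for _ in range(3))

gate = torch.sigmoid(gate_proj(x) - 2)                              # biased towards 0
res = gate * torch.relu(nonlin_proj(x)) + (1 - gate) * lin_proj(x)
print(res.shape)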
SoftmaxRegression
import torch import torch.nn.functional as F class SoftmaxRegression(torch.nn.Module): def __init__(self, num_features, num_classes): super(SoftmaxRegression, self).__init__() self.linear = torch.nn.Linear(num_features, num_classes) def forward(self, x): logits = self.linear(x) probas = F.softmax(logits, dim=1) return logits, probas def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2 class SoftmaxRegressionNew(torch.nn.Module): def __init__(self, num_features, num_classes): super(SoftmaxRegressionNew, self).__init__() self.linear = torch.nn.Linear(num_features, num_classes) def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
kbrezinski/stat-453-deep-learning
SoftmaxRegression
false
3,817
[ "BSD-3-Clause" ]
0
b10240b5c3a970231dcea9221d3d179d26fc197d
https://github.com/kbrezinski/stat-453-deep-learning/tree/b10240b5c3a970231dcea9221d3d179d26fc197d
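A sketch of the two-pass softmax over dim=1 that triton_poi_fused__softmax_0/_1 implement in this row: the first pass subtracts the per-slice maximum and exponentiates, the second normalises by the per-slice sum. Random logits stand in for the addmm output.

import torch
import torch.nn.functional as F

logits = torch.randn(4, 4, 4, 4)
shifted = logits - logits.max(dim=1, keepdim=True).values            # first kernel
probas = shifted.exp() / shifted.exp().sum(dim=1, keepdim=True)      # second kernel
print(torch.allclose(probas, F.softmax(logits, dim=1), atol=1e-6))   # True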
CustomizedNet
import torch import torch.nn as nn import torch.utils.data.distributed class CustomizedNet(nn.Module): def __init__(self, dropout, input_size, input_feature_num, hidden_dim, output_size): """ Simply use linear layers for multi-variate single-step forecasting. """ super().__init__() self.fc1 = nn.Linear(input_size * input_feature_num, hidden_dim) self.dropout = nn.Dropout(dropout) self.relu1 = nn.ReLU() self.fc2 = nn.Linear(hidden_dim, output_size) def forward(self, x): x = x.view(-1, x.shape[1] * x.shape[2]) x = self.fc1(x) x = self.dropout(x) x = self.relu1(x) x = self.fc2(x) x = torch.unsqueeze(x, 1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dropout': 0.5, 'input_size': 4, 'input_feature_num': 4, 'hidden_dim': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 16), (16, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), reinterpret_tensor(primals_2, (16, 4), (1, 16), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(64)](buf1, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (16, 1, 4), (4, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), buf1, primals_4 class CustomizedNetNew(nn.Module): def __init__(self, dropout, input_size, input_feature_num, hidden_dim, output_size): """ Simply use linear layers for multi-variate single-step forecasting. """ super().__init__() self.fc1 = nn.Linear(input_size * input_feature_num, hidden_dim) self.dropout = nn.Dropout(dropout) self.relu1 = nn.ReLU() self.fc2 = nn.Linear(hidden_dim, output_size) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jason-dai/BigDL
CustomizedNet
false
3,818
[ "Apache-2.0" ]
0
81ee60a73707d91c58d9bcd5b17c8e5731741a85
https://github.com/jason-dai/BigDL/tree/81ee60a73707d91c58d9bcd5b17c8e5731741a85
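A shape walk-through of CustomizedNet under the get_init_inputs() settings above (input_size=4, input_feature_num=4, hidden_dim=4, output_size=4): the (4, 4, 4, 4) input flattens to (16, 16), passes through the two Linear layers, and is unsqueezed to (16, 1, 4), matching the (16, 1, 4) tensor that call() returns. Dropout is omitted here since it is the identity at evaluation time.

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
flat = x.view(-1, x.shape[1] * x.shape[2])      # (16, 16)
fc1, fc2 = nn.Linear(16, 4), nn.Linear(4, 4)
out = torch.unsqueeze(fc2(torch.relu(fc1(flat))), 1)
print(flat.shape, out.shape)  # torch.Size([16, 16]) torch.Size([16, 1, 4])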
DQN
import torch import torch.nn as nn class DQN(nn.Module): def __init__(self, obs_size: 'int', num_actions: 'int', hidden_size: 'int'=20): super(DQN, self).__init__() self.l1 = nn.Linear(obs_size, hidden_size) self.n1 = nn.LayerNorm(hidden_size, elementwise_affine=True) self.l3 = nn.Linear(hidden_size, num_actions) self.activ = torch.nn.LeakyReLU() def forward(self, x): hidden = self.activ(self.n1(self.l1(x))) output = self.l3(hidden) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'obs_size': 4, 'num_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_leaky_relu_native_layer_norm_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 rnumel = 20 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 20 * x0), rmask & xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp26 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 20, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask & xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 20.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = 0.0 tmp29 = tmp27 > tmp28 tmp30 = 0.01 tmp31 = tmp27 * tmp30 tmp32 = tl.where(tmp29, tmp27, tmp31) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(in_out_ptr1 + (r1 + 20 * x0), tmp32, rmask & xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (20, 4), (4, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (20,), (1,)) assert_size_stride(primals_5, (20,), (1,)) assert_size_stride(primals_6, (4, 20), (20, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch. 
float32) buf6 = buf5 del buf5 get_raw_stream(0) triton_per_fused_leaky_relu_native_layer_norm_0[grid(64)](buf4, buf6, buf0, primals_4, primals_5, buf1, 64, 20, XBLOCK=8, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf6, (64, 20), (20, 1), 0), reinterpret_tensor(primals_6, (20, 4), (1, 20), 0), alpha=1, beta=1, out=buf7) del primals_7 return reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1, buf4, reinterpret_tensor(buf6, (64, 20), (20, 1), 0 ), primals_6 class DQNNew(nn.Module): def __init__(self, obs_size: 'int', num_actions: 'int', hidden_size: 'int'=20): super(DQNNew, self).__init__() self.l1 = nn.Linear(obs_size, hidden_size) self.n1 = nn.LayerNorm(hidden_size, elementwise_affine=True) self.l3 = nn.Linear(hidden_size, num_actions) self.activ = torch.nn.LeakyReLU() def forward(self, input_0): primals_1 = self.l1.weight primals_2 = self.l1.bias primals_4 = self.n1.weight primals_5 = self.n1.bias primals_6 = self.l3.weight primals_7 = self.l3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
kcorder/vcg_dqn
DQN
false
3,819
[ "MIT" ]
0
da43892f701fe88a4c751f209da2743fd824d2f5
https://github.com/kcorder/vcg_dqn/tree/da43892f701fe88a4c751f209da2743fd824d2f5
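A sketch of the stage that triton_per_fused_leaky_relu_native_layer_norm_0 fuses in this row: a LayerNorm over the hidden_size=20 features followed by LeakyReLU, sitting between the two addmm projections. Sizes follow get_init_inputs() above; the layers are untrained stand-ins.

import torch
import torch.nn as nn

obs_size, num_actions, hidden_size = 4, 4, 20
x = torch.rand(4, 4, 4, obs_size)
l1 = nn.Linear(obs_size, hidden_size)
n1 = nn.LayerNorm(hidden_size, elementwise_affine=True)
l3 = nn.Linear(hidden_size, num_actions)

hidden = nn.LeakyReLU()(n1(l1(x)))   # the fused normalisation + activation
print(l3(hidden).shape)              # torch.Size([4, 4, 4, 4])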
ActorNN
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F def init_hidden(layer): """ Initialize NN layers """ input_size = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(input_size) return -lim, lim class ActorNN(nn.Module): """ Actor Class """ def __init__(self, state_size, action_size, hidden_size1=512, hidden_size2=256): """ Initialize parameters """ super(ActorNN, self).__init__() self.state_size = state_size self.hidden_size1 = hidden_size1 self.hidden_size2 = hidden_size2 self.action_size = action_size self.FC1 = nn.Linear(self.state_size, self.hidden_size1) self.FC2 = nn.Linear(self.hidden_size1, self.hidden_size2) self.FC3 = nn.Linear(self.hidden_size2, self.action_size) self.reset_parameters() def forward(self, state): x = F.relu(self.FC1(state)) x = F.relu(self.FC2(x)) x = torch.tanh(self.FC3(x)) return x def reset_parameters(self): self.FC1.weight.data.uniform_(*init_hidden(self.FC1)) self.FC2.weight.data.uniform_(*init_hidden(self.FC2)) self.FC3.weight.data.uniform_(-0.003, 0.003) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 512), (512, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (4, 256), (256, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 512), (8192, 2048, 512, 1), 0 ) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(32768)](buf1, primals_2, buf7, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 256), (1, 512), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) 
triton_poi_fused_relu_threshold_backward_1[grid(16384)](buf3, primals_5, buf6, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 512), (512, 1), 0 ), reinterpret_tensor(buf3, (64, 256), (256, 1), 0 ), buf5, primals_6, buf6, primals_4, buf7 def init_hidden(layer): """ Initialize NN layers """ input_size = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(input_size) return -lim, lim class ActorNNNew(nn.Module): """ Actor Class """ def __init__(self, state_size, action_size, hidden_size1=512, hidden_size2=256): """ Initialize parameters """ super(ActorNNNew, self).__init__() self.state_size = state_size self.hidden_size1 = hidden_size1 self.hidden_size2 = hidden_size2 self.action_size = action_size self.FC1 = nn.Linear(self.state_size, self.hidden_size1) self.FC2 = nn.Linear(self.hidden_size1, self.hidden_size2) self.FC3 = nn.Linear(self.hidden_size2, self.action_size) self.reset_parameters() def reset_parameters(self): self.FC1.weight.data.uniform_(*init_hidden(self.FC1)) self.FC2.weight.data.uniform_(*init_hidden(self.FC2)) self.FC3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0): primals_1 = self.FC1.weight primals_2 = self.FC1.bias primals_4 = self.FC2.weight primals_5 = self.FC2.bias primals_6 = self.FC3.weight primals_7 = self.FC3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
kaustav1987/Tennis-Collaboration-and-Competition-Continuous-Control
ActorNN
false
3821
[ "MIT" ]
0
d724e09d7a5948e2023fb86bf977455f3c507054
https://github.com/kaustav1987/Tennis-Collaboration-and-Competition-Continuous-Control/tree/d724e09d7a5948e2023fb86bf977455f3c507054
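A minimal parity-check sketch that could be run against any record in this listing, assuming the reference module, its Triton-backed *New counterpart, and the record's get_inputs/get_init_inputs helpers are in scope and a CUDA device is available. The helper name check_record and the tolerances are illustrative and are not part of any record.

import torch

def check_record(ref_cls, new_cls, get_inputs, get_init_inputs, atol=1e-4, rtol=1e-4):
    # Build both modules from the record's init arguments and copy the reference
    # parameters into the Triton-backed rewrite so that only the kernels differ.
    init_args, init_kwargs = get_init_inputs()
    ref = ref_cls(*init_args, **init_kwargs).cuda()
    new = new_cls(*init_args, **init_kwargs).cuda()
    new.load_state_dict(ref.state_dict())
    inputs = [t.cuda() for t in get_inputs()]
    with torch.no_grad():
        # Clone per call: some records (e.g. Mish, Swish) mutate their input in place.
        out_ref = ref(*[t.clone() for t in inputs])
        out_new = new(*[t.clone() for t in inputs])
    torch.testing.assert_close(out_new, out_ref, atol=atol, rtol=rtol)

# Example usage for the ActorNN record above:
# check_record(ActorNN, ActorNNNew, get_inputs, get_init_inputs)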
FeaturewiseAffine
import torch from typing import Union import torch.nn as nn class FeaturewiseAffine(nn.Module): """Feature-wise affine layer.""" def __init__(self): super().__init__() def forward(self, x, scale: 'Union[float, torch.Tensor]', shift: 'Union[float, torch.Tensor]'): res = scale * x + shift return res def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](arg0_1, arg1_1, arg2_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf0, class FeaturewiseAffineNew(nn.Module): """Feature-wise affine layer.""" def __init__(self): super().__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
ketan0/ddim
FeaturewiseAffine
false
3822
[ "MIT" ]
0
26f2de1107885a3f332dd8435b73a1eaedbe10a8
https://github.com/ketan0/ddim/tree/26f2de1107885a3f332dd8435b73a1eaedbe10a8
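One detail of the FeaturewiseAffine record above that is easy to miss: the reference forward also accepts plain Python floats for scale and shift, while the generated call() asserts three contiguous (4, 4, 4, 4) CUDA tensors, so scalar arguments would have to be materialised first. A rough sketch, assuming FeaturewiseAffineNew from the record is in scope on a CUDA machine:

import torch

x = torch.rand(4, 4, 4, 4, device="cuda")
scale, shift = 0.5, 0.25
# The fused mul/add kernel indexes scale and shift element-wise, so broadcast
# scalar arguments to full tensors before handing them to the Triton-backed module.
scale_t = torch.full_like(x, scale)
shift_t = torch.full_like(x, shift)
expected = scale * x + shift
actual = FeaturewiseAffineNew()(x, scale_t, shift_t)
torch.testing.assert_close(actual, expected)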
BiAttention
import torch from typing import Optional import torch.nn as nn from torch.nn.parameter import Parameter class BiAttention(nn.Module): def __init__(self, input_size_encoder: 'int', input_size_decoder: 'int', num_labels: 'int', biaffine: 'bool'=True, **kwargs) ->None: super(BiAttention, self).__init__() self.input_size_encoder = input_size_encoder self.input_size_decoder = input_size_decoder self.num_labels = num_labels self.biaffine = biaffine self.W_e = Parameter(torch.Tensor(self.num_labels, self. input_size_encoder)) self.W_d = Parameter(torch.Tensor(self.num_labels, self. input_size_decoder)) self.b = Parameter(torch.Tensor(self.num_labels, 1, 1)) if self.biaffine: self.U = Parameter(torch.Tensor(self.num_labels, self. input_size_decoder, self.input_size_encoder)) else: self.register_parameter('U', None) self.reset_parameters() def reset_parameters(self) ->None: nn.init.xavier_uniform_(self.W_e) nn.init.xavier_uniform_(self.W_d) nn.init.constant_(self.b, 0.0) if self.biaffine: nn.init.xavier_uniform_(self.U) def forward(self, input_d: 'torch.Tensor', input_e: 'torch.Tensor', mask_d: 'Optional[torch.Tensor]'=None, mask_e: 'Optional[torch.Tensor]'=None) ->torch.Tensor: assert input_d.size(0) == input_e.size(0) _batch, _length_decoder, _ = input_d.size() _, _length_encoder, _ = input_e.size() out_d = torch.matmul(self.W_d, input_d.transpose(1, 2)).unsqueeze(3) out_e = torch.matmul(self.W_e, input_e.transpose(1, 2)).unsqueeze(2) if self.biaffine: output = torch.matmul(input_d.unsqueeze(1), self.U) output = torch.matmul(output, input_e.unsqueeze(1).transpose(2, 3)) output = output + out_d + out_e + self.b else: output = out_d + out_d + self.b if mask_d is not None: output = output * mask_d.unsqueeze(1).unsqueeze(3 ) * mask_e.unsqueeze(1).unsqueeze(2) return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size_encoder': 4, 'input_size_decoder': 4, 'num_labels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (x2 + 4 * x1 + 16 * x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + x4, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_1, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](primals_5, buf3, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf4 = 
empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4) buf5 = buf3 del buf3 triton_poi_fused_clone_2[grid(256)](primals_2, buf5, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6) del buf4 buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_add_3[grid(256)](buf7, buf0, buf1, primals_6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf1 del primals_6 return buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf5, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0) class BiAttentionNew(nn.Module): def __init__(self, input_size_encoder: 'int', input_size_decoder: 'int', num_labels: 'int', biaffine: 'bool'=True, **kwargs) ->None: super(BiAttentionNew, self).__init__() self.input_size_encoder = input_size_encoder self.input_size_decoder = input_size_decoder self.num_labels = num_labels self.biaffine = biaffine self.W_e = Parameter(torch.Tensor(self.num_labels, self. input_size_encoder)) self.W_d = Parameter(torch.Tensor(self.num_labels, self. input_size_decoder)) self.b = Parameter(torch.Tensor(self.num_labels, 1, 1)) if self.biaffine: self.U = Parameter(torch.Tensor(self.num_labels, self. input_size_decoder, self.input_size_encoder)) else: self.register_parameter('U', None) self.reset_parameters() def reset_parameters(self) ->None: nn.init.xavier_uniform_(self.W_e) nn.init.xavier_uniform_(self.W_d) nn.init.constant_(self.b, 0.0) if self.biaffine: nn.init.xavier_uniform_(self.U) def forward(self, input_0, input_1): primals_3 = self.W_e primals_4 = self.W_d primals_6 = self.b primals_1 = self.U primals_2 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
katie0809/KLUE-baseline
BiAttention
false
3823
[ "Apache-2.0" ]
0
144973359e9dc3bbbb3ce7a0cc765b0207f63775
https://github.com/katie0809/KLUE-baseline/tree/144973359e9dc3bbbb3ce7a0cc765b0207f63775
Mish
import torch from torch import nn from torch.nn import functional as F class Mish(nn.Module): def forward(self, x): return x.mul_(F.softplus(x).tanh()) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = libdevice.tanh(tmp5) tmp7 = tmp0 * tmp6 tl.store(out_ptr1 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_mul_softplus_tanh_0[grid(256)](arg0_1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) return arg0_1, class MishNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
khayliang/single_person_tracking
Mish
false
3824
[ "MIT" ]
0
d93aae3742ba3c77f00b3917b182784f03b5d597
https://github.com/khayliang/single_person_tracking/tree/d93aae3742ba3c77f00b3917b182784f03b5d597
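The fused Mish kernel above folds softplus (with the usual threshold of 20, above which the input passes through unchanged), tanh, and the final multiply into a single pass, and it writes the result back into the input buffer to mirror the in-place mul_ of the reference. A small numerical check, assuming MishNew from the record is in scope on a CUDA machine:

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4, device="cuda")
reference = x * torch.tanh(F.softplus(x))   # mish(x) = x * tanh(softplus(x))
actual = MishNew()(x.clone())               # clone: the fused kernel overwrites its input
torch.testing.assert_close(actual, reference)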
TripletLoss
import torch import torch.utils.data import torch import torch.nn as nn class TripletLoss(nn.Module): def __init__(self, margin=1.0): super(TripletLoss, self).__init__() self.margin = margin def calc_euclidean(self, x1, x2): return (x1 - x2).pow(2).sum(1) def forward(self, anchor: 'torch.Tensor', positive: 'torch.Tensor', negative: 'torch.Tensor') ->torch.Tensor: distance_positive = self.calc_euclidean(anchor, positive) distance_negative = self.calc_euclidean(anchor, negative) losses = torch.relu(distance_positive - distance_negative + self.margin ) return losses.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp19 = tl.load(in_ptr2 + (r0 + 64 * r1), None) tmp22 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None) tmp26 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None) tmp30 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp20 = tmp0 - tmp19 tmp21 = tmp20 * tmp20 tmp23 = tmp4 - tmp22 tmp24 = tmp23 * tmp23 tmp25 = tmp21 + tmp24 tmp27 = tmp9 - tmp26 tmp28 = tmp27 * tmp27 tmp29 = tmp25 + tmp28 tmp31 = tmp14 - tmp30 tmp32 = tmp31 * tmp31 tmp33 = tmp29 + tmp32 tmp34 = tmp18 - tmp33 tmp35 = 1.0 tmp36 = tmp34 + tmp35 tmp37 = tl.full([1, 1], 0, tl.int32) tmp38 = triton_helpers.maximum(tmp37, tmp36) tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.sum(tmp39, 1)[:, None] tmp42 = 64.0 tmp43 = tmp41 / tmp42 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_mean_pow_relu_sub_sum_0[grid(1)](buf2, arg0_1, arg1_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class TripletLossNew(nn.Module): def __init__(self, margin=1.0): super(TripletLossNew, self).__init__() self.margin = margin def calc_euclidean(self, x1, x2): return (x1 - x2).pow(2).sum(1) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
ketan-lambat/contrastive-unpaired-translation
TripletLoss
false
3825
[ "BSD-3-Clause" ]
0
ea71b3a9603a51b97f1fa8426d5a1beae9260a0d
https://github.com/ketan-lambat/contrastive-unpaired-translation/tree/ea71b3a9603a51b97f1fa8426d5a1beae9260a0d
CriticNN
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F def init_hidden(layer): """ Initialize NN layers """ input_size = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(input_size) return -lim, lim class CriticNN(nn.Module): """ Critic class """ def __init__(self, state_size, action_size, hidden_size1=512, hidden_size2=256): """ Initialize parameters """ super(CriticNN, self).__init__() self.state_size = state_size self.hidden_size1 = hidden_size1 self.hidden_size2 = hidden_size2 self.action_size = action_size self.FC1 = nn.Linear(self.state_size, self.hidden_size1) self.FC2 = nn.Linear(self.hidden_size1 + self.action_size, self. hidden_size2) self.FC3 = nn.Linear(self.hidden_size2, 1) self.reset_parameters() def forward(self, state, action): x = F.relu(self.FC1(state)) x = torch.cat((x, action), dim=1) x = F.relu(self.FC2(x)) x = self.FC3(x) return x def reset_parameters(self): self.FC1.weight.data.uniform_(*init_hidden(self.FC1)) self.FC2.weight.data.uniform_(*init_hidden(self.FC2)) self.FC3.weight.data.uniform_(-0.003, 0.003) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2064 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 516 x1 = xindex // 516 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 512, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (512 * x1 + x0), tmp4 & xmask, eviction_policy ='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 516, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-512 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (256, 516), (516, 1)) assert_size_stride(primals_6, (256,), (1,)) assert_size_stride(primals_7, (1, 256), (256, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 516), (516, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(2064)](buf0, primals_2, primals_4, buf1, 2064, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (516, 256), ( 1, 516), 0), 
out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(1024)](buf3, primals_6, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 512), (512, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(2048)](buf0, primals_2, buf6, 2048, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6 def init_hidden(layer): """ Initialize NN layers """ input_size = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(input_size) return -lim, lim class CriticNNNew(nn.Module): """ Critic class """ def __init__(self, state_size, action_size, hidden_size1=512, hidden_size2=256): """ Initialize parameters """ super(CriticNNNew, self).__init__() self.state_size = state_size self.hidden_size1 = hidden_size1 self.hidden_size2 = hidden_size2 self.action_size = action_size self.FC1 = nn.Linear(self.state_size, self.hidden_size1) self.FC2 = nn.Linear(self.hidden_size1 + self.action_size, self. hidden_size2) self.FC3 = nn.Linear(self.hidden_size2, 1) self.reset_parameters() def reset_parameters(self): self.FC1.weight.data.uniform_(*init_hidden(self.FC1)) self.FC2.weight.data.uniform_(*init_hidden(self.FC2)) self.FC3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0, input_1): primals_1 = self.FC1.weight primals_2 = self.FC1.bias primals_5 = self.FC2.weight primals_6 = self.FC2.bias primals_7 = self.FC3.weight primals_8 = self.FC3.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
kaustav1987/Tennis-Collaboration-and-Competition-Continuous-Control
CriticNN
false
3826
[ "MIT" ]
0
d724e09d7a5948e2023fb86bf977455f3c507054
https://github.com/kaustav1987/Tennis-Collaboration-and-Competition-Continuous-Control/tree/d724e09d7a5948e2023fb86bf977455f3c507054
AmdimNCELoss
import torch import torch.nn as nn def tanh_clip(x, clip_val=10.0): """ soft clip values to the range [-clip_val, +clip_val] """ if clip_val is not None: x_clip = clip_val * torch.tanh(1.0 / clip_val * x) else: x_clip = x return x_clip class AmdimNCELoss(nn.Module): """ Compute the NCE scores for predicting r_src->r_trg. """ def __init__(self, tclip): super().__init__() self.tclip = tclip def forward(self, anchor_representations, positive_representations, mask_mat): """ Args: anchor_representations: (batch_size, emb_dim) positive_representations: (emb_dim, n_batch * w* h) (ie: nb_feat_vectors x embedding_dim) mask_mat: (n_batch_gpu, n_batch) Output: raw_scores: (n_batch_gpu, n_locs) nce_scores: (n_batch_gpu, n_locs) lgt_reg : scalar """ r_src = anchor_representations r_trg = positive_representations batch_size, emb_dim = r_src.size() nb_feat_vectors = r_trg.size(1) // batch_size mask_pos = mask_mat.unsqueeze(dim=2).expand(-1, -1, nb_feat_vectors ).float() mask_neg = 1.0 - mask_pos raw_scores = torch.mm(r_src, r_trg).float() raw_scores = raw_scores.reshape(batch_size, batch_size, nb_feat_vectors ) raw_scores = raw_scores / emb_dim ** 0.5 lgt_reg = 0.05 * (raw_scores ** 2.0).mean() raw_scores = tanh_clip(raw_scores, clip_val=self.tclip) """ pos_scores includes scores for all the positive samples neg_scores includes scores for all the negative samples, with scores for positive samples set to the min score (-self.tclip here) """ pos_scores = (mask_pos * raw_scores).sum(dim=1) neg_scores = mask_neg * raw_scores - self.tclip * mask_pos neg_scores = neg_scores.reshape(batch_size, -1) mask_neg = mask_neg.reshape(batch_size, -1) neg_maxes = torch.max(neg_scores, dim=1, keepdim=True)[0] neg_sumexp = (mask_neg * torch.exp(neg_scores - neg_maxes)).sum(dim =1, keepdim=True) all_logsumexp = torch.log(torch.exp(pos_scores - neg_maxes) + neg_sumexp) pos_shiftexp = pos_scores - neg_maxes nce_scores = pos_shiftexp - all_logsumexp nce_scores = -nce_scores.mean() return nce_scores, lgt_reg def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'tclip': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = 0.5 tmp5 = tmp3 * tmp4 tmp6 = 0.25 tmp7 = tmp5 * tmp6 tmp8 = libdevice.tanh(tmp7) tmp9 = 4.0 tmp10 = tmp8 * tmp9 tmp11 = tmp2 * tmp10 tmp12 = tmp0 * tmp9 tmp13 = tmp11 - tmp12 tmp15 = tmp1 - tmp14 tmp17 = tmp16 * tmp4 tmp18 = tmp17 * tmp6 tmp19 = libdevice.tanh(tmp18) tmp20 = tmp19 * tmp9 tmp21 = tmp15 * tmp20 tmp22 = tmp14 * tmp9 tmp23 = tmp21 - tmp22 tmp24 = triton_helpers.maximum(tmp13, tmp23) tmp26 = tmp1 - tmp25 tmp28 = tmp27 * tmp4 tmp29 = tmp28 * tmp6 tmp30 = libdevice.tanh(tmp29) tmp31 = tmp30 * tmp9 tmp32 = tmp26 * tmp31 tmp33 = tmp25 * tmp9 tmp34 = tmp32 - tmp33 tmp35 = triton_helpers.maximum(tmp24, tmp34) tmp37 = tmp1 - tmp36 tmp39 = tmp38 * tmp4 tmp40 = tmp39 * tmp6 tmp41 = libdevice.tanh(tmp40) tmp42 = tmp41 * tmp9 tmp43 = tmp37 * tmp42 tmp44 = tmp36 * tmp9 tmp45 = tmp43 - tmp44 tmp46 = triton_helpers.maximum(tmp35, tmp45) tmp47 = tmp13 - tmp46 tmp48 = tl_math.exp(tmp47) tmp49 = tmp2 * tmp48 tmp50 = tmp23 - tmp46 tmp51 = tl_math.exp(tmp50) tmp52 = tmp15 * tmp51 tmp53 = tmp49 + tmp52 tmp54 = tmp34 - tmp46 tmp55 = tl_math.exp(tmp54) tmp56 = tmp26 * tmp55 tmp57 = tmp53 + tmp56 tmp58 = tmp45 - tmp46 tmp59 = tl_math.exp(tmp58) tmp60 = tmp37 * tmp59 tmp61 = tmp57 + tmp60 tmp62 = tmp0 * tmp10 tmp63 = tmp14 * tmp20 tmp64 = tmp62 + tmp63 tmp65 = tmp25 * tmp31 tmp66 = tmp64 + tmp65 tmp67 = tmp36 * tmp42 tmp68 = tmp66 + tmp67 tmp69 = tmp68 - tmp46 tmp70 = tl_math.exp(tmp69) tmp71 = tmp70 + tmp61 tmp72 = tl_math.log(tmp71) tmp73 = tmp69 - tmp72 tmp74 = tl.broadcast_to(tmp73, [XBLOCK, RBLOCK]) tmp76 = tl.sum(tmp74, 1)[:, None] tmp77 = tmp76 / tmp9 tmp78 = -tmp77 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp78, None) @triton.jit def triton_per_fused_div_mean_mul_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 
= rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp7 = 16.0 tmp8 = tmp6 / tmp7 tmp9 = 0.05 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, arg1_1, out=buf0) del arg0_1 del arg1_1 buf4 = empty_strided_cuda((), (), torch.float32) buf6 = buf4 del buf4 get_raw_stream(0) triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0[grid (1)](buf6, arg2_1, buf0, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg2_1 buf5 = empty_strided_cuda((), (), torch.float32) buf7 = buf5 del buf5 triton_per_fused_div_mean_mul_pow_1[grid(1)](buf7, buf0, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 return buf6, buf7 def tanh_clip(x, clip_val=10.0): """ soft clip values to the range [-clip_val, +clip_val] """ if clip_val is not None: x_clip = clip_val * torch.tanh(1.0 / clip_val * x) else: x_clip = x return x_clip class AmdimNCELossNew(nn.Module): """ Compute the NCE scores for predicting r_src->r_trg. """ def __init__(self, tclip): super().__init__() self.tclip = tclip def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
jfrancis71/pytorch-lightning-bolts
AmdimNCELoss
false
3827
[ "Apache-2.0" ]
0
8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6
https://github.com/jfrancis71/pytorch-lightning-bolts/tree/8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6
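A quick illustration of the tanh_clip helper shared by both AmdimNCELoss variants above: it is a soft clamp that stays roughly linear for small scores and saturates at plus or minus clip_val. The printed values are approximate and only illustrative.

import torch

x = torch.tensor([-100.0, -1.0, 0.0, 1.0, 100.0])
clipped = 4.0 * torch.tanh(x / 4.0)   # tanh_clip(x, clip_val=4.0)
print(clipped)                        # roughly [-4.00, -0.98, 0.00, 0.98, 4.00]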
Swish
import torch from torch import nn class Swish(nn.Module): def forward(self, x): return x.mul_(torch.sigmoid(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = tmp0 * tmp1 tl.store(out_ptr1 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) return arg0_1, class SwishNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
khayliang/single_person_tracking
Swish
false
3828
[ "MIT" ]
0
d93aae3742ba3c77f00b3917b182784f03b5d597
https://github.com/khayliang/single_person_tracking/tree/d93aae3742ba3c77f00b3917b182784f03b5d597
FakeRKHSConvNet
import math import torch import numpy as np import torch.nn as nn class MaybeBatchNorm2d(nn.Module): def __init__(self, n_ftr, affine, use_bn): super(MaybeBatchNorm2d, self).__init__() self.bn = nn.BatchNorm2d(n_ftr, affine=affine) self.use_bn = use_bn def forward(self, x): if self.use_bn: x = self.bn(x) return x class FakeRKHSConvNet(nn.Module): def __init__(self, n_input, n_output, use_bn=False): super(FakeRKHSConvNet, self).__init__() self.conv1 = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1, padding=0, bias=False) self.bn1 = MaybeBatchNorm2d(n_output, True, use_bn) self.relu1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(n_output, n_output, kernel_size=1, stride=1, padding=0, bias=False) self.bn_out = MaybeBatchNorm2d(n_output, True, True) self.shortcut = nn.Conv2d(n_input, n_output, kernel_size=1, stride= 1, padding=0, bias=True) if n_output >= n_input: eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=np.bool) for i in range(n_input): eye_mask[i, i, 0, 0] = 1 self.shortcut.weight.data.uniform_(-0.01, 0.01) self.shortcut.weight.data.masked_fill_(torch.tensor(eye_mask), 1.0) def init_weights(self, init_scale=1.0): nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5)) self.conv1.weight.data.mul_(init_scale) nn.init.constant_(self.conv2.weight, 0.0) def forward(self, x): h_res = self.conv2(self.relu1(self.bn1(self.conv1(x)))) h = self.bn_out(h_res + self.shortcut(x)) return h def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_input': 4, 'n_output': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1( in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.sqrt(tmp9) tmp11 = tl.full([1], 1, tl.int32) tmp12 = tmp11 / tmp10 tmp13 = 1.0 tmp14 = tmp12 * tmp13 tmp15 = tmp6 * tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + x3, tmp19, xmask) tl.store(out_ptr1 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps =4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = extern_kernels.convolution(primals_2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 
4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1[ grid(256)](buf2, buf3, primals_5, primals_6, primals_7, primals_8, primals_9, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_5 del primals_6 del primals_9 return (buf4, primals_1, primals_2, primals_3, primals_4, primals_7, primals_8, buf1, buf5) class MaybeBatchNorm2d(nn.Module): def __init__(self, n_ftr, affine, use_bn): super(MaybeBatchNorm2d, self).__init__() self.bn = nn.BatchNorm2d(n_ftr, affine=affine) self.use_bn = use_bn def forward(self, x): if self.use_bn: x = self.bn(x) return x class FakeRKHSConvNetNew(nn.Module): def __init__(self, n_input, n_output, use_bn=False): super(FakeRKHSConvNetNew, self).__init__() self.conv1 = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1, padding=0, bias=False) self.bn1 = MaybeBatchNorm2d(n_output, True, use_bn) self.relu1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(n_output, n_output, kernel_size=1, stride=1, padding=0, bias=False) self.bn_out = MaybeBatchNorm2d(n_output, True, True) self.shortcut = nn.Conv2d(n_input, n_output, kernel_size=1, stride= 1, padding=0, bias=True) if n_output >= n_input: eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=np.bool) for i in range(n_input): eye_mask[i, i, 0, 0] = 1 self.shortcut.weight.data.uniform_(-0.01, 0.01) self.shortcut.weight.data.masked_fill_(torch.tensor(eye_mask), 1.0) def init_weights(self, init_scale=1.0): nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5)) self.conv1.weight.data.mul_(init_scale) nn.init.constant_(self.conv2.weight, 0.0) def forward(self, input_0): primals_1 = self.conv1.weight primals_5 = self.bn1.bn.weight primals_6 = self.bn1.bn.bias primals_3 = self.conv2.weight primals_7 = self.bn_out.bn.weight primals_8 = self.bn_out.bn.bias primals_4 = self.shortcut.weight primals_9 = self.shortcut.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
jfrancis71/pytorch-lightning-bolts
FakeRKHSConvNet
false
3829
[ "Apache-2.0" ]
0
8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6
https://github.com/jfrancis71/pytorch-lightning-bolts/tree/8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6
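A portability note on the FakeRKHSConvNet record above (an observation about the runtime environment, not a change to the stored code): np.bool was removed in NumPy 1.24, so on a recent NumPy the eye_mask construction raises an AttributeError unless the builtin bool is used instead. A minimal equivalent, assuming n_input = n_output = 4 as in get_init_inputs:

import numpy as np

# Same identity-style mask, written so it also runs on NumPy >= 1.24.
eye_mask = np.zeros((4, 4, 1, 1), dtype=bool)   # instead of dtype=np.bool
for i in range(4):
    eye_mask[i, i, 0, 0] = True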
SchedulerTestNet
import torch from torch.nn import functional as F class SchedulerTestNet(torch.nn.Module): """ adapted from: https://github.com/pytorch/pytorch/blob/master/test/test_optim.py """ def __init__(self): super(SchedulerTestNet, self).__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) def forward(self, x): return self.conv2(F.relu(self.conv1(x))) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (1, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(16384)](buf3, primals_5, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class SchedulerTestNetNew(torch.nn.Module): """ adapted from: https://github.com/pytorch/pytorch/blob/master/test/test_optim.py """ def __init__(self): super(SchedulerTestNetNew, self).__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jfrancis71/pytorch-lightning-bolts
SchedulerTestNet
false
3830
[ "Apache-2.0" ]
0
8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6
https://github.com/jfrancis71/pytorch-lightning-bolts/tree/8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6